diff --git a/tests/models/albert/test_modeling_albert.py b/tests/models/albert/test_modeling_albert.py index d1e5631b342d33..a12b54e1fea82c 100644 --- a/tests/models/albert/test_modeling_albert.py +++ b/tests/models/albert/test_modeling_albert.py @@ -267,6 +267,7 @@ class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): else {} ) fx_compatible = True + pretrained_checkpoint = "albert/albert-base-v1" # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -286,13 +287,6 @@ def setUp(self): self.model_tester = AlbertModelTester(self) self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @@ -319,12 +313,6 @@ def test_model_various_embeddings(self): config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "albert/albert-base-v1" - model = AlbertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class AlbertModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/albert/test_modeling_flax_albert.py b/tests/models/albert/test_modeling_flax_albert.py index 956de9ebdc9e57..e12c69eccc5448 100644 --- a/tests/models/albert/test_modeling_flax_albert.py +++ b/tests/models/albert/test_modeling_flax_albert.py @@ -132,17 +132,11 @@ class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase): if is_flax_available() else () ) + pretrained_checkpoint = "albert/albert-base-v2" def setUp(self): self.model_tester = FlaxAlbertModelTester(self) - @slow - def test_model_from_pretrained(self): - for model_class_name in self.all_model_classes: - model = model_class_name.from_pretrained("albert/albert-base-v2") - outputs = model(np.ones((1, 1))) - self.assertIsNotNone(outputs) - @require_flax class FlaxAlbertModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/align/test_modeling_align.py b/tests/models/align/test_modeling_align.py index ddeb585a757d5d..638da2b2fa31d6 100644 --- a/tests/models/align/test_modeling_align.py +++ b/tests/models/align/test_modeling_align.py @@ -138,6 +138,7 @@ class AlignVisionModelTest(ModelTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "kakaobrain/align-base" def setUp(self): self.model_tester = AlignVisionModelTester(self) @@ -149,9 +150,6 @@ def setUp(self): common_properties=["num_channels", "image_size"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="AlignVisionModel does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -176,10 +174,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -230,12 
+224,6 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "kakaobrain/align-base" - model = AlignVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class AlignTextModelTester: def __init__( @@ -340,18 +328,12 @@ class AlignTextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = False test_pruning = False test_head_masking = False + pretrained_checkpoint = "kakaobrain/align-base" def setUp(self): self.model_tester = AlignTextModelTester(self) self.config_tester = ConfigTester(self, config_class=AlignTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -388,12 +370,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "kakaobrain/align-base" - model = AlignTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class AlignModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): @@ -454,14 +430,11 @@ class AlignModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "kakaobrain/align-base" def setUp(self): self.model_tester = AlignModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Start to fail after using torch `cu118`.") def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() @@ -601,12 +574,6 @@ def test_load_vision_text_config(self): text_config = AlignTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "kakaobrain/align-base" - model = AlignModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/altclip/test_modeling_altclip.py b/tests/models/altclip/test_modeling_altclip.py index f4ac29479c5f1f..bb5aab04277d62 100755 --- a/tests/models/altclip/test_modeling_altclip.py +++ b/tests/models/altclip/test_modeling_altclip.py @@ -146,9 +146,6 @@ def setUp(self): self, config_class=AltCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -174,10 +171,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -309,6 +302,7 @@ class AltCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = True test_pruning = False test_head_masking = False + pretrained_checkpoint = "BAAI/AltCLIP" # TODO (@SunMarc): Fix me 
@unittest.skip(reason="It's broken.") @@ -319,13 +313,6 @@ def setUp(self): self.model_tester = AltCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=AltCLIPTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -365,12 +352,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "BAAI/AltCLIP" - model = AltCLIPTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class AltCLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): @@ -433,6 +414,7 @@ class AltCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "BAAI/AltCLIP" # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( @@ -446,10 +428,6 @@ def is_pipeline_test_to_skip( def setUp(self): self.model_tester = AltCLIPModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -560,12 +538,6 @@ def _create_and_check_torchscript(self, config, inputs_dict): self.assertTrue(models_equal) - @slow - def test_model_from_pretrained(self): - model_name = "BAAI/AltCLIP" - model = AltCLIPModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_vision @require_torch diff --git a/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py b/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py index 0bbefda7ba50e8..b616e6d8ac63d8 100644 --- a/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py +++ b/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py @@ -164,6 +164,7 @@ class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "MIT/ast-finetuned-audioset-10-10-0.4593" # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( @@ -178,9 +179,6 @@ def setUp(self): self.model_tester = ASTModelTester(self) self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="AST does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -206,16 +204,6 @@ def test_forward_signature(self): expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - - @slow - def test_model_from_pretrained(self): - model_name = "MIT/ast-finetuned-audioset-10-10-0.4593" - model = ASTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on some audio from AudioSet def 
prepare_audio(): diff --git a/tests/models/autoformer/test_modeling_autoformer.py b/tests/models/autoformer/test_modeling_autoformer.py index f0cd5dad37b1b5..8981d49f7f8aa2 100644 --- a/tests/models/autoformer/test_modeling_autoformer.py +++ b/tests/models/autoformer/test_modeling_autoformer.py @@ -110,6 +110,23 @@ def get_config(self): scaling="std", # we need std to get non-zero `loc` ) + def create_and_check_model(self, config, input_values): + model = AutoformerModel(config=config) + model.to(torch_device) + model.eval() + result = model(**input_values) + + past_time_features = input_values["past_time_features"] + future_time_features = input_values["future_time_features"] + past_length = config.context_length + max(config.lags_sequence) + time_feat = torch.cat( + [past_time_features[:, past_length - config.context_length :, ...], future_time_features], dim=1 + ) + sequence_length = time_feat.shape[1] - (config.context_length - config.label_length) + self.parent.assertEqual( + tuple(result.last_hidden_state.shape), (self.batch_size, sequence_length, self.hidden_size) + ) + def prepare_autoformer_inputs_dict(self, config): _past_length = config.context_length + max(config.lags_sequence) @@ -217,9 +234,6 @@ def setUp(self): self.model_tester = AutoformerModelTester(self) self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/bark/test_modeling_bark.py b/tests/models/bark/test_modeling_bark.py index 9bb8ef33d75998..3ad712a68e101b 100644 --- a/tests/models/bark/test_modeling_bark.py +++ b/tests/models/bark/test_modeling_bark.py @@ -542,9 +542,6 @@ def setUp(self): self.model_tester = BarkSemanticModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkSemanticConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -631,9 +628,6 @@ def setUp(self): self.model_tester = BarkCoarseModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkCoarseConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -721,9 +715,6 @@ def setUp(self): self.model_tester = BarkFineModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkFineConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/bart/test_modeling_bart.py b/tests/models/bart/test_modeling_bart.py index eda51d21199f31..37797d62bf19d2 100644 --- a/tests/models/bart/test_modeling_bart.py +++ b/tests/models/bart/test_modeling_bart.py @@ -443,9 +443,6 @@ def setUp(self): self.model_tester = BartModelTester(self) self.config_tester = ConfigTester(self, config_class=BartConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ 
-1514,9 +1511,6 @@ def setUp( self.model_tester = BartStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BartConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/beit/test_modeling_beit.py b/tests/models/beit/test_modeling_beit.py index ac64f0fd3b0b11..a6da70dea0842c 100644 --- a/tests/models/beit/test_modeling_beit.py +++ b/tests/models/beit/test_modeling_beit.py @@ -245,6 +245,7 @@ class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): if is_torch_available() else {} ) + pretrained_checkpoint = "microsoft/beit-base-patch16-224" test_pruning = False test_resize_embeddings = False @@ -254,9 +255,6 @@ def setUp(self): self.model_tester = BeitModelTester(self) self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="BEiT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -279,10 +277,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @@ -381,12 +375,6 @@ def test_initialization(self): msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/beit-base-patch16-224" - model = BeitModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/beit/test_modeling_flax_beit.py b/tests/models/beit/test_modeling_flax_beit.py index 78c24220c2d20b..1c5a42514abbd3 100644 --- a/tests/models/beit/test_modeling_flax_beit.py +++ b/tests/models/beit/test_modeling_flax_beit.py @@ -149,9 +149,6 @@ def setUp(self) -> None: self.model_tester = FlaxBeitModelTester(self) self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - # We need to override this test because Beit's forward signature is different than text models. 
def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -189,10 +186,6 @@ def model_jitted(pixel_values, **kwargs): for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) diff --git a/tests/models/bert/test_modeling_bert.py b/tests/models/bert/test_modeling_bert.py index 5c87fbea8ee795..12c6b863bf4879 100644 --- a/tests/models/bert/test_modeling_bert.py +++ b/tests/models/bert/test_modeling_bert.py @@ -466,6 +466,7 @@ class BertModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin ) fx_compatible = True model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "google-bert/bert-base-uncased" # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -485,13 +486,6 @@ def setUp(self): self.model_tester = BertModelTester(self) self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -644,12 +638,6 @@ def test_for_warning_if_padding_and_no_attention_mask(self): model(input_ids, attention_mask=None, token_type_ids=token_type_ids) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) - @slow - def test_model_from_pretrained(self): - model_name = "google-bert/bert-base-uncased" - model = BertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow @require_torch_accelerator def test_torchscript_device_change(self): diff --git a/tests/models/bert_generation/test_modeling_bert_generation.py b/tests/models/bert_generation/test_modeling_bert_generation.py index ecd7a459e0ea8d..27ba3ba683e5d1 100644 --- a/tests/models/bert_generation/test_modeling_bert_generation.py +++ b/tests/models/bert_generation/test_modeling_bert_generation.py @@ -249,18 +249,12 @@ class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, Pipelin if is_torch_available() else {} ) + pretrained_checkpoint = "google/bert_for_seq_generation_L-24_bbc_encoder" def setUp(self): self.model_tester = BertGenerationEncoderTester(self) self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_as_bert(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() config.model_type = "bert" @@ -300,11 +294,6 @@ def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model = 
BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") - self.assertIsNotNone(model) - @require_torch class BertGenerationEncoderIntegrationTest(unittest.TestCase): diff --git a/tests/models/big_bird/test_modeling_big_bird.py b/tests/models/big_bird/test_modeling_big_bird.py index bda5cb62186af2..ae32e8b335075c 100644 --- a/tests/models/big_bird/test_modeling_big_bird.py +++ b/tests/models/big_bird/test_modeling_big_bird.py @@ -465,6 +465,7 @@ class BigBirdModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) if is_torch_available() else {} ) + pretrained_checkpoint = "google/bigbird-roberta-base" # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -484,13 +485,6 @@ def setUp(self): self.model_tester = BigBirdModelTester(self) self.config_tester = ConfigTester(self, config_class=BigBirdConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @@ -557,12 +551,6 @@ def test_retain_grad_hidden_states_attentions(self): if self.model_tester.attention_type == "original_full": super().test_retain_grad_hidden_states_attentions() - @slow - def test_model_from_pretrained(self): - model_name = "google/bigbird-roberta-base" - model = BigBirdForPreTraining.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_model_various_attn_type(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["original_full", "block_sparse"]: diff --git a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py index 0f28fc2d67b582..784be0013e36c6 100644 --- a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py +++ b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py @@ -309,9 +309,6 @@ def setUp(self): self.model_tester = BigBirdPegasusModelTester(self) self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -810,9 +807,6 @@ def setUp( self.model_tester = BigBirdPegasusStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BigBirdPegasusConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/biogpt/test_modeling_biogpt.py b/tests/models/biogpt/test_modeling_biogpt.py index 4f1d5d6a42f8a9..c2f9fc5b727cf2 100644 --- a/tests/models/biogpt/test_modeling_biogpt.py +++ b/tests/models/biogpt/test_modeling_biogpt.py @@ -297,18 +297,12 @@ class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMix else {} ) test_pruning = False + pretrained_checkpoint = "microsoft/biogpt" def setUp(self): self.model_tester = BioGptModelTester(self) self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37) - 
def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -379,12 +373,6 @@ def test_batch_generation(self): self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/biogpt" - model = BioGptModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # Copied from tests.models.opt.test_modeling_opt.OPTModelTest.test_opt_sequence_classification_model with OPT->BioGpt,opt->biogpt,prepare_config_and_inputs->prepare_config_and_inputs_for_common def test_biogpt_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/bit/test_modeling_bit.py b/tests/models/bit/test_modeling_bit.py index 504e410bb466a8..e49fb1763caa3e 100644 --- a/tests/models/bit/test_modeling_bit.py +++ b/tests/models/bit/test_modeling_bit.py @@ -170,6 +170,7 @@ class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "google/bit-50" def setUp(self): self.model_tester = BitModelTester(self) @@ -177,9 +178,6 @@ def setUp(self): self, config_class=BitConfig, has_text_modality=False, common_properties=["num_channels"] ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Bit does not output attentions") def test_attention_outputs(self): pass @@ -192,10 +190,6 @@ def test_inputs_embeds(self): def test_model_get_set_embeddings(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @@ -258,12 +252,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/bit-50" - model = BitModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/blenderbot/test_modeling_blenderbot.py b/tests/models/blenderbot/test_modeling_blenderbot.py index cecedb8a907133..a7de682fbec2db 100644 --- a/tests/models/blenderbot/test_modeling_blenderbot.py +++ b/tests/models/blenderbot/test_modeling_blenderbot.py @@ -247,9 +247,6 @@ def setUp(self): self.model_tester = BlenderbotModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -543,9 +540,6 @@ def setUp( self.model_tester = BlenderbotStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, 
config_class=BlenderbotConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/blenderbot_small/test_modeling_blenderbot_small.py b/tests/models/blenderbot_small/test_modeling_blenderbot_small.py index 59f68b54754796..38c70e9f102ea4 100644 --- a/tests/models/blenderbot_small/test_modeling_blenderbot_small.py +++ b/tests/models/blenderbot_small/test_modeling_blenderbot_small.py @@ -244,9 +244,6 @@ def setUp(self): self.model_tester = BlenderbotSmallModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -545,9 +542,6 @@ def setUp( self.model_tester = BlenderbotSmallStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/blip/test_modeling_blip.py b/tests/models/blip/test_modeling_blip.py index 2f8ee3229ff2cd..fa0ed7cc724813 100644 --- a/tests/models/blip/test_modeling_blip.py +++ b/tests/models/blip/test_modeling_blip.py @@ -156,14 +156,12 @@ class BlipVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Salesforce/blip-vqa-base" def setUp(self): self.model_tester = BlipVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipVisionConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Blip does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -189,10 +187,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -221,12 +215,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip-vqa-base" - model = BlipVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class BlipTextModelTester: def __init__( @@ -325,18 +313,12 @@ class BlipTextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = False test_pruning = False test_head_masking = False + pretrained_checkpoint = "Salesforce/blip-vqa-base" def setUp(self): self.model_tester = BlipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -369,12 +351,6 @@ def 
test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip-vqa-base" - model = BlipTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence(allow_missing_keys=True) @@ -445,14 +421,11 @@ class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "Salesforce/blip-vqa-base" def setUp(self): self.model_tester = BlipModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -579,12 +552,6 @@ def test_load_vision_text_config(self): text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip-vqa-base" - model = BlipModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_get_image_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -884,14 +851,11 @@ class BlipTextRetrievalModelTest(ModelTesterMixin, unittest.TestCase): test_resize_embeddings = False test_attention_outputs = False test_torchscript = False + pretrained_checkpoint = "Salesforce/blip-vqa-base" def setUp(self): self.model_tester = BlipTextRetrievalModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -1096,12 +1060,6 @@ def test_load_vision_text_config(self): text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip-vqa-base" - model = BlipModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class BlipTextImageModelTest(ModelTesterMixin, unittest.TestCase): @@ -1112,14 +1070,11 @@ class BlipTextImageModelTest(ModelTesterMixin, unittest.TestCase): test_resize_embeddings = False test_attention_outputs = False test_torchscript = False + pretrained_checkpoint = "Salesforce/blip-vqa-base" def setUp(self): self.model_tester = BlipTextImageModelsModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -1312,12 +1267,6 @@ def test_load_vision_text_config(self): text_config = BlipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip-vqa-base" - model = BlipModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/blip/test_modeling_blip_text.py 
b/tests/models/blip/test_modeling_blip_text.py index 85ab462a0d54ab..9b66bea0be7fe2 100644 --- a/tests/models/blip/test_modeling_blip_text.py +++ b/tests/models/blip/test_modeling_blip_text.py @@ -19,7 +19,7 @@ import numpy as np from transformers import BlipTextConfig -from transformers.testing_utils import require_torch, slow, torch_device +from transformers.testing_utils import require_torch, torch_device from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester @@ -129,18 +129,12 @@ class BlipTextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = False test_pruning = False test_head_masking = False + pretrained_checkpoint = "Salesforce/blip-vqa-base" def setUp(self): self.model_tester = BlipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -173,11 +167,5 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip-vqa-base" - model = BlipTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_pt_tf_model_equivalence(self): super().test_pt_tf_model_equivalence(allow_missing_keys=True) diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py index cee5d710a85fb8..7b95366142213c 100644 --- a/tests/models/blip_2/test_modeling_blip_2.py +++ b/tests/models/blip_2/test_modeling_blip_2.py @@ -157,6 +157,7 @@ class Blip2VisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Salesforce/blip2-opt-2.7b" def setUp(self): self.model_tester = Blip2VisionModelTester(self) @@ -164,9 +165,6 @@ def setUp(self): self, config_class=Blip2VisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="BLIP-2's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -192,10 +190,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -224,12 +218,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip2-opt-2.7b" - model = Blip2VisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class Blip2QFormerModelTester: def __init__( @@ -456,6 +444,7 @@ class Blip2ForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, GenerationT test_resize_embeddings = False test_attention_outputs = False test_torchscript = False + pretrained_checkpoint = "Salesforce/blip2-opt-2.7b" def setUp(self): self.model_tester = Blip2ForConditionalGenerationDecoderOnlyModelTester(self) @@ -515,12 +504,6 @@ def test_load_vision_qformer_text_config(self): qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) 
self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip2-opt-2.7b" - model = Blip2ForConditionalGeneration.from_pretrained(model_name) - self.assertIsNotNone(model) - # this class is based on `T5ModelTester` found in tests/models/t5/test_modeling_t5.py class Blip2TextModelTester: @@ -715,6 +698,7 @@ class Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, GenerationTesterMixi test_resize_embeddings = False test_attention_outputs = False test_torchscript = False + pretrained_checkpoint = "Salesforce/blip2-opt-2.7b" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -788,12 +772,6 @@ def test_load_vision_qformer_text_config(self): qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/blip2-opt-2.7b" - model = Blip2ForConditionalGeneration.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_get_text_features(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -948,10 +926,6 @@ class Blip2TextModelWithProjectionTest(ModelTesterMixin, unittest.TestCase): def setUp(self): self.model_tester = Blip2TextModelWithProjectionTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @@ -1114,10 +1088,6 @@ class Blip2VisionModelWithProjectionTest(ModelTesterMixin, unittest.TestCase): def setUp(self): self.model_tester = Blip2VisionModelWithProjectionTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @@ -1272,10 +1242,6 @@ class Blip2TextRetrievalModelTest(ModelTesterMixin, unittest.TestCase): def setUp(self): self.model_tester = Blip2TextRetrievalModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass diff --git a/tests/models/bloom/test_modeling_bloom.py b/tests/models/bloom/test_modeling_bloom.py index b20012c2a19734..a671d659c533fa 100644 --- a/tests/models/bloom/test_modeling_bloom.py +++ b/tests/models/bloom/test_modeling_bloom.py @@ -134,7 +134,7 @@ def get_config(self, gradient_checkpointing=False, slow_but_exact=True): dtype="float32", ) - def create_and_check_bloom_model(self, config, input_ids, input_mask, *args): + def create_and_check_model(self, config, input_ids, input_mask, *args): model = BloomModel(config=config) model.to(torch_device) model.eval() @@ -345,18 +345,12 @@ class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixi test_missing_keys = False test_pruning = False test_torchscript = True # torch.autograd functions seems not to be supported + pretrained_checkpoint = "bigscience/bigscience-small-testing" def setUp(self): self.model_tester = BloomModelTester(self) self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - - def 
test_bloom_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_bloom_model(*config_and_inputs) - def test_bloom_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bloom_model_past(*config_and_inputs) @@ -389,12 +383,6 @@ def test_bloom_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "bigscience/bigscience-small-testing" - model = BloomModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow @require_torch_accelerator def test_simple_generation(self): diff --git a/tests/models/bridgetower/test_modeling_bridgetower.py b/tests/models/bridgetower/test_modeling_bridgetower.py index cceeee4912dc3f..4f18eb521909a8 100644 --- a/tests/models/bridgetower/test_modeling_bridgetower.py +++ b/tests/models/bridgetower/test_modeling_bridgetower.py @@ -310,6 +310,7 @@ class BridgeTowerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC else () ) pipeline_model_mapping = {"feature-extraction": BridgeTowerModel} if is_torch_available() else {} + pretrained_checkpoint = "BridgeTower/bridgetower-base" is_training = False test_headmasking = False @@ -338,13 +339,6 @@ def setUp(self): self.model_tester = BridgeTowerModelTester(self) self.config_tester = ConfigTester(self, config_class=BridgeTowerConfig, hidden_size=37, vocab_size=99) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_and_text_retrieval(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_and_text_retrieval(*config_and_inputs) @@ -353,12 +347,6 @@ def test_for_masked_language_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_language_modeling(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "BridgeTower/bridgetower-base" - model = BridgeTowerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow def test_save_load_fast_init_from_base(self): # Override as it is a slow test on this model diff --git a/tests/models/bros/test_modeling_bros.py b/tests/models/bros/test_modeling_bros.py index 14f904c3b7e272..232e2fdae92b73 100644 --- a/tests/models/bros/test_modeling_bros.py +++ b/tests/models/bros/test_modeling_bros.py @@ -291,6 +291,7 @@ class BrosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): if is_torch_available() else {} ) + pretrained_checkpoint = "jinho8345/bros-base-uncased" # BROS requires `bbox` in the inputs which doesn't fit into the above 2 pipelines' input formats. 
# see https://github.com/huggingface/transformers/pull/26294 @@ -337,13 +338,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() @@ -366,12 +360,6 @@ def test_for_spade_el_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_spade_el_token_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "jinho8345/bros-base-uncased" - model = BrosModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def prepare_bros_batch_inputs(): attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) diff --git a/tests/models/canine/test_modeling_canine.py b/tests/models/canine/test_modeling_canine.py index efc70dff499c6c..ccc61c833d7163 100644 --- a/tests/models/canine/test_modeling_canine.py +++ b/tests/models/canine/test_modeling_canine.py @@ -237,19 +237,13 @@ class CanineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_mismatched_shapes = False test_resize_embeddings = False test_pruning = False + pretrained_checkpoint = "google/canine-s" def setUp(self): self.model_tester = CanineModelTester(self) # we set has_text_modality to False as the config has no vocab_size attribute self.config_tester = ConfigTester(self, config_class=CanineConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) @@ -527,12 +521,6 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/canine-s" - model = CanineModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class CanineModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/chameleon/test_modeling_chameleon.py b/tests/models/chameleon/test_modeling_chameleon.py index 00e3ad40a57652..ce823d9e212189 100644 --- a/tests/models/chameleon/test_modeling_chameleon.py +++ b/tests/models/chameleon/test_modeling_chameleon.py @@ -291,13 +291,6 @@ def setUp(self): self.model_tester = ChameleonModelTester(self) self.config_tester = ConfigTester(self, config_class=ChameleonConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/chinese_clip/test_modeling_chinese_clip.py b/tests/models/chinese_clip/test_modeling_chinese_clip.py index 
647b3ac7b73a3d..a59b87c97d9777 100644 --- a/tests/models/chinese_clip/test_modeling_chinese_clip.py +++ b/tests/models/chinese_clip/test_modeling_chinese_clip.py @@ -318,6 +318,7 @@ def prepare_config_and_inputs_for_common(self): class ChineseCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ChineseCLIPTextModel,) if is_torch_available() else () fx_compatible = False + pretrained_checkpoint = "OFA-Sys/chinese-clip-vit-base-patch16" # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -337,13 +338,6 @@ def setUp(self): self.model_tester = ChineseCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=ChineseCLIPTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -382,12 +376,6 @@ def test_model_as_decoder_with_default_input_mask(self): encoder_attention_mask, ) - @slow - def test_model_from_pretrained(self): - model_name = "OFA-Sys/chinese-clip-vit-base-patch16" - model = ChineseCLIPTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip def test_training(self): pass @@ -429,6 +417,7 @@ class ChineseCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "OFA-Sys/chinese-clip-vit-base-patch16" def setUp(self): self.model_tester = ChineseCLIPVisionModelTester(self) @@ -436,9 +425,6 @@ def setUp(self): self, config_class=ChineseCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="CHINESE_CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -464,10 +450,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -496,12 +478,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "OFA-Sys/chinese-clip-vit-base-patch16" - model = ChineseCLIPVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class ChineseCLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): @@ -570,16 +546,13 @@ class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "OFA-Sys/chinese-clip-vit-base-patch16" def setUp(self): text_kwargs = {"use_labels": False, "batch_size": 12} vision_kwargs = {"batch_size": 12} self.model_tester = ChineseCLIPModelTester(self, text_kwargs, vision_kwargs) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def 
test_hidden_states_output(self): pass @@ -694,12 +667,6 @@ def _create_and_check_torchscript(self, config, inputs_dict): self.assertTrue(models_equal) - @slow - def test_model_from_pretrained(self): - model_name = "OFA-Sys/chinese-clip-vit-base-patch16" - model = ChineseCLIPModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of Pikachu def prepare_img(): diff --git a/tests/models/clap/test_modeling_clap.py b/tests/models/clap/test_modeling_clap.py index 9f8cc62d2e0fc3..5c9ae7f2f8ac24 100644 --- a/tests/models/clap/test_modeling_clap.py +++ b/tests/models/clap/test_modeling_clap.py @@ -165,14 +165,12 @@ class ClapAudioModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "laion/clap-htsat-fused" def setUp(self): self.model_tester = ClapAudioModelTester(self) self.config_tester = ConfigTester(self, config_class=ClapAudioConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="ClapAudioModel does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -235,10 +233,6 @@ def test_forward_signature(self): expected_arg_names = ["input_features"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) @@ -271,12 +265,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "laion/clap-htsat-fused" - model = ClapAudioModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow def test_model_with_projection_from_pretrained(self): model_name = "laion/clap-htsat-fused" @@ -392,18 +380,12 @@ class ClapTextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = False test_pruning = False test_head_masking = False + pretrained_checkpoint = "laion/clap-htsat-fused" def setUp(self): self.model_tester = ClapTextModelTester(self) self.config_tester = ConfigTester(self, config_class=ClapTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) @@ -440,12 +422,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "laion/clap-htsat-fused" - model = ClapTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow def test_model_with_projection_from_pretrained(self): model_name = "laion/clap-htsat-fused" @@ -512,14 +488,11 @@ class ClapModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "laion/clap-htsat-fused" def setUp(self): self.model_tester = ClapModelTester(self) - def test_model(self): - 
config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -646,12 +619,6 @@ def test_load_audio_text_config(self): text_config = ClapTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "laion/clap-htsat-fused" - model = ClapModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow @require_torch diff --git a/tests/models/clip/test_modeling_clip.py b/tests/models/clip/test_modeling_clip.py index 88824756a6fb54..8aa4a0f5a4f324 100644 --- a/tests/models/clip/test_modeling_clip.py +++ b/tests/models/clip/test_modeling_clip.py @@ -368,14 +368,12 @@ class CLIPVisionModelTest(CLIPModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "openai/clip-vit-base-patch32" def setUp(self): self.model_tester = CLIPVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -401,10 +399,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) @@ -437,12 +431,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "openai/clip-vit-base-patch32" - model = CLIPVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow def test_model_with_projection_from_pretrained(self): model_name = "openai/clip-vit-base-patch32" @@ -567,18 +555,12 @@ class CLIPTextModelTest(CLIPModelTesterMixin, unittest.TestCase): test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "openai/clip-vit-base-patch32" def setUp(self): self.model_tester = CLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_projection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_projection(*config_and_inputs) @@ -615,12 +597,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "openai/clip-vit-base-patch32" - model = CLIPTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow def test_model_with_projection_from_pretrained(self): model_name = "openai/clip-vit-base-patch32" @@ -704,14 +680,11 @@ class CLIPModelTest(CLIPModelTesterMixin, 
PipelineTesterMixin, unittest.TestCase test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "openai/clip-vit-base-patch32" def setUp(self): self.model_tester = CLIPModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -958,12 +931,6 @@ def test_equivalence_flax_to_pt(self): for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(force=True), 4e-2) - @slow - def test_model_from_pretrained(self): - model_name = "openai/clip-vit-base-patch32" - model = CLIPModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow diff --git a/tests/models/clipseg/test_modeling_clipseg.py b/tests/models/clipseg/test_modeling_clipseg.py index c5edf7cb757b30..d6ddbdb8cb74bc 100644 --- a/tests/models/clipseg/test_modeling_clipseg.py +++ b/tests/models/clipseg/test_modeling_clipseg.py @@ -155,6 +155,7 @@ class CLIPSegVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "CIDAS/clipseg-rd64-refined" def setUp(self): self.model_tester = CLIPSegVisionModelTester(self) @@ -162,9 +163,6 @@ def setUp(self): self, config_class=CLIPSegVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="CLIPSeg does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -190,10 +188,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -222,12 +216,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "CIDAS/clipseg-rd64-refined" - model = CLIPSegVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class CLIPSegTextModelTester: def __init__( @@ -321,18 +309,12 @@ class CLIPSegTextModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "CIDAS/clipseg-rd64-refined" def setUp(self): self.model_tester = CLIPSegTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPSegTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -365,12 +347,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "CIDAS/clipseg-rd64-refined" - model = CLIPSegTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class CLIPSegModelTester: def __init__( @@ -458,6 +434,7 @@ class CLIPSegModelTest(ModelTesterMixin, 
PipelineTesterMixin, unittest.TestCase) test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "CIDAS/clipseg-rd64-refined" def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): # CLIPSegForImageSegmentation requires special treatment @@ -473,10 +450,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): def setUp(self): self.model_tester = CLIPSegModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_for_image_segmentation(*config_and_inputs) @@ -767,12 +740,6 @@ def test_training(self): loss = model(**inputs).loss loss.backward() - @slow - def test_model_from_pretrained(self): - model_name = "CIDAS/clipseg-rd64-refined" - model = CLIPSegModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/clvp/test_modeling_clvp.py b/tests/models/clvp/test_modeling_clvp.py index 0cf89a74523364..f8cd885f908fa0 100644 --- a/tests/models/clvp/test_modeling_clvp.py +++ b/tests/models/clvp/test_modeling_clvp.py @@ -180,10 +180,6 @@ def tearDown(self): def test_config(self): self.encoder_config_tester.run_common_tests() - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="ClvpEncoder does not output loss") def test_training(self): pass @@ -297,10 +293,6 @@ def tearDown(self): gc.collect() torch.cuda.empty_cache() - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): if return_labels and model_class == ClvpForCausalLM: inputs_dict["labels"] = torch.zeros( @@ -413,6 +405,7 @@ class ClvpModelForConditionalGenerationTest(ModelTesterMixin, unittest.TestCase) test_resize_embeddings = False test_attention_outputs = False test_torchscript = False + pretrained_checkpoint = "susnato/clvp_dev" def setUp(self): self.model_tester = ClvpModelForConditionalGenerationTester(self) @@ -424,10 +417,6 @@ def tearDown(self): gc.collect() torch.cuda.empty_cache() - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -537,12 +526,6 @@ def test_load_speech_text_decoder_config(self): decoder_config = ClvpDecoderConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.decoder_config.to_dict(), decoder_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "susnato/clvp_dev" - model = ClvpModelForConditionalGeneration.from_pretrained(model_name) - self.assertIsNotNone(model) - # Since Clvp has a lot of different models connected with each other it's better to test each of them individually along # with a test_full_model_integration. If the model breaks in future, it could be of a great help to identify the broken part. 
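(Editor's aside, not part of the patch.) Every file touched by this diff gets the same treatment: the copy-pasted test_config, test_model, and test_model_from_pretrained methods are deleted and a pretrained_checkpoint class attribute is added, so the shared test mixins can run those checks once for all models. The mixin side of the change is not shown in this diff; the sketch below is only an assumption of what it presumably looks like, with the pretrained_checkpoint name taken from the patch and everything else hypothetical.

# Hypothetical sketch of the shared hooks implied by this refactor; not taken from the patch.
class ModelTesterMixin:
    # Set by each model's test class, e.g. `pretrained_checkpoint = "Salesforce/codegen-350M-nl"`.
    pretrained_checkpoint = None

    def test_config(self):
        # Assumes setUp() still builds a ConfigTester, as the unchanged context lines show.
        self.config_tester.run_common_tests()

    def test_model(self):
        # Assumes the per-model tester exposes create_and_check_model(); several hunks rename
        # model-specific helpers (create_and_check_model_forward, create_and_check_flaubert_model)
        # to that common name.
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_from_pretrained(self):
        # Presumably still gated as a @slow test in the real implementation, like the removed copies.
        if self.pretrained_checkpoint is None:
            self.skipTest("No pretrained checkpoint defined for this model.")
        for model_class in self.all_model_classes:
            model = model_class.from_pretrained(self.pretrained_checkpoint)
            self.assertIsNotNone(model)

Models with no usable base class simply skip the generic test instead of overriding it, as the depth_anything hunk below does with an @unittest.skip on test_model.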
diff --git a/tests/models/codegen/test_modeling_codegen.py b/tests/models/codegen/test_modeling_codegen.py index 1ee4c7f57dbc9d..10bf67a1b1276a 100644 --- a/tests/models/codegen/test_modeling_codegen.py +++ b/tests/models/codegen/test_modeling_codegen.py @@ -332,6 +332,7 @@ class CodeGenModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi test_missing_keys = False test_model_parallel = False test_head_masking = False + pretrained_checkpoint = "Salesforce/codegen-350M-nl" # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -342,9 +343,6 @@ def setUp(self): self.model_tester = CodeGenModelTester(self) self.config_tester = ConfigTester(self, config_class=CodeGenConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_codegen_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_codegen_model(*config_and_inputs) @@ -425,12 +423,6 @@ def test_batch_generation(self): self.assertTrue(batch_out_sentence_tt != batch_out_sentence) # token_type_ids should change output self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/codegen-350M-nl" - model = CodeGenModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class CodeGenModelLanguageGenerationTest(unittest.TestCase): diff --git a/tests/models/cohere/test_modeling_cohere.py b/tests/models/cohere/test_modeling_cohere.py index d80bc5c24cf9f3..a81dea8cfbd7ee 100644 --- a/tests/models/cohere/test_modeling_cohere.py +++ b/tests/models/cohere/test_modeling_cohere.py @@ -291,13 +291,6 @@ def setUp(self): self.model_tester = CohereModelTester(self) self.config_tester = ConfigTester(self, config_class=CohereConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/conditional_detr/test_modeling_conditional_detr.py b/tests/models/conditional_detr/test_modeling_conditional_detr.py index 2e2973679e91b3..a08d51bbd7ce9c 100644 --- a/tests/models/conditional_detr/test_modeling_conditional_detr.py +++ b/tests/models/conditional_detr/test_modeling_conditional_detr.py @@ -226,9 +226,6 @@ def setUp(self): self.model_tester = ConditionalDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=ConditionalDetrConfig, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - def test_conditional_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_conditional_detr_model(*config_and_inputs) diff --git a/tests/models/convbert/test_modeling_convbert.py b/tests/models/convbert/test_modeling_convbert.py index 84b50f57290887..1590a75d23ac3c 100644 --- a/tests/models/convbert/test_modeling_convbert.py +++ b/tests/models/convbert/test_modeling_convbert.py @@ -273,18 +273,12 @@ class ConvBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ) test_pruning = False test_head_masking = False + pretrained_checkpoint = "YituTech/conv-bert-base" def setUp(self): 
self.model_tester = ConvBertModelTester(self) self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) @@ -305,12 +299,6 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "YituTech/conv-bert-base" - model = ConvBertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True diff --git a/tests/models/convnext/test_modeling_convnext.py b/tests/models/convnext/test_modeling_convnext.py index 7d7ba5c9b80e6d..19c696ff9da042 100644 --- a/tests/models/convnext/test_modeling_convnext.py +++ b/tests/models/convnext/test_modeling_convnext.py @@ -180,6 +180,7 @@ class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "facebook/convnext-tiny-224" def setUp(self): self.model_tester = ConvNextModelTester(self) @@ -191,9 +192,6 @@ def setUp(self): common_properties=["num_channels", "hidden_sizes"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="ConvNext does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -206,10 +204,6 @@ def test_model_get_set_embeddings(self): def test_feed_forward_chunking(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @@ -250,12 +244,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/convnext-tiny-224" - model = ConvNextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/convnextv2/test_modeling_convnextv2.py b/tests/models/convnextv2/test_modeling_convnextv2.py index e5bb8e3d190a03..d62313c0f4cbbd 100644 --- a/tests/models/convnextv2/test_modeling_convnextv2.py +++ b/tests/models/convnextv2/test_modeling_convnextv2.py @@ -188,6 +188,7 @@ class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "facebook/convnextv2-tiny-1k-224" def setUp(self): self.model_tester = ConvNextV2ModelTester(self) @@ -199,9 +200,6 @@ def setUp(self): common_properties=["hidden_sizes", "num_channels"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="ConvNextV2 does not use inputs_embeds") def 
test_inputs_embeds(self): pass @@ -259,10 +257,6 @@ def test_training_gradient_checkpointing(self): loss = model(**inputs).loss loss.backward() - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -299,12 +293,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/convnextv2-tiny-1k-224" - model = ConvNextV2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/cpmant/test_modeling_cpmant.py b/tests/models/cpmant/test_modeling_cpmant.py index 404280428ef900..8820ad5e9c67cb 100644 --- a/tests/models/cpmant/test_modeling_cpmant.py +++ b/tests/models/cpmant/test_modeling_cpmant.py @@ -150,9 +150,6 @@ def setUp(self): self.model_tester = CpmAntModelTester(self) self.config_tester = ConfigTester(self, config_class=CpmAntConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_inputs_embeds(self): unittest.skip(reason="CPMAnt doesn't support input_embeds.")(self.test_inputs_embeds) diff --git a/tests/models/ctrl/test_modeling_ctrl.py b/tests/models/ctrl/test_modeling_ctrl.py index 6d44bdfb4ae672..b8c3973dd3712f 100644 --- a/tests/models/ctrl/test_modeling_ctrl.py +++ b/tests/models/ctrl/test_modeling_ctrl.py @@ -208,6 +208,7 @@ class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin test_pruning = True test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Salesforce/ctrl" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -231,9 +232,6 @@ def tearDown(self): gc.collect() backend_empty_cache(torch_device) - def test_config(self): - self.config_tester.run_common_tests() - def test_ctrl_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*config_and_inputs) @@ -242,12 +240,6 @@ def test_ctrl_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/ctrl" - model = CTRLModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class CTRLModelLanguageGenerationTest(unittest.TestCase): diff --git a/tests/models/cvt/test_modeling_cvt.py b/tests/models/cvt/test_modeling_cvt.py index b07b8892957b7c..d1b7b7fa371cf1 100644 --- a/tests/models/cvt/test_modeling_cvt.py +++ b/tests/models/cvt/test_modeling_cvt.py @@ -159,6 +159,7 @@ class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "microsoft/cvt-13" def setUp(self): self.model_tester = CvtModelTester(self) @@ -170,9 +171,6 @@ def setUp(self): common_properties=["hidden_size", "num_channels"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Cvt does not output attentions") def test_attention_outputs(self): pass @@ -185,10 +183,6 @@ def test_inputs_embeds(self): def 
test_model_get_set_embeddings(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -229,12 +223,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/cvt-13" - model = CvtModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/dac/test_modeling_dac.py b/tests/models/dac/test_modeling_dac.py index ffe7f31b79a506..a32f6bc96a450d 100644 --- a/tests/models/dac/test_modeling_dac.py +++ b/tests/models/dac/test_modeling_dac.py @@ -140,9 +140,6 @@ def setUp(self): self, config_class=DacConfig, hidden_size=37, common_properties=[], has_text_modality=False ) - def test_config(self): - self.config_tester.run_common_tests() - def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) diff --git a/tests/models/data2vec/test_modeling_data2vec_audio.py b/tests/models/data2vec/test_modeling_data2vec_audio.py index d43128286853a5..9648eaf4099022 100644 --- a/tests/models/data2vec/test_modeling_data2vec_audio.py +++ b/tests/models/data2vec/test_modeling_data2vec_audio.py @@ -382,18 +382,12 @@ class Data2VecAudioModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Tes ) test_pruning = False test_headmasking = False + pretrained_checkpoint = "facebook/data2vec-audio-base" def setUp(self): self.model_tester = Data2VecAudioModelTester(self) self.config_tester = ConfigTester(self, config_class=Data2VecAudioConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) @@ -590,11 +584,6 @@ def test_mask_time_prob_ctc(self): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = Data2VecAudioModel.from_pretrained("facebook/data2vec-audio-base") - self.assertIsNotNone(model) - @require_torch class Data2VecAudioUtilsTest(unittest.TestCase): diff --git a/tests/models/data2vec/test_modeling_data2vec_text.py b/tests/models/data2vec/test_modeling_data2vec_text.py index 4e44d8c6206846..28fbd256f14891 100644 --- a/tests/models/data2vec/test_modeling_data2vec_text.py +++ b/tests/models/data2vec/test_modeling_data2vec_text.py @@ -388,18 +388,12 @@ class Data2VecTextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTes else {} ) model_split_percents = [0.5, 0.9] + pretrained_checkpoint = "facebook/data2vec-text-base" def setUp(self): self.model_tester = Data2VecTextModelTester(self) self.config_tester = ConfigTester(self, config_class=Data2VecTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - 
self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -467,12 +461,6 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/data2vec-text-base" - model = Data2VecTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_create_position_ids_respects_padding_index(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 diff --git a/tests/models/data2vec/test_modeling_data2vec_vision.py b/tests/models/data2vec/test_modeling_data2vec_vision.py index c729d88d614fbc..297b48b11be6bb 100644 --- a/tests/models/data2vec/test_modeling_data2vec_vision.py +++ b/tests/models/data2vec/test_modeling_data2vec_vision.py @@ -186,6 +186,7 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Te test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "facebook/data2vec-vision-base-ft1k" def setUp(self): self.model_tester = Data2VecVisionModelTester(self) @@ -193,9 +194,6 @@ def setUp(self): self, config_class=Data2VecVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Data2VecVision does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -216,10 +214,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) @@ -294,12 +288,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/data2vec-vision-base-ft1k" - model = Data2VecVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/dbrx/test_modeling_dbrx.py b/tests/models/dbrx/test_modeling_dbrx.py index d38a479ab36e42..ed0166caa8c36c 100644 --- a/tests/models/dbrx/test_modeling_dbrx.py +++ b/tests/models/dbrx/test_modeling_dbrx.py @@ -326,30 +326,18 @@ class DbrxModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin pipeline_model_mapping = {"text-generation": DbrxForCausalLM} if is_torch_available() else {} test_headmasking = False test_pruning = False + pretrained_checkpoint = "eitanturok/dbrx-tiny" def setUp(self): self.model_tester = DbrxModelTester(self) self.config_tester = ConfigTester(self, config_class=DbrxConfig, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "eitanturok/dbrx-tiny" - model = DbrxModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="Dbrx models have weight tying disabled.") def test_tied_weights_keys(self): pass diff --git a/tests/models/deberta/test_modeling_deberta.py b/tests/models/deberta/test_modeling_deberta.py index 4b6f570e9ea7fc..a2730adc196d3a 100644 --- a/tests/models/deberta/test_modeling_deberta.py +++ b/tests/models/deberta/test_modeling_deberta.py @@ -243,14 +243,12 @@ class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) test_pruning = False test_head_masking = False is_encoder_decoder = False + pretrained_checkpoint = "microsoft/deberta-base" def setUp(self): self.model_tester = DebertaModelTester(self) self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_deberta_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*config_and_inputs) @@ -271,12 +269,6 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/deberta-base" - model = DebertaModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch @require_sentencepiece diff --git a/tests/models/deberta_v2/test_modeling_deberta_v2.py b/tests/models/deberta_v2/test_modeling_deberta_v2.py index 0a9256aaf72360..5dfe6f1d8229b2 100644 --- a/tests/models/deberta_v2/test_modeling_deberta_v2.py +++ b/tests/models/deberta_v2/test_modeling_deberta_v2.py @@ -257,14 +257,12 @@ class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas test_pruning = False test_head_masking = False is_encoder_decoder = False + pretrained_checkpoint = "microsoft/deberta-v2-xlarge" def setUp(self): self.model_tester = DebertaV2ModelTester(self) self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_deberta_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*config_and_inputs) @@ -289,12 +287,6 @@ def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/deberta-v2-xlarge" - model = DebertaV2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch @require_sentencepiece diff --git a/tests/models/decision_transformer/test_modeling_decision_transformer.py b/tests/models/decision_transformer/test_modeling_decision_transformer.py index 0c95e6291c503b..dd088b3815dd36 100644 --- a/tests/models/decision_transformer/test_modeling_decision_transformer.py +++ b/tests/models/decision_transformer/test_modeling_decision_transformer.py @@ -142,24 +142,12 @@ class DecisionTransformerModelTest(ModelTesterMixin, 
GenerationTesterMixin, Pipe test_inputs_embeds = False test_gradient_checkpointing = False test_torchscript = False + pretrained_checkpoint = "edbeeching/decision-transformer-gym-hopper-medium" def setUp(self): self.model_tester = DecisionTransformerModelTester(self) self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - - @slow - def test_model_from_pretrained(self): - model_name = "edbeeching/decision-transformer-gym-hopper-medium" - model = DecisionTransformerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/deformable_detr/test_modeling_deformable_detr.py b/tests/models/deformable_detr/test_modeling_deformable_detr.py index b77ffb6e7778e9..5db0696a8cbcdf 100644 --- a/tests/models/deformable_detr/test_modeling_deformable_detr.py +++ b/tests/models/deformable_detr/test_modeling_deformable_detr.py @@ -237,9 +237,6 @@ def setUp(self): common_properties=["num_channels", "d_model", "encoder_attention_heads", "decoder_attention_heads"], ) - def test_config(self): - self.config_tester.run_common_tests() - def test_deformable_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_model(*config_and_inputs) diff --git a/tests/models/deit/test_modeling_deit.py b/tests/models/deit/test_modeling_deit.py index 1b4ca6e206a9a9..3c2c2333583f7c 100644 --- a/tests/models/deit/test_modeling_deit.py +++ b/tests/models/deit/test_modeling_deit.py @@ -222,6 +222,7 @@ class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "facebook/deit-base-distilled-patch16-224" def setUp(self): self.model_tester = DeiTModelTester(self) @@ -234,9 +235,6 @@ def setUp(self): def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="DeiT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -250,10 +248,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) @@ -377,12 +371,6 @@ def test_problem_types(self): loss.backward() - @slow - def test_model_from_pretrained(self): - model_name = "facebook/deit-base-distilled-patch16-224" - model = DeiTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/depth_anything/test_modeling_depth_anything.py b/tests/models/depth_anything/test_modeling_depth_anything.py index 0e59bc4d13fe70..7bf2cc607073ed 100644 --- a/tests/models/depth_anything/test_modeling_depth_anything.py +++ 
b/tests/models/depth_anything/test_modeling_depth_anything.py @@ -145,6 +145,7 @@ class DepthAnythingModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Tes test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "LiheYoung/depth-anything-small-hf" def setUp(self): self.model_tester = DepthAnythingModelTester(self) @@ -156,13 +157,14 @@ def setUp(self): common_properties=["patch_size"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Depth Anything with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass + @unittest.skip(reason="Depth Anything doesn't have a base model") + def test_model(self): + pass + def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @@ -199,12 +201,6 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "LiheYoung/depth-anything-small-hf" - model = DepthAnythingForDepthEstimation.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_backbone_selection(self): def _validate_backbone_init(): for model_class in self.all_model_classes: diff --git a/tests/models/detr/test_modeling_detr.py b/tests/models/detr/test_modeling_detr.py index d1e36e32824d74..8a34332419e7bc 100644 --- a/tests/models/detr/test_modeling_detr.py +++ b/tests/models/detr/test_modeling_detr.py @@ -226,9 +226,6 @@ def setUp(self): self.model_tester = DetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DetrConfig, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - def test_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_detr_model(*config_and_inputs) diff --git a/tests/models/dinat/test_modeling_dinat.py b/tests/models/dinat/test_modeling_dinat.py index 7cfb5846e071fc..905bdc9db8f348 100644 --- a/tests/models/dinat/test_modeling_dinat.py +++ b/tests/models/dinat/test_modeling_dinat.py @@ -216,6 +216,7 @@ class DinatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "shi-labs/dinat-mini-in1k-224" def setUp(self): self.model_tester = DinatModelTester(self) @@ -223,13 +224,6 @@ def setUp(self): self, config_class=DinatConfig, embed_dim=37, common_properties=["patch_size", "num_channels"] ) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @@ -320,12 +314,6 @@ def test_hidden_states_output(self): self.check_hidden_states_output(inputs_dict, config, model_class, image_size) - @slow - def test_model_from_pretrained(self): - model_name = "shi-labs/dinat-mini-in1k-224" - model = DinatModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git 
a/tests/models/dinov2/test_modeling_dinov2.py b/tests/models/dinov2/test_modeling_dinov2.py index 5caa3baec1a2ca..bc695a34b92a9d 100644 --- a/tests/models/dinov2/test_modeling_dinov2.py +++ b/tests/models/dinov2/test_modeling_dinov2.py @@ -231,6 +231,7 @@ class Dinov2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "facebook/dinov2-base" def setUp(self): self.model_tester = Dinov2ModelTester(self) @@ -240,9 +241,6 @@ def setUp(self): def test_initialization(self): super().test_initialization() - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Dinov2 does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -274,10 +272,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @@ -290,12 +284,6 @@ def test_for_image_classification(self): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "facebook/dinov2-base" - model = Dinov2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/dinov2/test_modeling_flax_dinov2.py b/tests/models/dinov2/test_modeling_flax_dinov2.py index 68510bb505e5e2..65f9df2602859e 100644 --- a/tests/models/dinov2/test_modeling_flax_dinov2.py +++ b/tests/models/dinov2/test_modeling_flax_dinov2.py @@ -143,13 +143,6 @@ def setUp(self) -> None: self.model_tester = FlaxDinov2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Dinov2Config, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) diff --git a/tests/models/distilbert/test_modeling_distilbert.py b/tests/models/distilbert/test_modeling_distilbert.py index 3a74a1557cf9ba..6bbf9de14b5391 100644 --- a/tests/models/distilbert/test_modeling_distilbert.py +++ b/tests/models/distilbert/test_modeling_distilbert.py @@ -227,14 +227,12 @@ class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = True + pretrained_checkpoint = "distilbert-base-uncased" def setUp(self): self.model_tester = DistilBertModelTester(self) self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_distilbert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*config_and_inputs) @@ -268,12 +266,6 @@ def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "distilbert-base-uncased" - model = DistilBertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow @require_torch_accelerator def test_torchscript_device_change(self): diff --git a/tests/models/donut/test_modeling_donut_swin.py b/tests/models/donut/test_modeling_donut_swin.py index 11c01c39fa6cd1..2d71842cb86a5c 100644 --- a/tests/models/donut/test_modeling_donut_swin.py +++ b/tests/models/donut/test_modeling_donut_swin.py @@ -18,7 +18,7 @@ import unittest from transformers import DonutSwinConfig -from transformers.testing_utils import require_torch, slow, torch_device +from transformers.testing_utils import require_torch, torch_device from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester @@ -150,6 +150,7 @@ class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "naver-clova-ix/donut-base" def setUp(self): self.model_tester = DonutSwinModelTester(self) @@ -161,13 +162,6 @@ def setUp(self): common_properties=["image_size", "patch_size", "num_channels"], ) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="DonutSwin does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -328,12 +322,6 @@ def test_hidden_states_output_with_padding(self): config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) - @slow - def test_model_from_pretrained(self): - model_name = "naver-clova-ix/donut-base" - model = DonutSwinModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/dpr/test_modeling_dpr.py b/tests/models/dpr/test_modeling_dpr.py index 7a41820f2d8ea7..e0485d8751b021 100644 --- a/tests/models/dpr/test_modeling_dpr.py +++ b/tests/models/dpr/test_modeling_dpr.py @@ -195,9 +195,6 @@ def setUp(self): self.model_tester = DPRModelTester(self) self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_context_encoder_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_context_encoder(*config_and_inputs) diff --git a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py index 5232b4cf462d8c..9faffce7a46859 100644 --- a/tests/models/dpt/test_modeling_dpt.py +++ b/tests/models/dpt/test_modeling_dpt.py @@ -171,14 +171,12 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Intel/dpt-large" def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="DPT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -192,10 +190,6 @@ def 
test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @@ -304,12 +298,6 @@ def _validate_backbone_init(): config.use_timm_backbone = False _validate_backbone_init() - @slow - def test_model_from_pretrained(self): - model_name = "Intel/dpt-large" - model = DPTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/dpt/test_modeling_dpt_auto_backbone.py b/tests/models/dpt/test_modeling_dpt_auto_backbone.py index 35283eebf5edca..61ad1cf3216ae7 100644 --- a/tests/models/dpt/test_modeling_dpt_auto_backbone.py +++ b/tests/models/dpt/test_modeling_dpt_auto_backbone.py @@ -140,14 +140,12 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Intel/dpt-large" def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="DPT with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass @@ -240,12 +238,6 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "Intel/dpt-large" - model = DPTForDepthEstimation.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/dpt/test_modeling_dpt_hybrid.py b/tests/models/dpt/test_modeling_dpt_hybrid.py index ab117c14416f2d..1931d1f5622613 100644 --- a/tests/models/dpt/test_modeling_dpt_hybrid.py +++ b/tests/models/dpt/test_modeling_dpt_hybrid.py @@ -186,14 +186,12 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Intel/dpt-hybrid-midas" def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="DPT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -207,10 +205,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @@ -291,12 +285,6 @@ def test_initialization(self): msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) - @slow - def test_model_from_pretrained(self): - 
model_name = "Intel/dpt-hybrid-midas" - model = DPTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_raise_readout_type(self): # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/efficientnet/test_modeling_efficientnet.py b/tests/models/efficientnet/test_modeling_efficientnet.py index 4162e189140932..fd73fb54497adb 100644 --- a/tests/models/efficientnet/test_modeling_efficientnet.py +++ b/tests/models/efficientnet/test_modeling_efficientnet.py @@ -139,6 +139,7 @@ class EfficientNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Test test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "google/efficientnet-b7" def setUp(self): self.model_tester = EfficientNetModelTester(self) @@ -150,9 +151,6 @@ def setUp(self): common_properties=["num_channels", "image_size", "hidden_dim"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="EfficientNet does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -165,10 +163,6 @@ def test_model_get_set_embeddings(self): def test_feed_forward_chunking(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -204,12 +198,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/efficientnet-b7" - model = EfficientNetModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @is_pipeline_test @require_vision @slow diff --git a/tests/models/electra/test_modeling_electra.py b/tests/models/electra/test_modeling_electra.py index f6cab710779079..c60f3dd051dc65 100644 --- a/tests/models/electra/test_modeling_electra.py +++ b/tests/models/electra/test_modeling_electra.py @@ -403,6 +403,7 @@ class ElectraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) else {} ) fx_compatible = True + pretrained_checkpoint = "google/electra-small-generator" # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -419,9 +420,6 @@ def setUp(self): self.model_tester = ElectraModelTester(self) self.config_tester = ConfigTester(self, config_class=ElectraConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_electra_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_model(*config_and_inputs) @@ -460,12 +458,6 @@ def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_electra_for_multiple_choice(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/electra-small-generator" - model = ElectraModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_electra_for_causal_lm(*config_and_inputs) diff --git 
a/tests/models/encodec/test_modeling_encodec.py b/tests/models/encodec/test_modeling_encodec.py index cff297be8e0002..11f33da8056237 100644 --- a/tests/models/encodec/test_modeling_encodec.py +++ b/tests/models/encodec/test_modeling_encodec.py @@ -123,7 +123,7 @@ def get_config(self): codebook_size=self.codebook_size, ) - def create_and_check_model_forward(self, config, inputs_dict): + def create_and_check_model(self, config, inputs_dict): model = EncodecModel(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] @@ -158,9 +158,6 @@ def setUp(self): self, config_class=EncodecConfig, hidden_size=37, common_properties=[], has_text_modality=False ) - def test_config(self): - self.config_tester.run_common_tests() - def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) diff --git a/tests/models/ernie/test_modeling_ernie.py b/tests/models/ernie/test_modeling_ernie.py index 232d9176034467..ede4ec89eace77 100644 --- a/tests/models/ernie/test_modeling_ernie.py +++ b/tests/models/ernie/test_modeling_ernie.py @@ -457,6 +457,7 @@ class ErnieModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixi else {} ) fx_compatible = False + pretrained_checkpoint = "nghuyong/ernie-1.0-base-zh" # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -476,13 +477,6 @@ def setUp(self): self.model_tester = ErnieModelTester(self) self.config_tester = ConfigTester(self, config_class=ErnieConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -566,12 +560,6 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "nghuyong/ernie-1.0-base-zh" - model = ErnieModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow @require_torch_accelerator def test_torchscript_device_change(self): diff --git a/tests/models/esm/test_modeling_esm.py b/tests/models/esm/test_modeling_esm.py index 56a7e4d0c67fa1..0b3e5b58240a12 100644 --- a/tests/models/esm/test_modeling_esm.py +++ b/tests/models/esm/test_modeling_esm.py @@ -209,18 +209,12 @@ class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) test_sequence_classification_problem_types = True model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "facebook/esm2_t6_8M_UR50D" def setUp(self): self.model_tester = EsmModelTester(self) self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -239,12 +233,6 @@ def test_esm_gradient_checkpointing(self): config_and_inputs 
= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/esm2_t6_8M_UR50D" - model = EsmModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_create_position_ids_respects_padding_index(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 diff --git a/tests/models/esm/test_modeling_esmfold.py b/tests/models/esm/test_modeling_esmfold.py index 5c05efb03f2fdd..660793310d009e 100644 --- a/tests/models/esm/test_modeling_esmfold.py +++ b/tests/models/esm/test_modeling_esmfold.py @@ -177,13 +177,6 @@ def setUp(self): self.model_tester = EsmFoldModelTester(self) self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Does not support attention outputs") def test_attention_outputs(self): pass diff --git a/tests/models/falcon/test_modeling_falcon.py b/tests/models/falcon/test_modeling_falcon.py index f6c28344754ee9..5ff1c963a10396 100644 --- a/tests/models/falcon/test_modeling_falcon.py +++ b/tests/models/falcon/test_modeling_falcon.py @@ -320,13 +320,6 @@ def setUp(self): self.model_tester = FalconModelTester(self) self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_position_embedding_types(self): config, *inputs = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: diff --git a/tests/models/falcon_mamba/test_modeling_falcon_mamba.py b/tests/models/falcon_mamba/test_modeling_falcon_mamba.py index 893132f4337dd4..194d3c9c7cd054 100644 --- a/tests/models/falcon_mamba/test_modeling_falcon_mamba.py +++ b/tests/models/falcon_mamba/test_modeling_falcon_mamba.py @@ -250,7 +250,7 @@ def prepare_config_and_inputs_for_common(self): not is_torch_greater_or_equal_than_2_0, reason="See https://github.com/huggingface/transformers/pull/24204" ) @require_torch -# Copied from transformers.tests.models.mamba.MambaModelTest with Mamba->Falcon,mamba->falcon_mamba,FalconMambaCache->MambaCache +# Copied from transformers.tests.models.mamba.MambaModelTest with Mamba->Falcon,mamba->falcon_mamba,FalconMambaCache->MambaCache,hf-internal-testing/mamba-130m->tiiuae/falcon-mamba-7b class FalconMambaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FalconMambaModel, FalconMambaForCausalLM) if is_torch_available() else () all_generative_model_classes = (FalconMambaForCausalLM,) if is_torch_available() else () @@ -295,9 +295,6 @@ def assertInterval(self, member, container, msg=None): standardMsg = "%s not found in %s" % (safe_repr(member), safe_repr(container)) self.fail(self._formatMessage(msg, standardMsg)) - def test_config(self): - self.config_tester.run_common_tests() - @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -366,14 +363,6 @@ def test_initialization(self): # check if it's a 
ones like self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5)) - @slow - # Ignore copy - def test_model_from_pretrained(self): - model = FalconMambaModel.from_pretrained( - "tiiuae/falcon-mamba-7b", torch_dtype=torch.float16, low_cpu_mem_usage=True - ) - self.assertIsNotNone(model) - def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py b/tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py index 5191105bc2a1f2..d5b257c2f1fc71 100644 --- a/tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py +++ b/tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py @@ -126,18 +126,12 @@ class FastSpeech2ConformerModelTest(ModelTesterMixin, unittest.TestCase): test_torchscript = False test_resize_embeddings = False is_encoder_decoder = True + pretrained_checkpoint = "espnet/fastspeech2_conformer" def setUp(self): self.model_tester = FastSpeech2ConformerModelTester(self) self.config_tester = ConfigTester(self, config_class=FastSpeech2ConformerConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) @@ -334,11 +328,6 @@ def test_attention_outputs(self): [self.model_tester.num_attention_heads, seq_len, seq_len], ) - @slow - def test_model_from_pretrained(self): - model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer") - self.assertIsNotNone(model) - @unittest.skip(reason="FastSpeech2Conformer does not accept inputs_embeds") def test_inputs_embeds(self): pass @@ -549,14 +538,11 @@ class FastSpeech2ConformerWithHifiGanTest(ModelTesterMixin, unittest.TestCase): test_torchscript = False test_resize_embeddings = False is_encoder_decoder = True + pretrained_checkpoint = "espnet/fastspeech2_conformer" def setUp(self): self.model_tester = FastSpeech2ConformerWithHifiGanTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) @@ -756,11 +742,6 @@ def test_attention_outputs(self): [self.model_tester.num_attention_heads, seq_len, seq_len], ) - @slow - def test_model_from_pretrained(self): - model = FastSpeech2ConformerModel.from_pretrained("espnet/fastspeech2_conformer") - self.assertIsNotNone(model) - @unittest.skip(reason="FastSpeech2Conformer does not accept inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/flaubert/test_modeling_flaubert.py b/tests/models/flaubert/test_modeling_flaubert.py index 7e0ef420917955..e7a4e92f4950a9 100644 --- a/tests/models/flaubert/test_modeling_flaubert.py +++ b/tests/models/flaubert/test_modeling_flaubert.py @@ -157,7 +157,7 @@ def get_config(self): use_proj=self.use_proj, ) - def create_and_check_flaubert_model( + def create_and_check_model( self, config, input_ids, @@ -389,6 +389,7 @@ class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase if is_torch_available() and 
is_sacremoses_available() else {} ) + pretrained_checkpoint = "flaubert/flaubert_small_cased" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -425,9 +426,6 @@ def setUp(self): self.model_tester = FlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) @@ -464,12 +462,6 @@ def test_flaubert_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "flaubert/flaubert_small_cased" - model = FlaubertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow @require_torch_accelerator def test_torchscript_device_change(self): diff --git a/tests/models/flava/test_modeling_flava.py b/tests/models/flava/test_modeling_flava.py index d8c8f385e9ce11..a3130ce3ab000c 100644 --- a/tests/models/flava/test_modeling_flava.py +++ b/tests/models/flava/test_modeling_flava.py @@ -168,14 +168,12 @@ class FlavaImageModelTest(ModelTesterMixin, unittest.TestCase): test_torchscript = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "facebook/flava-full" def setUp(self): self.model_tester = FlavaImageModelTester(self) self.config_tester = ConfigTester(self, config_class=FlavaImageConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip("Flava does not use input_ids") def test_inputs_embeds(self): pass @@ -201,10 +199,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True @@ -330,12 +324,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "facebook/flava-full" - model = FlavaImageModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class FlavaTextModelTester: def __init__( @@ -449,18 +437,12 @@ class FlavaTextModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_head_masking = False test_torchscript = False + pretrained_checkpoint = "facebook/flava-full" def setUp(self): self.model_tester = FlavaTextModelTester(self) self.config_tester = ConfigTester(self, config_class=FlavaTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -494,12 +476,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "facebook/flava-full" - model = FlavaTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class FlavaMultimodalModelTester: def __init__( @@ -596,6 +572,7 @@ class 
FlavaMultimodalModelTest(ModelTesterMixin, unittest.TestCase): test_head_masking = False test_resize_embeddings = False test_torchscript = False + pretrained_checkpoint = "facebook/flava-full" def setUp(self): self.model_tester = FlavaMultimodalModelTester(self) @@ -603,13 +580,6 @@ def setUp(self): self, config_class=FlavaMultimodalConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -658,12 +628,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "facebook/flava-full" - model = FlavaMultimodalModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class FlavaImageCodebookTester: def __init__( @@ -720,15 +684,12 @@ class FlavaImageCodebookTest(ModelTesterMixin, unittest.TestCase): test_resize_embeddings = False test_torchscript = False has_attentions = False + pretrained_checkpoint = "facebook/flava-full" def setUp(self): self.model_tester = FlavaImageCodebookTester(self) self.config_tester = ConfigTester(self, config_class=FlavaImageCodebookConfig, has_text_modality=False) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -793,12 +754,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "facebook/flava-full" - model = FlavaImageCodebook.from_pretrained(model_name) - self.assertIsNotNone(model) - class FlavaModelTester: model_class = FlavaModel @@ -838,9 +793,6 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test - def test_config(self): - self.config_tester.run_common_tests() - def prepare_config_and_inputs_for_common(self): _, pixel_values, bool_masked_pos = self.image_model_tester.prepare_config_and_inputs() _, input_ids, token_type_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() @@ -928,14 +880,11 @@ class FlavaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "facebook/flava-full" def setUp(self): self.model_tester = self.class_for_tester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="tested in individual model tests") def test_hidden_states_output(self): pass @@ -1078,13 +1027,6 @@ def test_load_image_text_config(self): multimodal_config = FlavaMultimodalConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.multimodal_config.to_dict(), multimodal_config.to_dict()) - # overwrite from common since FlavaModel/TFFlavaModel return FLAVAOutput/TFFLAVAOutput - @slow - def test_model_from_pretrained(self): - model_name = "facebook/flava-full" - model = FlavaModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class 
FlavaForPreTrainingTester(FlavaModelTester): model_class = FlavaForPreTraining diff --git a/tests/models/fnet/test_modeling_fnet.py b/tests/models/fnet/test_modeling_fnet.py index 826bf485711017..8de774d7b15ac6 100644 --- a/tests/models/fnet/test_modeling_fnet.py +++ b/tests/models/fnet/test_modeling_fnet.py @@ -295,6 +295,7 @@ class FNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): # Skip Tests test_pruning = False test_head_masking = False + pretrained_checkpoint = "google/fnet-base" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -429,13 +430,6 @@ def setUp(self): self.model_tester = FNetModelTester(self) self.config_tester = FNetConfigTester(self, config_class=FNetConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @@ -460,12 +454,6 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/fnet-base" - model = FNetModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class FNetModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/focalnet/test_modeling_focalnet.py b/tests/models/focalnet/test_modeling_focalnet.py index 48a33fd96e52a4..e72e46beedb03f 100644 --- a/tests/models/focalnet/test_modeling_focalnet.py +++ b/tests/models/focalnet/test_modeling_focalnet.py @@ -247,6 +247,7 @@ class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "microsoft/focalnet-tiny" def setUp(self): self.model_tester = FocalNetModelTester(self) @@ -258,13 +259,6 @@ def setUp(self): common_properties=["image_size", "patch_size", "num_channels", "hidden_sizes"], ) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @@ -381,12 +375,6 @@ def test_hidden_states_output_with_padding(self): config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/focalnet-tiny" - model = FocalNetModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/fsmt/test_modeling_fsmt.py b/tests/models/fsmt/test_modeling_fsmt.py index af95e0dca89584..aa29cef4adce57 100644 --- a/tests/models/fsmt/test_modeling_fsmt.py +++ b/tests/models/fsmt/test_modeling_fsmt.py @@ -190,9 +190,6 @@ def setUp(self): config["vocab_size"] = 99 # no such thing in FSMT self.config_tester = ConfigTester(self, config_class=FSMTConfig, **config) - def test_config(self): - 
self.config_tester.run_common_tests() - # XXX: override test_model_get_set_embeddings / different Embedding type def test_model_get_set_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() diff --git a/tests/models/funnel/test_modeling_funnel.py b/tests/models/funnel/test_modeling_funnel.py index e46e5dc58de63f..062479aa25a563 100644 --- a/tests/models/funnel/test_modeling_funnel.py +++ b/tests/models/funnel/test_modeling_funnel.py @@ -394,13 +394,6 @@ def setUp(self): self.model_tester = FunnelModelTester(self) self.config_tester = ConfigTester(self, config_class=FunnelConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @@ -442,9 +435,6 @@ def setUp(self): self.model_tester = FunnelModelTester(self, base=True) self.config_tester = ConfigTester(self, config_class=FunnelConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*config_and_inputs) diff --git a/tests/models/gemma/test_modeling_gemma.py b/tests/models/gemma/test_modeling_gemma.py index a02541d585447c..bd7893606eed0c 100644 --- a/tests/models/gemma/test_modeling_gemma.py +++ b/tests/models/gemma/test_modeling_gemma.py @@ -331,13 +331,6 @@ def setUp(self): self.model_tester = GemmaModelTester(self) self.config_tester = ConfigTester(self, config_class=GemmaConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/git/test_modeling_git.py b/tests/models/git/test_modeling_git.py index 33da9e26cba03d..4c9b8c9243566e 100644 --- a/tests/models/git/test_modeling_git.py +++ b/tests/models/git/test_modeling_git.py @@ -130,14 +130,12 @@ class GitVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "microsoft/git-base" def setUp(self): self.model_tester = GitVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=GitVisionConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="GIT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -163,10 +161,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -195,12 +189,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/git-base" - model = 
GitVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class GitModelTester: def __init__( @@ -407,6 +395,7 @@ class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, ) fx_compatible = False test_torchscript = False + pretrained_checkpoint = "microsoft/git-base" # special case for GitForCausalLM model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -425,13 +414,6 @@ def setUp(self): self.model_tester = GitModelTester(self) self.config_tester = ConfigTester(self, config_class=GitConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) @@ -497,12 +479,6 @@ def _check_hidden_states_for_generate( [expected_shape] * len(iter_hidden_states), ) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/git-base" - model = GitModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="GIT has pixel values as additional input") def test_beam_search_generate_dict_outputs_use_cache(self): pass diff --git a/tests/models/glpn/test_modeling_glpn.py b/tests/models/glpn/test_modeling_glpn.py index 81e95ab244f9aa..fdfae5ff3ee1ad 100644 --- a/tests/models/glpn/test_modeling_glpn.py +++ b/tests/models/glpn/test_modeling_glpn.py @@ -152,18 +152,12 @@ class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False test_resize_embeddings = False + pretrained_checkpoint = "vinvino02/glpn-kitti" def setUp(self): self.model_tester = GLPNModelTester(self) self.config_tester = GLPNConfigTester(self, config_class=GLPNConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @@ -305,12 +299,6 @@ def test_training(self): loss = model(**inputs).loss loss.backward() - @slow - def test_model_from_pretrained(self): - model_name = "vinvino02/glpn-kitti" - model = GLPNModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/gpt2/test_modeling_gpt2.py b/tests/models/gpt2/test_modeling_gpt2.py index 3f96c20ab2dbd9..41d0194517c588 100644 --- a/tests/models/gpt2/test_modeling_gpt2.py +++ b/tests/models/gpt2/test_modeling_gpt2.py @@ -511,6 +511,7 @@ class GPT2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin fx_compatible = True test_missing_keys = False test_model_parallel = True + pretrained_checkpoint = "openai-community/gpt2" # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -545,9 +546,6 @@ def tearDown(self): gc.collect() backend_empty_cache(torch_device) - def test_config(self): - self.config_tester.run_common_tests() - def test_gpt2_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_gpt2_model(*config_and_inputs) @@ -741,12 +739,6 @@ def test_batch_generation_2heads(self): self.assertTrue(batch_out_sentence_tt != batch_out_sentence) # token_type_ids should change output self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) - @slow - def test_model_from_pretrained(self): - model_name = "openai-community/gpt2" - model = GPT2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class GPT2ModelLanguageGenerationTest(unittest.TestCase): diff --git a/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py b/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py index 9d7750f5cf20cc..cef766e77e8ffd 100644 --- a/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py +++ b/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py @@ -426,9 +426,6 @@ def tearDown(self): gc.collect() - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="MQA models does not support retain_grad") def test_retain_grad_hidden_states_attentions(self): pass diff --git a/tests/models/gpt_neo/test_modeling_gpt_neo.py b/tests/models/gpt_neo/test_modeling_gpt_neo.py index 245fee4b71f1c4..8601ec7a7b4b62 100644 --- a/tests/models/gpt_neo/test_modeling_gpt_neo.py +++ b/tests/models/gpt_neo/test_modeling_gpt_neo.py @@ -393,6 +393,7 @@ class GPTNeoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMix test_missing_keys = False test_pruning = False test_model_parallel = False + pretrained_checkpoint = "EleutherAI/gpt-neo-1.3B" # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -403,9 +404,6 @@ def setUp(self): self.model_tester = GPTNeoModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_gpt_neo_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt_neo_model(*config_and_inputs) @@ -567,9 +565,3 @@ def test_batch_generation(self): ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) - - @slow - def test_model_from_pretrained(self): - model_name = "EleutherAI/gpt-neo-1.3B" - model = GPTNeoModel.from_pretrained(model_name) - self.assertIsNotNone(model) diff --git a/tests/models/gpt_neox/test_modeling_gpt_neox.py b/tests/models/gpt_neox/test_modeling_gpt_neox.py index 196f873696eb70..10aacf2b94ab75 100644 --- a/tests/models/gpt_neox/test_modeling_gpt_neox.py +++ b/tests/models/gpt_neox/test_modeling_gpt_neox.py @@ -291,13 +291,6 @@ def setUp(self): self.model_tester = GPTNeoXModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(config, input_ids, input_mask) - def test_model_as_decoder(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) diff --git a/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py b/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py index 
784323afefdc3f..5dee67f751e7fa 100644 --- a/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py +++ b/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py @@ -213,13 +213,6 @@ def setUp(self): self.model_tester = GPTNeoXJapaneseModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(config, input_ids, input_mask) - def test_model_as_decoder(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) diff --git a/tests/models/gptj/test_modeling_gptj.py b/tests/models/gptj/test_modeling_gptj.py index 71c121dbaa5a9e..12627011fa56b3 100644 --- a/tests/models/gptj/test_modeling_gptj.py +++ b/tests/models/gptj/test_modeling_gptj.py @@ -367,6 +367,7 @@ class GPTJModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin test_missing_keys = False test_model_parallel = False test_head_masking = False + pretrained_checkpoint = "EleutherAI/gpt-j-6B" @unittest.skipIf( not is_torch_greater_or_equal_than_1_12, reason="PR #22069 made changes that require torch v1.12+." @@ -405,9 +406,6 @@ def setUp(self): self.model_tester = GPTJModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTJConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_gptj_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gptj_model(*config_and_inputs) @@ -492,12 +490,6 @@ def test_batch_generation(self): self.assertTrue(batch_out_sentence_tt != batch_out_sentence) # token_type_ids should change output self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) - @slow - def test_model_from_pretrained(self): - model_name = "EleutherAI/gpt-j-6B" - model = GPTJModel.from_pretrained(model_name, revision="float16", torch_dtype=torch.float16) - self.assertIsNotNone(model) - @require_flash_attn @require_torch_gpu @require_bitsandbytes diff --git a/tests/models/granite/test_modeling_granite.py b/tests/models/granite/test_modeling_granite.py index 0f4d7640a1bb7d..d9334f321e4b90 100644 --- a/tests/models/granite/test_modeling_granite.py +++ b/tests/models/granite/test_modeling_granite.py @@ -310,13 +310,6 @@ def setUp(self): self.model_tester = GraniteModelTester(self) self.config_tester = ConfigTester(self, config_class=GraniteConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/grounding_dino/test_modeling_grounding_dino.py b/tests/models/grounding_dino/test_modeling_grounding_dino.py index c6e9671dd59ae0..e6334619476d96 100644 --- a/tests/models/grounding_dino/test_modeling_grounding_dino.py +++ b/tests/models/grounding_dino/test_modeling_grounding_dino.py @@ -253,13 +253,6 @@ def setUp(self): common_properties=["d_model", "encoder_attention_heads", 
"decoder_attention_heads"], ) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_object_detection_head_model(*config_and_inputs) diff --git a/tests/models/groupvit/test_modeling_groupvit.py b/tests/models/groupvit/test_modeling_groupvit.py index ce31bc44a611d2..867ad813962b48 100644 --- a/tests/models/groupvit/test_modeling_groupvit.py +++ b/tests/models/groupvit/test_modeling_groupvit.py @@ -148,6 +148,7 @@ class GroupViTVisionModelTest(ModelTesterMixin, unittest.TestCase): test_torchscript = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "nvidia/groupvit-gcc-yfcc" def setUp(self): self.model_tester = GroupViTVisionModelTester(self) @@ -155,9 +156,6 @@ def setUp(self): self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="GroupViT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -195,10 +193,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True @@ -350,12 +344,6 @@ def test_retain_grad_hidden_states_attentions(self): if self.has_attentions: self.assertIsNone(attentions.grad) - @slow - def test_model_from_pretrained(self): - model_name = "nvidia/groupvit-gcc-yfcc" - model = GroupViTVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class GroupViTTextModelTester: def __init__( @@ -448,18 +436,12 @@ class GroupViTTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (GroupViTTextModel,) if is_torch_available() else () test_pruning = False test_head_masking = False + pretrained_checkpoint = "nvidia/groupvit-gcc-yfcc" def setUp(self): self.model_tester = GroupViTTextModelTester(self) self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -492,12 +474,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "nvidia/groupvit-gcc-yfcc" - model = GroupViTTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class GroupViTModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): @@ -556,14 +532,11 @@ class GroupViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "nvidia/groupvit-gcc-yfcc" def setUp(self): self.model_tester = GroupViTModelTester(self) - def test_model(self): - config_and_inputs = 
self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="hidden_states are tested in individual model tests") def test_hidden_states_output(self): pass @@ -706,12 +679,6 @@ def test_load_vision_text_config(self): text_config = GroupViTTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "nvidia/groupvit-gcc-yfcc" - model = GroupViTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/hiera/test_modeling_hiera.py b/tests/models/hiera/test_modeling_hiera.py index b118d6db5af61a..b43d75a91c02c5 100644 --- a/tests/models/hiera/test_modeling_hiera.py +++ b/tests/models/hiera/test_modeling_hiera.py @@ -250,6 +250,7 @@ class HieraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "facebook/hiera-tiny-224-hf" def setUp(self): self.model_tester = HieraModelTester(self) @@ -493,10 +494,6 @@ def test_model_common_attributes(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @@ -509,12 +506,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - for model_name in ["facebook/hiera-tiny-224-hf"]: - model = HieraModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/hubert/test_modeling_hubert.py b/tests/models/hubert/test_modeling_hubert.py index 86f2b4119324ae..4f0c8b847044e9 100644 --- a/tests/models/hubert/test_modeling_hubert.py +++ b/tests/models/hubert/test_modeling_hubert.py @@ -318,18 +318,12 @@ class HubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): fx_compatible = True test_pruning = False test_headmasking = False + pretrained_checkpoint = "facebook/hubert-base-ls960" def setUp(self): self.model_tester = HubertModelTester(self) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) @@ -565,17 +559,13 @@ def _mock_init_weights(self, module): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = HubertModel.from_pretrained("facebook/hubert-base-ls960") - self.assertIsNotNone(model) - @require_torch class HubertRobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (HubertForCTC, HubertForSequenceClassification, HubertModel) if 
is_torch_available() else () test_pruning = False test_headmasking = False + pretrained_checkpoint = "facebook/hubert-large-ls960-ft" def setUp(self): self.model_tester = HubertModelTester( @@ -583,13 +573,6 @@ def setUp(self): ) self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) @@ -713,11 +696,6 @@ def _mock_init_weights(self, module): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft") - self.assertIsNotNone(model) - @require_torch class HubertUtilsTest(unittest.TestCase): diff --git a/tests/models/ibert/test_modeling_ibert.py b/tests/models/ibert/test_modeling_ibert.py index 3918b3efeacc60..480d0e94f74923 100644 --- a/tests/models/ibert/test_modeling_ibert.py +++ b/tests/models/ibert/test_modeling_ibert.py @@ -254,18 +254,12 @@ class IBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): if is_torch_available() else {} ) + pretrained_checkpoint = "kssteven/ibert-roberta-base" def setUp(self): self.model_tester = IBertModelTester(self) self.config_tester = ConfigTester(self, config_class=IBertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # I-BERT only supports absolute embedding @@ -289,12 +283,6 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "kssteven/ibert-roberta-base" - model = IBertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_create_position_ids_respects_padding_index(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 diff --git a/tests/models/idefics/test_modeling_idefics.py b/tests/models/idefics/test_modeling_idefics.py index 0197ebcaff5388..c168883b9efc63 100644 --- a/tests/models/idefics/test_modeling_idefics.py +++ b/tests/models/idefics/test_modeling_idefics.py @@ -327,6 +327,7 @@ class IdeficsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) test_pruning = False test_headmasking = False test_torchscript = False + pretrained_checkpoint = "HuggingFaceM4/idefics-9b" def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) @@ -353,9 +354,6 @@ def setUp(self): self.model_tester = IdeficsModelTester(self) self.config_tester = ConfigTester(self, config_class=IdeficsConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_model_single_image(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=1, interpolate_pos_encoding=False, image_expansion=0 @@ -565,12 
+563,6 @@ def test_pt_tf_model_equivalence(self, allow_missing_keys=False): self.has_attentions = False super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys) - @slow - def test_model_from_pretrained(self): - model_name = "HuggingFaceM4/idefics-9b" - model = IdeficsModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch_sdpa @slow @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) diff --git a/tests/models/imagegpt/test_modeling_imagegpt.py b/tests/models/imagegpt/test_modeling_imagegpt.py index d8ceed6885f0c8..9469cad9d4708c 100644 --- a/tests/models/imagegpt/test_modeling_imagegpt.py +++ b/tests/models/imagegpt/test_modeling_imagegpt.py @@ -237,6 +237,8 @@ class ImageGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterM else {} ) test_missing_keys = False + input_name = "pixel_values" + pretrained_checkpoint = "openai/imagegpt-small" # as ImageGPTForImageClassification isn't included in any auto mapping, we add labels here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -261,9 +263,6 @@ def setUp(self): self.model_tester = ImageGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=ImageGPTConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_imagegpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_imagegpt_model(*config_and_inputs) @@ -294,12 +293,6 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "openai/imagegpt-small" - model = ImageGPTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/informer/test_modeling_informer.py b/tests/models/informer/test_modeling_informer.py index 10cb2b71824e99..a12996048ee974 100644 --- a/tests/models/informer/test_modeling_informer.py +++ b/tests/models/informer/test_modeling_informer.py @@ -208,9 +208,6 @@ def setUp(self): prediction_length=self.model_tester.prediction_length, ) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/instructblip/test_modeling_instructblip.py b/tests/models/instructblip/test_modeling_instructblip.py index 8292567334bf3b..10c71217b286a8 100644 --- a/tests/models/instructblip/test_modeling_instructblip.py +++ b/tests/models/instructblip/test_modeling_instructblip.py @@ -151,6 +151,7 @@ class InstructBlipVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Salesforce/instructblip-flan-t5-xl" def setUp(self): self.model_tester = InstructBlipVisionModelTester(self) @@ -158,9 +159,6 @@ def setUp(self): self, config_class=InstructBlipVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="InstructBLIP's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -186,10 +184,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def 
test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="InstructBlipVisionModel is an internal building block, doesn't support standalone training") def test_training(self): pass @@ -218,12 +212,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/instructblip-flan-t5-xl" - model = InstructBlipVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class InstructBlipQFormerModelTester: def __init__( @@ -460,6 +448,7 @@ class InstructBlipForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, Gene test_resize_embeddings = False test_attention_outputs = False test_torchscript = False + pretrained_checkpoint = "Salesforce/instructblip-flan-t5-xl" def setUp(self): self.model_tester = InstructBlipForConditionalGenerationDecoderOnlyModelTester(self) @@ -523,12 +512,6 @@ def test_load_vision_qformer_text_config(self): qformer_config = InstructBlipQFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/instructblip-flan-t5-xl" - model = InstructBlipForConditionalGeneration.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/instructblipvideo/test_modeling_instructblipvideo.py b/tests/models/instructblipvideo/test_modeling_instructblipvideo.py index 8a9326c22ac11c..b5d8da403193d9 100644 --- a/tests/models/instructblipvideo/test_modeling_instructblipvideo.py +++ b/tests/models/instructblipvideo/test_modeling_instructblipvideo.py @@ -157,6 +157,7 @@ class InstructBlipVideoVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Salesforce/instructblip-vicuna-7b" def setUp(self): self.model_tester = InstructBlipVideoVisionModelTester(self) @@ -164,9 +165,6 @@ def setUp(self): self, config_class=InstructBlipVideoVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="InstructBlipVideo's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -196,10 +194,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip( reason="InstructBlipVideoVisionModel is an internal building block, doesn't support standalone training" ) @@ -232,12 +226,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/instructblip-vicuna-7b" - model = InstructBlipVideoVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class InstructBlipVideoQFormerModelTester: def __init__( @@ -481,6 +469,7 @@ class InstructBlipVideoForConditionalGenerationDecoderOnlyTest( test_resize_embeddings = False test_attention_outputs = False test_torchscript = False + pretrained_checkpoint = "Salesforce/instructblip-vicuna-7b" def setUp(self): self.model_tester = 
InstructBlipVideoForConditionalGenerationDecoderOnlyModelTester(self) @@ -544,12 +533,6 @@ def test_load_vision_qformer_text_config(self): qformer_config = InstructBlipVideoQFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "Salesforce/instructblip-vicuna-7b" - model = InstructBlipVideoForConditionalGeneration.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_video(): diff --git a/tests/models/jamba/test_modeling_jamba.py b/tests/models/jamba/test_modeling_jamba.py index 6e1a2cf2cf9c44..aa69d8a1f99874 100644 --- a/tests/models/jamba/test_modeling_jamba.py +++ b/tests/models/jamba/test_modeling_jamba.py @@ -346,13 +346,6 @@ def setUp(self): self.model_tester = JambaModelTester(self) self.config_tester = JambaConfigTester(self, config_class=JambaConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_casual_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) diff --git a/tests/models/jetmoe/test_modeling_jetmoe.py b/tests/models/jetmoe/test_modeling_jetmoe.py index 50fd7a27e1e6d1..0dc9bc2a157c2e 100644 --- a/tests/models/jetmoe/test_modeling_jetmoe.py +++ b/tests/models/jetmoe/test_modeling_jetmoe.py @@ -319,15 +319,6 @@ def setUp(self): self, config_class=JetMoeConfig, common_properties=["hidden_size", "num_hidden_layers"] ) - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config - def test_config(self): - self.config_tester.run_common_tests() - - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_various_embeddings def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() diff --git a/tests/models/kosmos2/test_modeling_kosmos2.py b/tests/models/kosmos2/test_modeling_kosmos2.py index 913111c0a088e1..ea3ade1968784a 100644 --- a/tests/models/kosmos2/test_modeling_kosmos2.py +++ b/tests/models/kosmos2/test_modeling_kosmos2.py @@ -257,6 +257,7 @@ class Kosmos2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "microsoft/kosmos-2-patch14-224" # TODO: `image-to-text` pipeline for this model needs Processor. 
def is_pipeline_test_to_skip( @@ -299,10 +300,6 @@ def test_initialization(self): msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -421,12 +418,6 @@ def check_same_values(layer_1, layer_2): # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape) # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head)) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/kosmos-2-patch14-224" - model = Kosmos2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py index 38dd86eb8b82e3..ef7ab469cd851e 100644 --- a/tests/models/layoutlm/test_modeling_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_layoutlm.py @@ -250,13 +250,6 @@ def setUp(self): self.model_tester = LayoutLMModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/layoutlmv2/test_modeling_layoutlmv2.py b/tests/models/layoutlmv2/test_modeling_layoutlmv2.py index 94cc4e95432c11..7bf0edaeca4e73 100644 --- a/tests/models/layoutlmv2/test_modeling_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_modeling_layoutlmv2.py @@ -281,18 +281,12 @@ class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa if is_torch_available() else {} ) + pretrained_checkpoint = "microsoft/layoutlmv2-base-uncased" def setUp(self): self.model_tester = LayoutLMv2ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv2Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @require_torch_multi_gpu @unittest.skip( reason=( @@ -426,12 +420,6 @@ def check_hidden_states_output(inputs_dict, config, model_class): def test_model_is_small(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/layoutlmv2-base-uncased" - model = LayoutLMv2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/layoutlmv3/test_modeling_layoutlmv3.py b/tests/models/layoutlmv3/test_modeling_layoutlmv3.py index 6cb93d8b427be4..454c3186f51145 100644 --- a/tests/models/layoutlmv3/test_modeling_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_modeling_layoutlmv3.py @@ -289,6 +289,7 @@ class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa if is_torch_available() else {} ) + 
pretrained_checkpoint = "microsoft/layoutlmv3-base" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -340,13 +341,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -365,12 +359,6 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/layoutlmv3-base" - model = LayoutLMv3Model.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/led/test_modeling_led.py b/tests/models/led/test_modeling_led.py index a4d81ab2e1c6db..3ecf8ae004f33a 100644 --- a/tests/models/led/test_modeling_led.py +++ b/tests/models/led/test_modeling_led.py @@ -313,9 +313,6 @@ def setUp(self): self.model_tester = LEDModelTester(self) self.config_tester = ConfigTester(self, config_class=LEDConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/levit/test_modeling_levit.py b/tests/models/levit/test_modeling_levit.py index 6199d9cdfcfd2e..b3472af62cfe24 100644 --- a/tests/models/levit/test_modeling_levit.py +++ b/tests/models/levit/test_modeling_levit.py @@ -187,6 +187,7 @@ class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "facebook/levit-128S" def setUp(self): self.model_tester = LevitModelTester(self) @@ -194,9 +195,6 @@ def setUp(self): self, config_class=LevitConfig, has_text_modality=False, common_properties=["image_size", "num_channels"] ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Levit does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -270,10 +268,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @@ -371,12 +365,6 @@ def test_problem_types(self): loss.backward() - @slow - def test_model_from_pretrained(self): - model_name = "facebook/levit-128S" - model = LevitModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/lilt/test_modeling_lilt.py b/tests/models/lilt/test_modeling_lilt.py index 0d0ed720c53a2f..6a0f513b0edcfd 100644 --- a/tests/models/lilt/test_modeling_lilt.py +++ b/tests/models/lilt/test_modeling_lilt.py @@ -242,6 +242,7 @@ class LiltModelTest(ModelTesterMixin, 
GenerationTesterMixin, PipelineTesterMixin ) fx_compatible = False test_pruning = False + pretrained_checkpoint = "SCUT-DLVCLab/lilt-roberta-en-base" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -253,13 +254,6 @@ def setUp(self): self.model_tester = LiltModelTester(self) self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -292,12 +286,6 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "SCUT-DLVCLab/lilt-roberta-en-base" - model = LiltModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch @slow diff --git a/tests/models/llama/test_modeling_llama.py b/tests/models/llama/test_modeling_llama.py index a21665c822f2f9..c3c2930c966d65 100644 --- a/tests/models/llama/test_modeling_llama.py +++ b/tests/models/llama/test_modeling_llama.py @@ -323,13 +323,6 @@ def setUp(self): self.model_tester = LlamaModelTester(self) self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/longformer/test_modeling_longformer.py b/tests/models/longformer/test_modeling_longformer.py index e7f2f67cc23236..a9c4241ae6f2ff 100644 --- a/tests/models/longformer/test_modeling_longformer.py +++ b/tests/models/longformer/test_modeling_longformer.py @@ -349,13 +349,6 @@ def setUp(self): self.model_tester = LongformerModelTester(self) self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_attention_mask_determinism(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs) diff --git a/tests/models/longt5/test_modeling_flax_longt5.py b/tests/models/longt5/test_modeling_flax_longt5.py index 9449cfa5e35a55..bc51bd832294b4 100644 --- a/tests/models/longt5/test_modeling_flax_longt5.py +++ b/tests/models/longt5/test_modeling_flax_longt5.py @@ -244,13 +244,6 @@ def setUp(self): self.model_tester = FlaxLongT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that 
gated gelu feed forward and different word embeddings work diff --git a/tests/models/longt5/test_modeling_longt5.py b/tests/models/longt5/test_modeling_longt5.py index c0cf21b2369d0a..968786a3c48c88 100644 --- a/tests/models/longt5/test_modeling_longt5.py +++ b/tests/models/longt5/test_modeling_longt5.py @@ -518,22 +518,16 @@ class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMix test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True + pretrained_checkpoint = "google/long-t5-local-base" def setUp(self): self.model_tester = LongT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) @@ -586,12 +580,6 @@ def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/long-t5-local-base" - model = LongT5Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() @@ -1032,13 +1020,6 @@ def setUp(self): self.model_tester = LongT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=LongT5Config, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="has_attentions is set to False") diff --git a/tests/models/luke/test_modeling_luke.py b/tests/models/luke/test_modeling_luke.py index a9a5d4f9387b6c..0d66e1983a8528 100644 --- a/tests/models/luke/test_modeling_luke.py +++ b/tests/models/luke/test_modeling_luke.py @@ -618,6 +618,7 @@ class LukeModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_torchscript = False test_resize_embeddings = True test_head_masking = True + pretrained_checkpoint = "studio-ousia/luke-base" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -690,19 +691,6 @@ def setUp(self): self.model_tester = LukeModelTester(self) self.config_tester = ConfigTester(self, config_class=LukeConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - - @slow - def test_model_from_pretrained(self): - model_name = "studio-ousia/luke-base" - model = LukeModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) diff --git a/tests/models/lxmert/test_modeling_lxmert.py 
b/tests/models/lxmert/test_modeling_lxmert.py index 1ff8c002618bff..e9c1d9debb91d6 100644 --- a/tests/models/lxmert/test_modeling_lxmert.py +++ b/tests/models/lxmert/test_modeling_lxmert.py @@ -541,6 +541,7 @@ class LxmertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_head_masking = False test_pruning = False test_torchscript = False + pretrained_checkpoint = "unc-nlp/lxmert-base-uncased" # overwrite function because qa models takes different input label shape def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -562,9 +563,6 @@ def setUp(self): self.model_tester = LxmertModelTester(self) self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_lxmert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_model(*config_and_inputs) @@ -581,13 +579,6 @@ def test_lxmert_question_answering_labels_resize(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.resize_lxmert_num_qa_labels(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "unc-nlp/lxmert-base-uncased" - model = LxmertModel.from_pretrained(model_name) - model.to(torch_device) - self.assertIsNotNone(model) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() seq_len = getattr(self.model_tester, "seq_length", None) diff --git a/tests/models/m2m_100/test_modeling_m2m_100.py b/tests/models/m2m_100/test_modeling_m2m_100.py index a29a9c8a9ec0dc..1537a0a386438b 100644 --- a/tests/models/m2m_100/test_modeling_m2m_100.py +++ b/tests/models/m2m_100/test_modeling_m2m_100.py @@ -271,9 +271,6 @@ def setUp(self): self.model_tester = M2M100ModelTester(self) self.config_tester = ConfigTester(self, config_class=M2M100Config) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/mamba/test_modeling_mamba.py b/tests/models/mamba/test_modeling_mamba.py index 3b4a18bb48ebf4..99418c874ed17e 100644 --- a/tests/models/mamba/test_modeling_mamba.py +++ b/tests/models/mamba/test_modeling_mamba.py @@ -256,6 +256,7 @@ class MambaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixi pipeline_model_mapping = ( {"feature-extraction": MambaModel, "text-generation": MambaForCausalLM} if is_torch_available() else {} ) + pretrained_checkpoint = "hf-internal-testing/mamba-130m" def setUp(self): self.model_tester = MambaModelTester(self) @@ -285,9 +286,6 @@ def assertInterval(self, member, container, msg=None): standardMsg = "%s not found in %s" % (safe_repr(member), safe_repr(container)) self.fail(self._formatMessage(msg, standardMsg)) - def test_config(self): - self.config_tester.run_common_tests() - @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -356,11 +354,6 @@ def test_initialization(self): # check if it's a ones like self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5)) - @slow - def test_model_from_pretrained(self): - model = MambaModel.from_pretrained("hf-internal-testing/mamba-130m") - self.assertIsNotNone(model) - def test_model_outputs_equivalence(self): config, 
inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/marian/test_modeling_marian.py b/tests/models/marian/test_modeling_marian.py index aed5381fcc706a..980eb1d2274d12 100644 --- a/tests/models/marian/test_modeling_marian.py +++ b/tests/models/marian/test_modeling_marian.py @@ -258,9 +258,6 @@ def setUp(self): self.model_tester = MarianModelTester(self) self.config_tester = ConfigTester(self, config_class=MarianConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -881,9 +878,6 @@ def setUp( self.model_tester = MarianStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MarianConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/markuplm/test_modeling_markuplm.py b/tests/models/markuplm/test_modeling_markuplm.py index 71757385e87c91..dc7f1449a86b09 100644 --- a/tests/models/markuplm/test_modeling_markuplm.py +++ b/tests/models/markuplm/test_modeling_markuplm.py @@ -311,13 +311,6 @@ def setUp(self): self.model_tester = MarkupLMModelTester(self) self.config_tester = ConfigTester(self, config_class=MarkupLMConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) diff --git a/tests/models/mask2former/test_modeling_mask2former.py b/tests/models/mask2former/test_modeling_mask2former.py index ba78cf9ce3f7d6..d410d689bd7299 100644 --- a/tests/models/mask2former/test_modeling_mask2former.py +++ b/tests/models/mask2former/test_modeling_mask2former.py @@ -204,14 +204,12 @@ class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC test_pruning = False test_head_masking = False test_missing_keys = False + pretrained_checkpoint = "facebook/mask2former-swin-small-coco-instance" def setUp(self): self.model_tester = Mask2FormerModelTester(self) self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - def test_mask2former_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False) @@ -243,12 +241,6 @@ def test_resize_tokens_embeddings(self): def test_multi_gpu_data_parallel_forward(self): pass - @slow - def test_model_from_pretrained(self): - for model_name in ["facebook/mask2former-swin-small-coco-instance"]: - model = Mask2FormerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { diff --git a/tests/models/maskformer/test_modeling_maskformer.py b/tests/models/maskformer/test_modeling_maskformer.py index 025261841b300f..68eb88d574502c 100644 --- a/tests/models/maskformer/test_modeling_maskformer.py +++ 
b/tests/models/maskformer/test_modeling_maskformer.py @@ -209,6 +209,7 @@ class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa test_head_masking = False test_missing_keys = False zero_init_hidden_state = True + pretrained_checkpoint = "facebook/maskformer-swin-small-coco" def setUp(self): self.model_tester = MaskFormerModelTester(self) @@ -235,9 +236,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict - def test_config(self): - self.config_tester.run_common_tests() - def test_maskformer_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False) @@ -269,12 +267,6 @@ def test_resize_tokens_embeddings(self): def test_multi_gpu_data_parallel_forward(self): pass - @slow - def test_model_from_pretrained(self): - for model_name in ["facebook/maskformer-swin-small-coco"]: - model = MaskFormerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { diff --git a/tests/models/maskformer/test_modeling_maskformer_swin.py b/tests/models/maskformer/test_modeling_maskformer_swin.py index 513ac6f67b54ef..19bb65ae98afac 100644 --- a/tests/models/maskformer/test_modeling_maskformer_swin.py +++ b/tests/models/maskformer/test_modeling_maskformer_swin.py @@ -202,13 +202,6 @@ def setUp(self): def test_multi_gpu_data_parallel_forward(self): pass - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) diff --git a/tests/models/mbart/test_modeling_mbart.py b/tests/models/mbart/test_modeling_mbart.py index 9401d892daa39b..f846cb3b5d389c 100644 --- a/tests/models/mbart/test_modeling_mbart.py +++ b/tests/models/mbart/test_modeling_mbart.py @@ -263,9 +263,6 @@ def setUp(self): self.model_tester = MBartModelTester(self) self.config_tester = ConfigTester(self, config_class=MBartConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -730,9 +727,6 @@ def setUp( self.model_tester = MBartStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MBartConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/megatron_bert/test_modeling_megatron_bert.py b/tests/models/megatron_bert/test_modeling_megatron_bert.py index ee6bedfd0ca413..bc2df877a7437d 100644 --- a/tests/models/megatron_bert/test_modeling_megatron_bert.py +++ b/tests/models/megatron_bert/test_modeling_megatron_bert.py @@ -317,9 +317,6 @@ def setUp(self): self.model_tester = MegatronBertModelTester(self) self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_megatron_bert_model(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs) diff --git a/tests/models/mgp_str/test_modeling_mgp_str.py b/tests/models/mgp_str/test_modeling_mgp_str.py index 559a9e596840c6..609ffa4da5c79e 100644 --- a/tests/models/mgp_str/test_modeling_mgp_str.py +++ b/tests/models/mgp_str/test_modeling_mgp_str.py @@ -134,13 +134,6 @@ def setUp(self): self.model_tester = MgpstrModelTester(self) self.config_tester = ConfigTester(self, config_class=MgpstrConfig, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="MgpstrModel does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/mistral/test_modeling_mistral.py b/tests/models/mistral/test_modeling_mistral.py index 0730f8ba444140..5f0b25fa30352b 100644 --- a/tests/models/mistral/test_modeling_mistral.py +++ b/tests/models/mistral/test_modeling_mistral.py @@ -328,13 +328,6 @@ def setUp(self): self.model_tester = MistralModelTester(self) self.config_tester = ConfigTester(self, config_class=MistralConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/mixtral/test_modeling_mixtral.py b/tests/models/mixtral/test_modeling_mixtral.py index db9641e3dcb2a9..0658f21804216b 100644 --- a/tests/models/mixtral/test_modeling_mixtral.py +++ b/tests/models/mixtral/test_modeling_mixtral.py @@ -328,13 +328,6 @@ def setUp(self): self.model_tester = MixtralModelTester(self) self.config_tester = ConfigTester(self, config_class=MixtralConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/mobilebert/test_modeling_mobilebert.py b/tests/models/mobilebert/test_modeling_mobilebert.py index d7a409427c9c51..993b74b7fcde3e 100644 --- a/tests/models/mobilebert/test_modeling_mobilebert.py +++ b/tests/models/mobilebert/test_modeling_mobilebert.py @@ -306,9 +306,6 @@ def setUp(self): self.model_tester = MobileBertModelTester(self) self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_mobilebert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*config_and_inputs) diff --git a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py index a04cfba45f645a..7d3e8ac5a33469 100644 --- a/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py @@ -154,14 +154,12 @@ class 
MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "google/mobilenet_v1_1.0_224" def setUp(self): self.model_tester = MobileNetV1ModelTester(self) self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="MobileNetV1 does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -174,10 +172,6 @@ def test_model_get_set_embeddings(self): def test_attention_outputs(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -208,12 +202,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/mobilenet_v1_1.0_224" - model = MobileNetV1Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @is_flaky(description="is_flaky https://github.com/huggingface/transformers/pull/31258") def test_batching_equivalence(self): super().test_batching_equivalence() diff --git a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py index 7df6cbd1196bc0..fe3f9062134f95 100644 --- a/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py @@ -205,14 +205,12 @@ class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "google/mobilenet_v2_1.4_224" def setUp(self): self.model_tester = MobileNetV2ModelTester(self) self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="MobileNetV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -225,10 +223,6 @@ def test_model_get_set_embeddings(self): def test_attention_outputs(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -263,12 +257,6 @@ def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/mobilenet_v2_1.4_224" - model = MobileNetV2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @is_flaky(description="is_flaky https://github.com/huggingface/transformers/issues/29516") def test_batching_equivalence(self): super().test_batching_equivalence() diff --git a/tests/models/mobilevit/test_modeling_mobilevit.py b/tests/models/mobilevit/test_modeling_mobilevit.py index cd4cfa68e5dcde..d145a6827b1b29 100644 --- a/tests/models/mobilevit/test_modeling_mobilevit.py +++ 
b/tests/models/mobilevit/test_modeling_mobilevit.py @@ -193,6 +193,7 @@ class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas if is_torch_available() else {} ) + pretrained_checkpoint = "apple/mobilevit-small" test_pruning = False test_resize_embeddings = False @@ -203,9 +204,6 @@ def setUp(self): self.model_tester = MobileViTModelTester(self) self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="MobileViT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -218,10 +216,6 @@ def test_model_get_set_embeddings(self): def test_attention_outputs(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -268,12 +262,6 @@ def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "apple/mobilevit-small" - model = MobileViTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @is_flaky(description="is_flaky https://github.com/huggingface/transformers/issues/29516") def test_batching_equivalence(self): super().test_batching_equivalence() diff --git a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py index e2b565e4b9ce3e..9c34a9a6dc43d0 100644 --- a/tests/models/mobilevitv2/test_modeling_mobilevitv2.py +++ b/tests/models/mobilevitv2/test_modeling_mobilevitv2.py @@ -200,14 +200,12 @@ class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "apple/mobilevitv2-1.0-imagenet1k-256" def setUp(self): self.model_tester = MobileViTV2ModelTester(self) self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="MobileViTV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -225,10 +223,6 @@ def test_attention_outputs(self): def test_multi_gpu_data_parallel_forward(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -275,12 +269,6 @@ def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "apple/mobilevitv2-1.0-imagenet1k-256" - model = MobileViTV2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/mpnet/test_modeling_mpnet.py b/tests/models/mpnet/test_modeling_mpnet.py index 9f97f3c11b5838..5a92f4911b2f39 100644 --- a/tests/models/mpnet/test_modeling_mpnet.py +++ 
b/tests/models/mpnet/test_modeling_mpnet.py @@ -223,9 +223,6 @@ def setUp(self): self.model_tester = MPNetModelTester(self) self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_mpnet_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*config_and_inputs) diff --git a/tests/models/mpt/test_modeling_mpt.py b/tests/models/mpt/test_modeling_mpt.py index 55919cbbf95941..12b26d0591830f 100644 --- a/tests/models/mpt/test_modeling_mpt.py +++ b/tests/models/mpt/test_modeling_mpt.py @@ -372,14 +372,12 @@ class MptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, if is_torch_available() else {} ) + pretrained_checkpoint = "mosaicml/mpt-7b" def setUp(self): self.model_tester = MptModelTester(self) self.config_tester = MptConfigTester(self, config_class=MptConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_mpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model(*config_and_inputs) @@ -426,12 +424,6 @@ def test_mpt_weight_initialization(self): def test_model_weights_reload_no_missing_tied_weights(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "mosaicml/mpt-7b" - model = MptModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @slow @require_torch_gpu diff --git a/tests/models/mra/test_modeling_mra.py b/tests/models/mra/test_modeling_mra.py index 4c839f5da10a68..17479fad7a3d07 100644 --- a/tests/models/mra/test_modeling_mra.py +++ b/tests/models/mra/test_modeling_mra.py @@ -310,18 +310,12 @@ class MraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): if is_torch_available() else {} ) + pretrained_checkpoint = "uw-madison/mra-base-512-4" def setUp(self): self.model_tester = MraModelTester(self) self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -348,12 +342,6 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "uw-madison/mra-base-512-4" - model = MraModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="MRA does not output attentions") def test_attention_outputs(self): return diff --git a/tests/models/mt5/test_modeling_mt5.py b/tests/models/mt5/test_modeling_mt5.py index ec6ec6cd85c651..27aa6cfc76a170 100644 --- a/tests/models/mt5/test_modeling_mt5.py +++ b/tests/models/mt5/test_modeling_mt5.py @@ -574,6 +574,7 @@ class MT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, is_encoder_decoder = True # The small MT5 model needs higher percentages for CPU/MP tests model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "google/mt5-small" def setUp(self): self.model_tester = MT5ModelTester(self) @@ -711,17 +712,10 @@ def flatten_output(output): # (Even with this 
call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() - def test_config(self): - self.config_tester.run_common_tests() - def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work @@ -831,12 +825,6 @@ def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_t5_v1_1(config) - @slow - def test_model_from_pretrained(self): - model_name = "google/mt5-small" - model = MT5Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() @@ -1039,13 +1027,6 @@ def setUp(self): self.model_tester = MT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=MT5Config, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() diff --git a/tests/models/musicgen/test_modeling_musicgen.py b/tests/models/musicgen/test_modeling_musicgen.py index a385a18b91c5d5..51df7b87fb57f9 100644 --- a/tests/models/musicgen/test_modeling_musicgen.py +++ b/tests/models/musicgen/test_modeling_musicgen.py @@ -188,9 +188,6 @@ def setUp(self): self.model_tester = MusicgenDecoderTester(self) self.config_tester = ConfigTester(self, config_class=MusicgenDecoderConfig, hidden_size=16) - def test_config(self): - self.config_tester.run_common_tests() - # special case for labels def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) diff --git a/tests/models/musicgen_melody/test_modeling_musicgen_melody.py b/tests/models/musicgen_melody/test_modeling_musicgen_melody.py index e8584e238d3cd9..383add717cb75c 100644 --- a/tests/models/musicgen_melody/test_modeling_musicgen_melody.py +++ b/tests/models/musicgen_melody/test_modeling_musicgen_melody.py @@ -189,9 +189,6 @@ def setUp(self): self.model_tester = MusicgenMelodyDecoderTester(self) self.config_tester = ConfigTester(self, config_class=MusicgenMelodyDecoderConfig, hidden_size=16) - def test_config(self): - self.config_tester.run_common_tests() - # special case for labels # Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenDecoderTest._prepare_for_class def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): diff --git a/tests/models/mvp/test_modeling_mvp.py b/tests/models/mvp/test_modeling_mvp.py index 33c6d778448d19..c517d6ba5755a5 100644 --- a/tests/models/mvp/test_modeling_mvp.py +++ b/tests/models/mvp/test_modeling_mvp.py @@ -459,9 +459,6 @@ def setUp(self): self.model_tester = MvpModelTester(self) self.config_tester = ConfigTester(self, config_class=MvpConfig) 
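# --- Editor's note (not part of the patch) ---------------------------------
# Every hunk in this patch follows the same pattern: the per-model
# test_config / test_model / test_model_from_pretrained methods are deleted
# and a `pretrained_checkpoint` class attribute is added in their place. The
# shared implementation that consumes that attribute is not shown anywhere in
# this diff; the sketch below is only an assumption about what the common
# tester mixin roughly does with it, not the actual code in
# tests/test_modeling_common.py, and the class name here is made up.

from transformers.testing_utils import slow


class CommonModelTestSketch:
    # Overridden by each per-model test class, e.g.
    # pretrained_checkpoint = "SCUT-DLVCLab/lilt-roberta-en-base"
    pretrained_checkpoint = None

    def test_config(self):
        # formerly copy-pasted into every per-model test file
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        if self.pretrained_checkpoint is None:
            self.skipTest("no pretrained checkpoint defined for this model")
        # load the first registered model class from the Hub checkpoint
        model = self.all_model_classes[0].from_pretrained(self.pretrained_checkpoint)
        self.assertIsNotNone(model)
# ---------------------------------------------------------------------------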
- def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -807,9 +804,6 @@ def setUp( self.model_tester = MvpStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MvpConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/nllb_moe/test_modeling_nllb_moe.py b/tests/models/nllb_moe/test_modeling_nllb_moe.py index d8dc3b6ef31130..9ec8d29172c3dd 100644 --- a/tests/models/nllb_moe/test_modeling_nllb_moe.py +++ b/tests/models/nllb_moe/test_modeling_nllb_moe.py @@ -275,9 +275,6 @@ def setUp(self): self.model_tester = NllbMoeModelTester(self) self.config_tester = ConfigTester(self, config_class=NllbMoeConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/nystromformer/test_modeling_nystromformer.py b/tests/models/nystromformer/test_modeling_nystromformer.py index 3d812ebf04a333..db4426f81b222f 100644 --- a/tests/models/nystromformer/test_modeling_nystromformer.py +++ b/tests/models/nystromformer/test_modeling_nystromformer.py @@ -242,18 +242,12 @@ class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Tes ) test_pruning = False test_headmasking = False + pretrained_checkpoint = "uw-madison/nystromformer-512" def setUp(self): self.model_tester = NystromformerModelTester(self) self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -280,12 +274,6 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "uw-madison/nystromformer-512" - model = NystromformerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class NystromformerModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/olmo/test_modeling_olmo.py b/tests/models/olmo/test_modeling_olmo.py index b74d0fdf03b8f6..a8467cc0887758 100644 --- a/tests/models/olmo/test_modeling_olmo.py +++ b/tests/models/olmo/test_modeling_olmo.py @@ -294,13 +294,6 @@ def setUp(self): self.model_tester = OlmoModelTester(self) self.config_tester = ConfigTester(self, config_class=OlmoConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="OLMo does not support head pruning.") def test_headmasking(self): pass diff --git a/tests/models/olmoe/test_modeling_olmoe.py 
b/tests/models/olmoe/test_modeling_olmoe.py index 1ce231e0373152..cab9dfde91f17b 100644 --- a/tests/models/olmoe/test_modeling_olmoe.py +++ b/tests/models/olmoe/test_modeling_olmoe.py @@ -309,13 +309,6 @@ def setUp(self): self.model_tester = OlmoeModelTester(self) self.config_tester = ConfigTester(self, config_class=OlmoeConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="OLMoE does not support head pruning.") def test_headmasking(self): pass diff --git a/tests/models/oneformer/test_modeling_oneformer.py b/tests/models/oneformer/test_modeling_oneformer.py index ac8f044c556828..fc4d48a0cd700b 100644 --- a/tests/models/oneformer/test_modeling_oneformer.py +++ b/tests/models/oneformer/test_modeling_oneformer.py @@ -244,6 +244,7 @@ class OneFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas test_pruning = False test_head_masking = False test_missing_keys = False + pretrained_checkpoint = "shi-labs/oneformer_ade20k_swin_tiny" # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( @@ -258,9 +259,6 @@ def setUp(self): self.model_tester = OneFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=OneFormerConfig, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - def test_oneformer_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_oneformer_model(config, **inputs, output_hidden_states=False) @@ -323,12 +321,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "task_inputs"] self.assertListEqual(arg_names[:2], expected_arg_names) - @slow - def test_model_from_pretrained(self): - for model_name in ["shi-labs/oneformer_ade20k_swin_tiny"]: - model = OneFormerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { diff --git a/tests/models/openai/test_modeling_openai.py b/tests/models/openai/test_modeling_openai.py index 49e6d50bc4287a..b609cb9ef2a3e5 100644 --- a/tests/models/openai/test_modeling_openai.py +++ b/tests/models/openai/test_modeling_openai.py @@ -208,6 +208,7 @@ class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester if is_torch_available() else {} ) + pretrained_checkpoint = "openai-community/openai-gpt" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -248,9 +249,6 @@ def setUp(self): self.model_tester = OpenAIGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_openai_gpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs) @@ -267,12 +265,6 @@ def test_openai_gpt_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "openai-community/openai-gpt" - model = OpenAIGPTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase): diff --git 
a/tests/models/opt/test_modeling_opt.py b/tests/models/opt/test_modeling_opt.py index 83721f1281f4d8..ab9aa1f8c8367e 100644 --- a/tests/models/opt/test_modeling_opt.py +++ b/tests/models/opt/test_modeling_opt.py @@ -239,9 +239,6 @@ def setUp(self): self.model_tester = OPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OPTConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/owlv2/test_modeling_owlv2.py b/tests/models/owlv2/test_modeling_owlv2.py index 48070c7bb86c6b..40836a79766eb3 100644 --- a/tests/models/owlv2/test_modeling_owlv2.py +++ b/tests/models/owlv2/test_modeling_owlv2.py @@ -148,6 +148,7 @@ class Owlv2VisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "google/owlv2-base-patch16-ensemble" def setUp(self): self.model_tester = Owlv2VisionModelTester(self) @@ -155,9 +156,6 @@ def setUp(self): self, config_class=Owlv2VisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="OWLV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -183,10 +181,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @@ -215,12 +209,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/owlv2-base-patch16-ensemble" - model = Owlv2VisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTTextModelTester with OwlViT->Owlv2 class Owlv2TextModelTester: @@ -319,18 +307,12 @@ class Owlv2TextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = False test_pruning = False test_head_masking = False + pretrained_checkpoint = "google/owlv2-base-patch16-ensemble" def setUp(self): self.model_tester = Owlv2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Owlv2TextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="OwlV2 does not support training yet") def test_training(self): pass @@ -363,12 +345,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/owlv2-base-patch16-ensemble" - model = Owlv2TextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class Owlv2ModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): @@ -444,14 +420,11 @@ class Owlv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "google/owlv2-base-patch16-ensemble" def setUp(self): 
self.model_tester = Owlv2ModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -574,12 +547,6 @@ def test_load_vision_text_config(self): text_config = Owlv2TextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "google/owlv2-base-patch16-ensemble" - model = Owlv2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - # Copied from tests.models.owlvit.test_modeling_owlvit.OwlViTForObjectDetectionTester with OwlViT->Owlv2, OWL-ViT->OwlV2, OWLVIT->OWLV2 class Owlv2ForObjectDetectionTester: @@ -650,14 +617,11 @@ class Owlv2ForObjectDetectionTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "google/owlv2-base-patch16-ensemble" def setUp(self): self.model_tester = Owlv2ForObjectDetectionTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -773,12 +737,6 @@ def _create_and_check_torchscript(self, config, inputs_dict): self.assertTrue(models_equal) - @slow - def test_model_from_pretrained(self): - model_name = "google/owlv2-base-patch16-ensemble" - model = Owlv2ForObjectDetection.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py index a08fae0bc6d10e..47d0f619c423d8 100644 --- a/tests/models/owlvit/test_modeling_owlvit.py +++ b/tests/models/owlvit/test_modeling_owlvit.py @@ -146,6 +146,7 @@ class OwlViTVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "google/owlvit-base-patch32" def setUp(self): self.model_tester = OwlViTVisionModelTester(self) @@ -153,9 +154,6 @@ def setUp(self): self, config_class=OwlViTVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="OWLVIT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -181,10 +179,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="OWL-ViT does not support training yet") def test_training(self): pass @@ -213,12 +207,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/owlvit-base-patch32" - model = OwlViTVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class OwlViTTextModelTester: def __init__( @@ -315,18 +303,12 @@ class OwlViTTextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = False test_pruning = False test_head_masking = False + 
pretrained_checkpoint = "google/owlvit-base-patch32" def setUp(self): self.model_tester = OwlViTTextModelTester(self) self.config_tester = ConfigTester(self, config_class=OwlViTTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="OWL-ViT does not support training yet") def test_training(self): pass @@ -359,12 +341,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/owlvit-base-patch32" - model = OwlViTTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class OwlViTModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): @@ -439,14 +415,11 @@ class OwlViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "google/owlvit-base-patch32" def setUp(self): self.model_tester = OwlViTModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -569,12 +542,6 @@ def test_load_vision_text_config(self): text_config = OwlViTTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "google/owlvit-base-patch32" - model = OwlViTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class OwlViTForObjectDetectionTester: def __init__(self, parent, is_training=True): @@ -643,14 +610,11 @@ class OwlViTForObjectDetectionTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_attention_outputs = False + pretrained_checkpoint = "google/owlvit-base-patch32" def setUp(self): self.model_tester = OwlViTForObjectDetectionTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -766,12 +730,6 @@ def _create_and_check_torchscript(self, config, inputs_dict): self.assertTrue(models_equal) - @slow - def test_model_from_pretrained(self): - model_name = "google/owlvit-base-patch32" - model = OwlViTForObjectDetection.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/patchtsmixer/test_modeling_patchtsmixer.py b/tests/models/patchtsmixer/test_modeling_patchtsmixer.py index aae75b8586a34a..0027f7ed980739 100644 --- a/tests/models/patchtsmixer/test_modeling_patchtsmixer.py +++ b/tests/models/patchtsmixer/test_modeling_patchtsmixer.py @@ -246,9 +246,6 @@ def setUp(self): common_properties=["hidden_size", "expansion_factor", "num_hidden_layers"], ) - def test_config(self): - self.config_tester.run_common_tests() - def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, 
return_labels=return_labels) diff --git a/tests/models/patchtst/test_modeling_patchtst.py b/tests/models/patchtst/test_modeling_patchtst.py index 3d0774f3726800..0744dca5d866ff 100644 --- a/tests/models/patchtst/test_modeling_patchtst.py +++ b/tests/models/patchtst/test_modeling_patchtst.py @@ -181,9 +181,6 @@ def setUp(self): prediction_length=self.model_tester.prediction_length, ) - def test_config(self): - self.config_tester.run_common_tests() - def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) diff --git a/tests/models/pegasus/test_modeling_flax_pegasus.py b/tests/models/pegasus/test_modeling_flax_pegasus.py index 62b9077f0d47b3..659cd00ad9694a 100644 --- a/tests/models/pegasus/test_modeling_flax_pegasus.py +++ b/tests/models/pegasus/test_modeling_flax_pegasus.py @@ -240,9 +240,6 @@ def setUp(self): self.model_tester = FlaxPegasusModelTester(self) self.config_tester = ConfigTester(self, config_class=PegasusConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: diff --git a/tests/models/pegasus/test_modeling_pegasus.py b/tests/models/pegasus/test_modeling_pegasus.py index 2bd102b904e376..634aae064387c8 100644 --- a/tests/models/pegasus/test_modeling_pegasus.py +++ b/tests/models/pegasus/test_modeling_pegasus.py @@ -257,9 +257,6 @@ def setUp(self): self.model_tester = PegasusModelTester(self) self.config_tester = ConfigTester(self, config_class=PegasusConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -573,9 +570,6 @@ def setUp( self.model_tester = PegasusStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=PegasusConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/pegasus_x/test_modeling_pegasus_x.py b/tests/models/pegasus_x/test_modeling_pegasus_x.py index c6b4b2c8648694..2f2e49be76ec89 100644 --- a/tests/models/pegasus_x/test_modeling_pegasus_x.py +++ b/tests/models/pegasus_x/test_modeling_pegasus_x.py @@ -229,9 +229,6 @@ def setUp(self): def test_torchscript_output_attentions(self): pass - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -861,9 +858,6 @@ def setUp( self.model_tester = PegasusXStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=PegasusXConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/perceiver/test_modeling_perceiver.py b/tests/models/perceiver/test_modeling_perceiver.py index b0ec8ac45c35cc..7b8f1fc0b36d12 100644 --- a/tests/models/perceiver/test_modeling_perceiver.py +++ 
b/tests/models/perceiver/test_modeling_perceiver.py @@ -312,6 +312,7 @@ class PerceiverModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas test_torchscript = False maxDiff = None + pretrained_checkpoint = "deepmind/language-perceiver" def setUp(self): self.model_tester = PerceiverModelTester(self) @@ -348,9 +349,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): ) return inputs_dict - def test_config(self): - self.config_tester.run_common_tests() - def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(model_class=PerceiverForMaskedLM) self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) @@ -836,12 +834,6 @@ def test_inputs_embeds(self): def test_load_with_mismatched_shapes(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "deepmind/language-perceiver" - model = PerceiverModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/persimmon/test_modeling_persimmon.py b/tests/models/persimmon/test_modeling_persimmon.py index 0d267fb86910d6..b797a58985cc83 100644 --- a/tests/models/persimmon/test_modeling_persimmon.py +++ b/tests/models/persimmon/test_modeling_persimmon.py @@ -309,15 +309,6 @@ def setUp(self): self.model_tester = PersimmonModelTester(self) self.config_tester = ConfigTester(self, config_class=PersimmonConfig, hidden_size=37) - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config - def test_config(self): - self.config_tester.run_common_tests() - - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model_various_embeddings def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() diff --git a/tests/models/phi/test_modeling_phi.py b/tests/models/phi/test_modeling_phi.py index 95b0b01c0a23d9..731916836f11d8 100644 --- a/tests/models/phi/test_modeling_phi.py +++ b/tests/models/phi/test_modeling_phi.py @@ -313,15 +313,6 @@ def setUp(self): self.model_tester = PhiModelTester(self) self.config_tester = ConfigTester(self, config_class=PhiConfig, hidden_size=37) - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_config - def test_config(self): - self.config_tester.run_common_tests() - - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Phi,llama->phi def test_phi_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/phi3/test_modeling_phi3.py b/tests/models/phi3/test_modeling_phi3.py index ce0a71878877b5..83398882605378 100644 --- a/tests/models/phi3/test_modeling_phi3.py +++ b/tests/models/phi3/test_modeling_phi3.py @@ -360,15 +360,6 @@ def setUp(self): self.model_tester = Phi3ModelTester(self) self.config_tester = ConfigTester(self, config_class=Phi3Config, hidden_size=37) - # Copied from 
tests.models.llama.test_modeling_llama.LlamaModelTest.test_config - def test_config(self): - self.config_tester.run_common_tests() - - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_model - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_llama_sequence_classification_model with Llama->Phi3,llama->phi3 def test_phi3_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/pix2struct/test_modeling_pix2struct.py b/tests/models/pix2struct/test_modeling_pix2struct.py index 2d762008cbbc3d..818c34d3cc77b9 100644 --- a/tests/models/pix2struct/test_modeling_pix2struct.py +++ b/tests/models/pix2struct/test_modeling_pix2struct.py @@ -148,6 +148,7 @@ class Pix2StructVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "google/pix2struct-textcaps-base" def setUp(self): self.model_tester = Pix2StructVisionModelTester(self) @@ -155,9 +156,6 @@ def setUp(self): self, config_class=Pix2StructVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Pix2StructVision does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -183,10 +181,6 @@ def test_forward_signature(self): expected_arg_names = ["flattened_patches"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`") def test_training(self): pass @@ -219,12 +213,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/pix2struct-textcaps-base" - model = Pix2StructVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class Pix2StructTextModelTester: def __init__( @@ -324,18 +312,12 @@ class Pix2StructTextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = False test_pruning = False test_head_masking = False + pretrained_checkpoint = "google/pix2struct-textcaps-base" def setUp(self): self.model_tester = Pix2StructTextModelTester(self) self.config_tester = ConfigTester(self, config_class=Pix2StructTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`") def test_training(self): pass @@ -368,12 +350,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/pix2struct-textcaps-base" - model = Pix2StructTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class Pix2StructModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): @@ -429,6 +405,13 @@ class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa def setUp(self): 
self.model_tester = Pix2StructModelTester(self) + self.config_tester = ConfigTester( + self, + config_class=Pix2StructConfig, + common_properties=["is_vqa", "pad_token_id", "eos_token_id", "decoder_start_token_id"], + # common_properties=["decoder_start_token_id", "is_vqa"], + has_text_modality=False, + ) def test_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/plbart/test_modeling_plbart.py b/tests/models/plbart/test_modeling_plbart.py index 7a0eebd7bd0204..87a41c77da37c1 100644 --- a/tests/models/plbart/test_modeling_plbart.py +++ b/tests/models/plbart/test_modeling_plbart.py @@ -258,9 +258,6 @@ def setUp(self): self.model_tester = PLBartModelTester(self) self.config_tester = ConfigTester(self, config_class=PLBartConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -659,9 +656,6 @@ def setUp(self): self.model_tester = PLBartStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=PLBartConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/poolformer/test_modeling_poolformer.py b/tests/models/poolformer/test_modeling_poolformer.py index d9a522cde6f435..2592ee5955c736 100644 --- a/tests/models/poolformer/test_modeling_poolformer.py +++ b/tests/models/poolformer/test_modeling_poolformer.py @@ -132,18 +132,12 @@ class PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa test_resize_embeddings = False test_torchscript = False has_attentions = False + pretrained_checkpoint = "sail/poolformer_s12" def setUp(self): self.model_tester = PoolFormerModelTester(self) self.config_tester = PoolFormerConfigTester(self, config_class=PoolFormerConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="PoolFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -205,12 +199,6 @@ def test_training(self): loss = model(**inputs).loss loss.backward() - @slow - def test_model_from_pretrained(self): - model_name = "sail/poolformer_s12" - model = PoolFormerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/pop2piano/test_modeling_pop2piano.py b/tests/models/pop2piano/test_modeling_pop2piano.py index 3a33b5a98128e2..8cc8c87c9e7f54 100644 --- a/tests/models/pop2piano/test_modeling_pop2piano.py +++ b/tests/models/pop2piano/test_modeling_pop2piano.py @@ -517,22 +517,16 @@ class Pop2PianoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True + pretrained_checkpoint = "sweetcocoa/pop2piano" def setUp(self): self.model_tester = Pop2PianoModelTester(self) self.config_tester = ConfigTester(self, config_class=Pop2PianoConfig, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_shift_right(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work @@ -604,12 +598,6 @@ def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_pop2piano_v1_1(config) - @slow - def test_model_from_pretrained(self): - model_name = "sweetcocoa/pop2piano" - model = Pop2PianoForConditionalGeneration.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_onnx def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() diff --git a/tests/models/prophetnet/test_modeling_prophetnet.py b/tests/models/prophetnet/test_modeling_prophetnet.py index 99329437239bb5..0fa10d9559a87c 100644 --- a/tests/models/prophetnet/test_modeling_prophetnet.py +++ b/tests/models/prophetnet/test_modeling_prophetnet.py @@ -920,13 +920,6 @@ def setUp(self): self.model_tester = ProphetNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_lm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) @@ -1130,9 +1123,6 @@ def setUp(self): self.model_tester = ProphetNetStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) @@ -1158,9 +1148,6 @@ def setUp(self): self.model_tester = ProphetNetStandaloneEncoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) - def test_config(self): - self.config_tester.run_common_tests() - @require_torch class ProphetNetModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/pvt/test_modeling_pvt.py b/tests/models/pvt/test_modeling_pvt.py index e5f5fd0c143214..10ac5305b64710 100644 --- a/tests/models/pvt/test_modeling_pvt.py +++ b/tests/models/pvt/test_modeling_pvt.py @@ -166,18 +166,12 @@ class PvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_torchscript = False has_attentions = False + pretrained_checkpoint = "Zetatech/pvt-tiny-224" def setUp(self): self.model_tester = PvtModelTester(self) self.config_tester = PvtConfigTester(self, config_class=PvtConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Pvt does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -250,12 +244,6 @@ def test_training(self): loss = model(**inputs).loss loss.backward() - @slow - def test_model_from_pretrained(self): - 
model_name = "Zetatech/pvt-tiny-224" - model = PvtModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class PvtModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/pvt_v2/test_modeling_pvt_v2.py b/tests/models/pvt_v2/test_modeling_pvt_v2.py index 334e890e7a8905..ee19c800237157 100644 --- a/tests/models/pvt_v2/test_modeling_pvt_v2.py +++ b/tests/models/pvt_v2/test_modeling_pvt_v2.py @@ -202,18 +202,12 @@ class PvtV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_torchscript = False has_attentions = False + pretrained_checkpoint = "OpenGVLab/pvt_v2_b0" def setUp(self): self.model_tester = PvtV2ModelTester(self) self.config_tester = PvtV2ConfigTester(self, config_class=PvtV2Config) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Pvt-V2 does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -309,12 +303,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - @slow - def test_model_from_pretrained(self): - model_name = "OpenGVLab/pvt_v2_b0" - model = PvtV2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class PvtV2ModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/qwen2/test_modeling_qwen2.py b/tests/models/qwen2/test_modeling_qwen2.py index 4d6c432f20424d..4325913e7f30b2 100644 --- a/tests/models/qwen2/test_modeling_qwen2.py +++ b/tests/models/qwen2/test_modeling_qwen2.py @@ -338,13 +338,6 @@ def setUp(self): self.model_tester = Qwen2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/qwen2_moe/test_modeling_qwen2_moe.py b/tests/models/qwen2_moe/test_modeling_qwen2_moe.py index 0425172a6fba4d..e8146d28d9107b 100644 --- a/tests/models/qwen2_moe/test_modeling_qwen2_moe.py +++ b/tests/models/qwen2_moe/test_modeling_qwen2_moe.py @@ -363,13 +363,6 @@ def setUp(self): self.model_tester = Qwen2MoeModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2MoeConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/recurrent_gemma/test_modeling_recurrent_gemma.py b/tests/models/recurrent_gemma/test_modeling_recurrent_gemma.py index 23dace68cf21a9..bec194dba06edc 100644 --- a/tests/models/recurrent_gemma/test_modeling_recurrent_gemma.py +++ b/tests/models/recurrent_gemma/test_modeling_recurrent_gemma.py @@ -315,13 +315,6 @@ def setUp(self): self.model_tester = RecurrentGemmaModelTester(self) self.config_tester = 
ConfigTester(self, config_class=RecurrentGemmaConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/reformer/test_modeling_reformer.py b/tests/models/reformer/test_modeling_reformer.py index 11c2e821975d02..28a69072a0f0e1 100644 --- a/tests/models/reformer/test_modeling_reformer.py +++ b/tests/models/reformer/test_modeling_reformer.py @@ -503,9 +503,6 @@ class ReformerTesterMixin: Reformer Local and Reformer LSH run essentially the same tests """ - def test_config(self): - self.config_tester.run_common_tests() - def test_reformer_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_reformer_model(*config_and_inputs) @@ -606,17 +603,12 @@ class ReformerLocalAttnModelTest(ReformerTesterMixin, GenerationTesterMixin, Mod test_headmasking = False test_torchscript = False test_sequence_classification_problem_types = True + pretrained_checkpoint = "google/reformer-crime-and-punishment" def setUp(self): self.model_tester = ReformerModelTester(self) self.config_tester = ConfigTester(self, config_class=ReformerConfig, hidden_size=37) - @slow - def test_model_from_pretrained(self): - model_name = "google/reformer-crime-and-punishment" - model = ReformerModelWithLMHead.from_pretrained(model_name) - self.assertIsNotNone(model) - def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): diff --git a/tests/models/regnet/test_modeling_flax_regnet.py b/tests/models/regnet/test_modeling_flax_regnet.py index 911d595c56e6a0..854c3e63240b2d 100644 --- a/tests/models/regnet/test_modeling_flax_regnet.py +++ b/tests/models/regnet/test_modeling_flax_regnet.py @@ -131,10 +131,6 @@ def test_config(self): def create_and_test_config_common_properties(self): return - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) diff --git a/tests/models/regnet/test_modeling_regnet.py b/tests/models/regnet/test_modeling_regnet.py index 8613eb7f3df48e..70314da492d190 100644 --- a/tests/models/regnet/test_modeling_regnet.py +++ b/tests/models/regnet/test_modeling_regnet.py @@ -133,6 +133,7 @@ class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "facebook/regnet-y-040" def setUp(self): self.model_tester = RegNetModelTester(self) @@ -143,9 +144,6 @@ def setUp(self): common_properties=["num_channels", "hidden_sizes"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="RegNet does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -154,10 +152,6 @@ def test_inputs_embeds(self): def test_model_get_set_embeddings(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - 
self.model_tester.create_and_check_model(*config_and_inputs) - def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -212,12 +206,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/regnet-y-040" - model = RegNetModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/rembert/test_modeling_rembert.py b/tests/models/rembert/test_modeling_rembert.py index 664888fcc0f3aa..ed3dbfe132c7ea 100644 --- a/tests/models/rembert/test_modeling_rembert.py +++ b/tests/models/rembert/test_modeling_rembert.py @@ -387,18 +387,12 @@ class RemBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) if is_torch_available() else {} ) + pretrained_checkpoint = "google/rembert" def setUp(self): self.model_tester = RemBertModelTester(self) self.config_tester = ConfigTester(self, config_class=RemBertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -461,12 +455,6 @@ def test_model_as_decoder_with_default_input_mask(self): encoder_attention_mask, ) - @slow - def test_model_from_pretrained(self): - model_name = "google/rembert" - model = RemBertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class RemBertModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/resnet/test_modeling_flax_resnet.py b/tests/models/resnet/test_modeling_flax_resnet.py index e9566e2e2fd5fb..0a6d1e77f3392b 100644 --- a/tests/models/resnet/test_modeling_flax_resnet.py +++ b/tests/models/resnet/test_modeling_flax_resnet.py @@ -130,10 +130,6 @@ def test_config(self): def create_and_test_config_common_properties(self): return - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) diff --git a/tests/models/resnet/test_modeling_resnet.py b/tests/models/resnet/test_modeling_resnet.py index a89e85bf320cdc..684e1a0bc4d393 100644 --- a/tests/models/resnet/test_modeling_resnet.py +++ b/tests/models/resnet/test_modeling_resnet.py @@ -178,6 +178,7 @@ class ResNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "microsoft/resnet-50" def setUp(self): self.model_tester = ResNetModelTester(self) @@ -188,9 +189,6 @@ def setUp(self): common_properties=["num_channels", "hidden_sizes"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="ResNet does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -199,10 +197,6 @@ def test_inputs_embeds(self): def test_model_get_set_embeddings(self): pass - def 
test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @@ -265,12 +259,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/resnet-50" - model = ResNetModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/roberta/test_modeling_roberta.py b/tests/models/roberta/test_modeling_roberta.py index ca557937803cff..d04e4b10dcf00f 100644 --- a/tests/models/roberta/test_modeling_roberta.py +++ b/tests/models/roberta/test_modeling_roberta.py @@ -395,18 +395,12 @@ class RobertaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi ) fx_compatible = True model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "FacebookAI/roberta-base" def setUp(self): self.model_tester = RobertaModelTester(self) self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -474,12 +468,6 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "FacebookAI/roberta-base" - model = RobertaModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_create_position_ids_respects_padding_index(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 diff --git a/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py b/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py index e64aaddbeb4653..811ddf5d6cec88 100644 --- a/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py +++ b/tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py @@ -394,21 +394,13 @@ class RobertaPreLayerNormModelTest(ModelTesterMixin, GenerationTesterMixin, Pipe ) fx_compatible = False model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "andreasmadsen/efficient_mlm_m0.15" # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.setUp with Roberta->RobertaPreLayerNorm def setUp(self): self.model_tester = RobertaPreLayerNormModelTester(self) self.config_tester = ConfigTester(self, config_class=RobertaPreLayerNormConfig, hidden_size=37) - # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_config - def test_config(self): - self.config_tester.run_common_tests() - - # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_model - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - 
self.model_tester.create_and_check_model(*config_and_inputs) - # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_model_various_embeddings def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() @@ -480,12 +472,6 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "andreasmadsen/efficient_mlm_m0.15" - model = RobertaPreLayerNormModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # Copied from tests.models.roberta.test_modeling_roberta.RobertaModelTest.test_create_position_ids_respects_padding_index with Roberta->RobertaPreLayerNorm def test_create_position_ids_respects_padding_index(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 diff --git a/tests/models/roc_bert/test_modeling_roc_bert.py b/tests/models/roc_bert/test_modeling_roc_bert.py index 55dc8453f49e15..6fcc271a884900 100644 --- a/tests/models/roc_bert/test_modeling_roc_bert.py +++ b/tests/models/roc_bert/test_modeling_roc_bert.py @@ -584,6 +584,7 @@ class RoCBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) if is_torch_available() else {} ) + pretrained_checkpoint = "weiweishi/roc-bert-base-zh" # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( @@ -633,13 +634,6 @@ def setUp(self): self.model_tester = RoCBertModelTester(self) self.config_tester = ConfigTester(self, config_class=RoCBertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -715,12 +709,6 @@ def test_model_as_decoder_with_default_input_mask(self): encoder_attention_mask, ) - @slow - def test_model_from_pretrained(self): - model_name = "weiweishi/roc-bert-base-zh" - model = RoCBertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class RoCBertModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/roformer/test_modeling_roformer.py b/tests/models/roformer/test_modeling_roformer.py index 1c22243b370723..2548b9c27a24c0 100644 --- a/tests/models/roformer/test_modeling_roformer.py +++ b/tests/models/roformer/test_modeling_roformer.py @@ -406,18 +406,12 @@ class RoFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase if is_torch_available() else {} ) + pretrained_checkpoint = "junnyu/roformer_chinese_small" def setUp(self): self.model_tester = RoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) @@ -478,12 +472,6 @@ def test_model_as_decoder_with_default_input_mask(self): encoder_attention_mask, ) - @slow - def 
test_model_from_pretrained(self): - model_name = "junnyu/roformer_chinese_small" - model = RoFormerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) diff --git a/tests/models/rt_detr/test_modeling_rt_detr.py b/tests/models/rt_detr/test_modeling_rt_detr.py index 65a417fe56f618..c94a14d864d055 100644 --- a/tests/models/rt_detr/test_modeling_rt_detr.py +++ b/tests/models/rt_detr/test_modeling_rt_detr.py @@ -285,9 +285,6 @@ def setUp(self): common_properties=["hidden_size", "num_attention_heads"], ) - def test_config(self): - self.config_tester.run_common_tests() - def test_rt_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_rt_detr_model(*config_and_inputs) diff --git a/tests/models/rwkv/test_modeling_rwkv.py b/tests/models/rwkv/test_modeling_rwkv.py index 5e82956e3efa6c..a3e843f5f7eb58 100644 --- a/tests/models/rwkv/test_modeling_rwkv.py +++ b/tests/models/rwkv/test_modeling_rwkv.py @@ -246,6 +246,7 @@ class RwkvModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin test_model_parallel = False test_pruning = False test_head_masking = False # Rwkv does not support head masking + pretrained_checkpoint = "RWKV/rwkv-4-169m-pile" def setUp(self): self.model_tester = RwkvModelTester(self) @@ -275,9 +276,6 @@ def assertInterval(self, member, container, msg=None): standardMsg = "%s not found in %s" % (safe_repr(member), safe_repr(container)) self.fail(self._formatMessage(msg, standardMsg)) - def test_config(self): - self.config_tester.run_common_tests() - def test_rwkv_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_rwkv_model(*config_and_inputs) @@ -387,12 +385,6 @@ def test_attention_outputs(self): [batch_size, seq_len, config.hidden_size], ) - @slow - def test_model_from_pretrained(self): - model_name = "RWKV/rwkv-4-169m-pile" - model = RwkvModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_beam_sample_generate_dict_output(self): # This model has a custom attention output shape AND config flags, let's skip those checks old_has_attentions = self.has_attentions diff --git a/tests/models/sam/test_modeling_sam.py b/tests/models/sam/test_modeling_sam.py index 65f653e287fd9a..006435828511e0 100644 --- a/tests/models/sam/test_modeling_sam.py +++ b/tests/models/sam/test_modeling_sam.py @@ -296,6 +296,7 @@ class SamModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False test_torchscript = False + pretrained_checkpoint = "facebook/sam-vit-huge" # TODO: Fix me @Arthur: `run_batch_test` in `tests/test_pipeline_mixin.py` not working def is_pipeline_test_to_skip( @@ -335,10 +336,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_get_image_features(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_get_image_features(*config_and_inputs) @@ -438,12 +435,6 @@ def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, nam # Use a slightly higher default tol to make the tests 
non-flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/sam-vit-huge" - model = SamModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def prepare_image(): img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" diff --git a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py index 79f705785541b6..840e5047eae9b5 100644 --- a/tests/models/seamless_m4t/test_modeling_seamless_m4t.py +++ b/tests/models/seamless_m4t/test_modeling_seamless_m4t.py @@ -359,26 +359,13 @@ class SeamlessM4TModelWithSpeechInputTest(ModelTesterMixin, unittest.TestCase): else () ) all_generative_model_classes = (SeamlessM4TForSpeechToText,) if is_torch_available() else () - + pretrained_checkpoint = "facebook/hf-seamless-m4t-medium" input_name = "input_features" def setUp(self): self.model_tester = SeamlessM4TModelTester(self, input_modality="speech") self.config_tester = ConfigTester(self, config_class=SeamlessM4TConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - - @slow - def test_model_from_pretrained(self): - model_name = "facebook/hf-seamless-m4t-medium" - model = SeamlessM4TModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] @@ -637,24 +624,12 @@ class SeamlessM4TModelWithTextInputTest( if is_torch_available() else {} ) + pretrained_checkpoint = "facebook/hf-seamless-m4t-medium" def setUp(self): self.model_tester = SeamlessM4TModelTester(self, input_modality="text") self.config_tester = ConfigTester(self, config_class=SeamlessM4TConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - - @slow - def test_model_from_pretrained(self): - model_name = "facebook/hf-seamless-m4t-medium" - model = SeamlessM4TModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py b/tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py index 1d11cbb247caca..61ff9400d58b3b 100644 --- a/tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py +++ b/tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py @@ -377,24 +377,12 @@ class SeamlessM4Tv2ModelWithSpeechInputTest(ModelTesterMixin, unittest.TestCase) all_generative_model_classes = (SeamlessM4Tv2ForSpeechToText,) if is_torch_available() else () input_name = "input_features" + pretrained_checkpoint = "facebook/seamless-m4t-v2-large" def setUp(self): self.model_tester = SeamlessM4Tv2ModelTester(self, input_modality="speech") self.config_tester = ConfigTester(self, config_class=SeamlessM4Tv2Config) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - 
self.model_tester.create_and_check_model(*config_and_inputs) - - @slow - def test_model_from_pretrained(self): - model_name = "facebook/seamless-m4t-v2-large" - model = SeamlessM4Tv2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - def _get_input_ids_and_config(self, batch_size=2): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] @@ -633,24 +621,12 @@ class SeamlessM4Tv2ModelWithTextInputTest(ModelTesterMixin, GenerationTesterMixi else () ) all_generative_model_classes = (SeamlessM4Tv2ForTextToText,) if is_torch_available() else () + pretrained_checkpoint = "facebook/seamless-m4t-v2-large" def setUp(self): self.model_tester = SeamlessM4Tv2ModelTester(self, input_modality="text") self.config_tester = ConfigTester(self, config_class=SeamlessM4Tv2Config) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - - @slow - def test_model_from_pretrained(self): - model_name = "facebook/seamless-m4t-v2-large" - model = SeamlessM4Tv2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/segformer/test_modeling_segformer.py b/tests/models/segformer/test_modeling_segformer.py index 9b5e04a5d02b19..a8a0191ac4a9ef 100644 --- a/tests/models/segformer/test_modeling_segformer.py +++ b/tests/models/segformer/test_modeling_segformer.py @@ -180,18 +180,12 @@ class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas test_head_masking = False test_pruning = False test_resize_embeddings = False + pretrained_checkpoint = "nvidia/segformer-b0-finetuned-ade-512-512" def setUp(self): self.model_tester = SegformerModelTester(self) self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_binary_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs) @@ -331,12 +325,6 @@ def test_training(self): loss = model(**inputs).loss loss.backward() - @slow - def test_model_from_pretrained(self): - model_name = "nvidia/segformer-b0-finetuned-ade-512-512" - model = SegformerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/seggpt/test_modeling_seggpt.py b/tests/models/seggpt/test_modeling_seggpt.py index 50d141aaeff0ec..c84e21159aa9eb 100644 --- a/tests/models/seggpt/test_modeling_seggpt.py +++ b/tests/models/seggpt/test_modeling_seggpt.py @@ -175,14 +175,12 @@ class SegGptModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_model_mapping = ( {"feature-extraction": SegGptModel, "mask-generation": SegGptModel} if is_torch_available() else {} ) + pretrained_checkpoint = "BAAI/seggpt-vit-large" def setUp(self): self.model_tester = SegGptModelTester(self) self.config_tester = ConfigTester(self, config_class=SegGptConfig, has_text_modality=False) - def test_config(self): - 
self.config_tester.run_common_tests() - @unittest.skip(reason="SegGpt does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -206,10 +204,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "prompt_pixel_values", "prompt_masks"] self.assertListEqual(arg_names[:3], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) @@ -315,12 +309,6 @@ def test_seggpt_loss(self): self.assertTrue(torch.allclose(loss_value, expected_loss_value, atol=1e-4)) - @slow - def test_model_from_pretrained(self): - model_name = "BAAI/seggpt-vit-large" - model = SegGptModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def prepare_img(): ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"] diff --git a/tests/models/sew/test_modeling_sew.py b/tests/models/sew/test_modeling_sew.py index 852f87c8f58a57..2aa9c29412c574 100644 --- a/tests/models/sew/test_modeling_sew.py +++ b/tests/models/sew/test_modeling_sew.py @@ -312,18 +312,12 @@ class SEWModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) test_pruning = False test_headmasking = False + pretrained_checkpoint = "asapp/sew-tiny-100k" def setUp(self): self.model_tester = SEWModelTester(self) self.config_tester = ConfigTester(self, config_class=SEWConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) @@ -455,11 +449,6 @@ def _mock_init_weights(self, module): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = SEWModel.from_pretrained("asapp/sew-tiny-100k") - self.assertIsNotNone(model) - @require_torch class SEWUtilsTest(unittest.TestCase): diff --git a/tests/models/sew_d/test_modeling_sew_d.py b/tests/models/sew_d/test_modeling_sew_d.py index 34374eb1e0e63b..2b1fc94868d389 100644 --- a/tests/models/sew_d/test_modeling_sew_d.py +++ b/tests/models/sew_d/test_modeling_sew_d.py @@ -334,18 +334,12 @@ class SEWDModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_headmasking = False test_torchscript = False + pretrained_checkpoint = "asapp/sew-d-tiny-100k" def setUp(self): self.model_tester = SEWDModelTester(self) self.config_tester = ConfigTester(self, config_class=SEWDConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) @@ -469,11 +463,6 @@ def test_save_load_low_cpu_mem_usage_checkpoints(self): def test_save_load_low_cpu_mem_usage_no_safetensors(self): pass - @slow - def test_model_from_pretrained(self): - model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k") - self.assertIsNotNone(model) - @require_torch class SEWDUtilsTest(unittest.TestCase): diff --git 
a/tests/models/siglip/test_modeling_siglip.py b/tests/models/siglip/test_modeling_siglip.py index 9d1e3109b313c3..57424d19c7125d 100644 --- a/tests/models/siglip/test_modeling_siglip.py +++ b/tests/models/siglip/test_modeling_siglip.py @@ -316,6 +316,7 @@ class SiglipVisionModelTest(SiglipModelTesterMixin, unittest.TestCase): test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False + pretrained_checkpoint = "google/siglip-base-patch16-224" def setUp(self): self.model_tester = SiglipVisionModelTester(self) @@ -323,9 +324,6 @@ def setUp(self): self, config_class=SiglipVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="SIGLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -351,10 +349,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training(self): pass @@ -383,12 +377,6 @@ def test_save_load_fast_init_to_base(self): def test_initialization(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/siglip-base-patch16-224" - model = SiglipVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @@ -495,21 +483,13 @@ class SiglipTextModelTest(SiglipModelTesterMixin, unittest.TestCase): test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "google/siglip-base-patch16-224" # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.setUp with CLIP->Siglip def setUp(self): self.model_tester = SiglipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=SiglipTextConfig, hidden_size=37) - # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_config - def test_config(self): - self.config_tester.run_common_tests() - - # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_model - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training(self): pass @@ -545,12 +525,6 @@ def test_save_load_fast_init_to_base(self): def test_initialization(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/siglip-base-patch16-224" - model = SiglipTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @@ -629,16 +603,12 @@ class SiglipModelTest(SiglipModelTesterMixin, PipelineTesterMixin, unittest.Test test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False + pretrained_checkpoint = "google/siglip-base-patch16-224" # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.setUp with CLIP->Siglip def setUp(self): self.model_tester = SiglipModelTester(self) - # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - 
self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_hidden_states_output def test_hidden_states_output(self): @@ -751,12 +721,6 @@ def test_load_vision_text_config(self): text_config = SiglipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "google/siglip-base-patch16-224" - model = SiglipModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_flash_attn @require_torch_gpu @mark.flash_attn_test diff --git a/tests/models/speech_to_text/test_modeling_speech_to_text.py b/tests/models/speech_to_text/test_modeling_speech_to_text.py index cef2a6781775a9..0bb36a2487cfcf 100644 --- a/tests/models/speech_to_text/test_modeling_speech_to_text.py +++ b/tests/models/speech_to_text/test_modeling_speech_to_text.py @@ -301,9 +301,6 @@ def setUp(self): self.config_tester = ConfigTester(self, config_class=Speech2TextConfig) self.maxDiff = 3000 - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/speecht5/test_modeling_speecht5.py b/tests/models/speecht5/test_modeling_speecht5.py index e13cf8dd56c3ef..9300bdcb651050 100644 --- a/tests/models/speecht5/test_modeling_speecht5.py +++ b/tests/models/speecht5/test_modeling_speecht5.py @@ -153,7 +153,7 @@ def get_config(self): decoder_ffn_dim=self.intermediate_size, ) - def create_and_check_model_forward(self, config, inputs_dict): + def create_and_check_model(self, config, inputs_dict): model = SpeechT5Model(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] @@ -183,13 +183,6 @@ def setUp(self): self.model_tester = SpeechT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model_forward(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model_forward(*config_and_inputs) - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -323,7 +316,7 @@ def get_config(self): vocab_size=self.vocab_size, ) - def create_and_check_model_forward(self, config, inputs_dict): + def create_and_check_model(self, config, inputs_dict): model = SpeechT5ForSpeechToText(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] @@ -381,9 +374,6 @@ def setUp(self): self.model_tester = SpeechT5ForSpeechToTextTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -394,10 +384,6 @@ def test_save_load_strict(self): model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) - def test_model_forward(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model_forward(*config_and_inputs) - def test_decoder_model_past_with_large_inputs(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) @@ -873,7 +859,7 @@ def get_config(self): speech_decoder_prenet_units=self.speech_decoder_prenet_units, ) - def create_and_check_model_forward(self, config, inputs_dict): + def create_and_check_model(self, config, inputs_dict): model = SpeechT5ForTextToSpeech(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] @@ -901,9 +887,6 @@ def setUp(self): self.model_tester = SpeechT5ForTextToSpeechTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -914,10 +897,6 @@ def test_save_load_strict(self): model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) - def test_model_forward(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model_forward(*config_and_inputs) - def test_model_forward_with_labels(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = SpeechT5ForTextToSpeech(config=config).to(torch_device).eval() @@ -1418,7 +1397,7 @@ def get_config(self): speech_decoder_prenet_units=self.speech_decoder_prenet_units, ) - def create_and_check_model_forward(self, config, inputs_dict): + def create_and_check_model(self, config, inputs_dict): model = SpeechT5ForSpeechToSpeech(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] @@ -1447,9 +1426,6 @@ def setUp(self): self.model_tester = SpeechT5ForSpeechToSpeechTester(self) self.config_tester = ConfigTester(self, config_class=SpeechT5Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -1460,10 +1436,6 @@ def test_save_load_strict(self): model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) - def test_model_forward(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model_forward(*config_and_inputs) - def test_model_forward_with_labels(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = SpeechT5ForSpeechToSpeech(config=config).to(torch_device).eval() @@ -1869,10 +1841,6 @@ def test_config(self): self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/splinter/test_modeling_splinter.py b/tests/models/splinter/test_modeling_splinter.py index b62571f189f25c..c1c366edc9a9f4 100644 --- a/tests/models/splinter/test_modeling_splinter.py +++ b/tests/models/splinter/test_modeling_splinter.py @@ -222,6 +222,7 @@ class SplinterModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase if is_torch_available() else {} ) + pretrained_checkpoint = "tau/splinter-base" # TODO: 
Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( @@ -270,13 +271,6 @@ def setUp(self): self.model_tester = SplinterModelTester(self) self.config_tester = ConfigTester(self, config_class=SplinterConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -325,12 +319,6 @@ def test_inputs_embeds(self): else: model(**inputs)[0] - @slow - def test_model_from_pretrained(self): - model_name = "tau/splinter-base" - model = SplinterModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # overwrite from common since `SplinterForPreTraining` could contain different number of question tokens in inputs. # When the batch is distributed to multiple devices, each replica could get different values for the maximal number # of question tokens (see `SplinterForPreTraining._prepare_question_positions()`), and the model returns different diff --git a/tests/models/squeezebert/test_modeling_squeezebert.py b/tests/models/squeezebert/test_modeling_squeezebert.py index e5323fe3e4bc5f..b0925562fee924 100644 --- a/tests/models/squeezebert/test_modeling_squeezebert.py +++ b/tests/models/squeezebert/test_modeling_squeezebert.py @@ -242,14 +242,12 @@ class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC test_pruning = False test_resize_embeddings = True test_head_masking = False + pretrained_checkpoint = "squeezebert/squeezebert-uncased" def setUp(self): self.model_tester = SqueezeBertModelTester(self) self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_squeezebert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*config_and_inputs) @@ -274,12 +272,6 @@ def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "squeezebert/squeezebert-uncased" - model = SqueezeBertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_sentencepiece @require_tokenizers diff --git a/tests/models/stablelm/test_modeling_stablelm.py b/tests/models/stablelm/test_modeling_stablelm.py index 36cad89bcfdf06..23a0cf6d89d417 100644 --- a/tests/models/stablelm/test_modeling_stablelm.py +++ b/tests/models/stablelm/test_modeling_stablelm.py @@ -313,13 +313,6 @@ def setUp(self): self.model_tester = StableLmModelTester(self) self.config_tester = ConfigTester(self, config_class=StableLmConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_stablelm_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 diff --git a/tests/models/starcoder2/test_modeling_starcoder2.py b/tests/models/starcoder2/test_modeling_starcoder2.py index c1c7d45d4f18d7..6e46a233ada26f 
100644 --- a/tests/models/starcoder2/test_modeling_starcoder2.py +++ b/tests/models/starcoder2/test_modeling_starcoder2.py @@ -319,13 +319,6 @@ def setUp(self): self.model_tester = Starcoder2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Starcoder2Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/superpoint/test_modeling_superpoint.py b/tests/models/superpoint/test_modeling_superpoint.py index 25c384a7955793..34d2ec009ec89c 100644 --- a/tests/models/superpoint/test_modeling_superpoint.py +++ b/tests/models/superpoint/test_modeling_superpoint.py @@ -120,7 +120,7 @@ class SuperPointModelTest(ModelTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False has_attentions = False - from_pretrained_id = "magic-leap-community/superpoint" + pretrained_checkpoint = "magic-leap-community/superpoint" def setUp(self): self.model_tester = SuperPointModelTester(self) @@ -132,9 +132,6 @@ def setUp(self): common_properties=["encoder_hidden_sizes", "decoder_hidden_size"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="SuperPointForKeypointDetection does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -217,11 +214,6 @@ def check_hidden_states_output(inputs_dict, config, model_class): check_hidden_states_output(inputs_dict, config, model_class) - @slow - def test_model_from_pretrained(self): - model = SuperPointForKeypointDetection.from_pretrained(self.from_pretrained_id) - self.assertIsNotNone(model) - def test_forward_labels_should_be_none(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: diff --git a/tests/models/swiftformer/test_modeling_swiftformer.py b/tests/models/swiftformer/test_modeling_swiftformer.py index 3b8b3eb5ed65f4..0147b97aeafcf9 100644 --- a/tests/models/swiftformer/test_modeling_swiftformer.py +++ b/tests/models/swiftformer/test_modeling_swiftformer.py @@ -147,6 +147,7 @@ class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC test_resize_embeddings = False test_head_masking = False has_attentions = False + pretrained_checkpoint = "MBZUAI/swiftformer-xs" def setUp(self): self.model_tester = SwiftFormerModelTester(self) @@ -159,9 +160,6 @@ def setUp(self): num_hidden_layers=12, ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="SwiftFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -174,20 +172,10 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "MBZUAI/swiftformer-xs" - model = SwiftFormerModel.from_pretrained(model_name) - 
self.assertIsNotNone(model) - @unittest.skip(reason="SwiftFormer does not output attentions") def test_attention_outputs(self): pass diff --git a/tests/models/swin/test_modeling_swin.py b/tests/models/swin/test_modeling_swin.py index 963de232d68eee..da7d7beeb8edb5 100644 --- a/tests/models/swin/test_modeling_swin.py +++ b/tests/models/swin/test_modeling_swin.py @@ -240,6 +240,7 @@ class SwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "microsoft/swin-tiny-patch4-window7-224" def setUp(self): self.model_tester = SwinModelTester(self) @@ -251,13 +252,6 @@ def setUp(self): common_properties=["image_size", "patch_size", "num_channels"], ) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): @@ -440,12 +434,6 @@ def test_hidden_states_output_with_padding(self): config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/swin-tiny-patch4-window7-224" - model = SwinModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/swin2sr/test_modeling_swin2sr.py b/tests/models/swin2sr/test_modeling_swin2sr.py index a1358c9de0bfec..c81ad2a27a3a80 100644 --- a/tests/models/swin2sr/test_modeling_swin2sr.py +++ b/tests/models/swin2sr/test_modeling_swin2sr.py @@ -172,6 +172,7 @@ class Swin2SRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) test_resize_embeddings = False test_head_masking = False test_torchscript = False + pretrained_checkpoint = "caidas/swin2SR-classical-sr-x2-64" def setUp(self): self.model_tester = Swin2SRModelTester(self) @@ -183,13 +184,6 @@ def setUp(self): common_properties=["image_size", "patch_size", "num_channels"], ) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_for_image_super_resolution(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_super_resolution(*config_and_inputs) @@ -232,12 +226,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - @slow - def test_model_from_pretrained(self): - model_name = "caidas/swin2SR-classical-sr-x2-64" - model = Swin2SRModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # overwriting because of `logit_scale` parameter def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/swinv2/test_modeling_swinv2.py b/tests/models/swinv2/test_modeling_swinv2.py index 5ef9a4b92e1388..8086187a0965cd 100644 --- a/tests/models/swinv2/test_modeling_swinv2.py +++ b/tests/models/swinv2/test_modeling_swinv2.py @@ -226,6 +226,7 @@ class 
Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "microsoft/swinv2-tiny-patch4-window8-256" def setUp(self): self.model_tester = Swinv2ModelTester(self) @@ -237,13 +238,6 @@ def setUp(self): common_properties=["image_size", "patch_size", "num_channels"], ) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) @@ -431,12 +425,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/swinv2-tiny-patch4-window8-256" - model = Swinv2Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="Swinv2 does not support feedforward chunking yet") def test_feed_forward_chunking(self): pass diff --git a/tests/models/switch_transformers/test_modeling_switch_transformers.py b/tests/models/switch_transformers/test_modeling_switch_transformers.py index 13215b2826fe0c..372e6ef1532e7a 100644 --- a/tests/models/switch_transformers/test_modeling_switch_transformers.py +++ b/tests/models/switch_transformers/test_modeling_switch_transformers.py @@ -575,22 +575,16 @@ class SwitchTransformersModelTest(ModelTesterMixin, GenerationTesterMixin, Pipel test_torchscript = False # The small SWITCH_TRANSFORMERS model needs higher percentages for CPU/MP tests model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "google/switch-base-8" def setUp(self): self.model_tester = SwitchTransformersModelTester(self) self.config_tester = ConfigTester(self, config_class=SwitchTransformersConfig, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work @@ -666,12 +660,6 @@ def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_switch_transformers_v1_1(config) - @slow - def test_model_from_pretrained(self): - model_name = "google/switch-base-8" - model = SwitchTransformersModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() @@ -837,13 +825,6 @@ def setUp(self): self.model_tester = SwitchTransformersEncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=SwitchTransformersConfig, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - 
self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() diff --git a/tests/models/t5/test_modeling_flax_t5.py b/tests/models/t5/test_modeling_flax_t5.py index 204b84989be0f5..e9f7cc06e01353 100644 --- a/tests/models/t5/test_modeling_flax_t5.py +++ b/tests/models/t5/test_modeling_flax_t5.py @@ -236,13 +236,6 @@ def setUp(self): self.model_tester = FlaxT5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work @@ -581,13 +574,6 @@ def setUp(self): self.model_tester = FlaxT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work diff --git a/tests/models/t5/test_modeling_t5.py b/tests/models/t5/test_modeling_t5.py index 93634ef2a67099..5d09561a45468d 100644 --- a/tests/models/t5/test_modeling_t5.py +++ b/tests/models/t5/test_modeling_t5.py @@ -577,6 +577,7 @@ class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, is_encoder_decoder = True # The small T5 model needs higher percentages for CPU/MP tests model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "google-t5/t5-small" def setUp(self): self.model_tester = T5ModelTester(self) @@ -714,17 +715,10 @@ def flatten_output(output): # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() - def test_config(self): - self.config_tester.run_common_tests() - def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work @@ -834,12 +828,6 @@ def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_t5_v1_1(config) - @slow - def test_model_from_pretrained(self): - model_name = "google-t5/t5-small" - model = T5Model.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() @@ -1040,13 +1028,6 @@ def setUp(self): self.model_tester = T5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) - def test_config(self): - 
self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() diff --git a/tests/models/table_transformer/test_modeling_table_transformer.py b/tests/models/table_transformer/test_modeling_table_transformer.py index 99d80b39e92b67..e3b11ac60240e2 100644 --- a/tests/models/table_transformer/test_modeling_table_transformer.py +++ b/tests/models/table_transformer/test_modeling_table_transformer.py @@ -241,9 +241,6 @@ def setUp(self): self.model_tester = TableTransformerModelTester(self) self.config_tester = ConfigTester(self, config_class=TableTransformerConfig, has_text_modality=False) - def test_config(self): - self.config_tester.run_common_tests() - def test_table_transformer_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_table_transformer_model(*config_and_inputs) diff --git a/tests/models/tapas/test_modeling_tapas.py b/tests/models/tapas/test_modeling_tapas.py index d4ca5e82e4c2e3..635408c39ad4af 100644 --- a/tests/models/tapas/test_modeling_tapas.py +++ b/tests/models/tapas/test_modeling_tapas.py @@ -500,13 +500,6 @@ def setUp(self): self.model_tester = TapasModelTester(self) self.config_tester = ConfigTester(self, config_class=TapasConfig, dim=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) diff --git a/tests/models/time_series_transformer/test_modeling_time_series_transformer.py b/tests/models/time_series_transformer/test_modeling_time_series_transformer.py index 5cd76b91612ed8..64f0b758658e7b 100644 --- a/tests/models/time_series_transformer/test_modeling_time_series_transformer.py +++ b/tests/models/time_series_transformer/test_modeling_time_series_transformer.py @@ -107,6 +107,14 @@ def get_config(self): scaling="std", # we need std to get non-zero `loc` ) + def create_and_check_model(self, config, input_values): + model = TimeSeriesTransformerModel(config=config) + model.to(torch_device) + model.eval() + result = model(**input_values) + future_dimension = input_values["future_values"].shape[-1] + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, future_dimension, self.hidden_size)) + def prepare_time_series_transformer_inputs_dict(self, config): _past_length = config.context_length + max(config.lags_sequence) @@ -197,9 +205,6 @@ def setUp(self): prediction_length=self.model_tester.prediction_length, ) - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: diff --git a/tests/models/timesformer/test_modeling_timesformer.py b/tests/models/timesformer/test_modeling_timesformer.py index 3eaed42efb4ee0..558bcd5f6b8c52 100644 --- a/tests/models/timesformer/test_modeling_timesformer.py +++ b/tests/models/timesformer/test_modeling_timesformer.py @@ -167,6 +167,7 @@ class TimesformerModelTest(ModelTesterMixin, 
PipelineTesterMixin, unittest.TestC test_torchscript = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "facebook/timesformer-base-finetuned-k400" def setUp(self): self.model_tester = TimesformerModelTester(self) @@ -185,9 +186,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="TimeSformer does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -201,20 +199,10 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_video_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/timesformer-base-finetuned-k400" - model = TimesformerModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model has no attentions") diff --git a/tests/models/timm_backbone/test_modeling_timm_backbone.py b/tests/models/timm_backbone/test_modeling_timm_backbone.py index 43c511e1efbf37..3d349aa0cab161 100644 --- a/tests/models/timm_backbone/test_modeling_timm_backbone.py +++ b/tests/models/timm_backbone/test_modeling_timm_backbone.py @@ -112,9 +112,6 @@ def setUp(self): self, config_class=self.config_class, has_text_modality=False, common_properties=["num_channels"] ) - def test_config(self): - self.config_tester.run_common_tests() - def test_timm_transformer_backbone_equivalence(self): timm_checkpoint = "resnet18" transformers_checkpoint = "microsoft/resnet-18" diff --git a/tests/models/trocr/test_modeling_trocr.py b/tests/models/trocr/test_modeling_trocr.py index aa9e09759415bc..429d472f3d8c5d 100644 --- a/tests/models/trocr/test_modeling_trocr.py +++ b/tests/models/trocr/test_modeling_trocr.py @@ -182,9 +182,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) diff --git a/tests/models/tvp/test_modeling_tvp.py b/tests/models/tvp/test_modeling_tvp.py index 2912b877842447..1fe434485b5aff 100644 --- a/tests/models/tvp/test_modeling_tvp.py +++ b/tests/models/tvp/test_modeling_tvp.py @@ -182,10 +182,6 @@ class TVPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): def setUp(self): self.model_tester = TVPModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="TVP does not use inputs_embeds") def test_inputs_embeds(self): pass diff --git a/tests/models/udop/test_modeling_udop.py b/tests/models/udop/test_modeling_udop.py index a3ae498606a379..6d1d5edfbba7cc 100644 --- a/tests/models/udop/test_modeling_udop.py +++ b/tests/models/udop/test_modeling_udop.py @@ -285,6 +285,7 @@ class UdopModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_cpu_offload = False # 
The small UDOP model needs higher percentages for CPU/MP tests model_split_percents = [0.8, 0.9] + pretrained_checkpoint = "microsoft/udop-large" def setUp(self): self.model_tester = UdopModelTester(self) @@ -300,13 +301,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) @@ -371,12 +365,6 @@ def test_forward_signature(self): def test_save_load_low_cpu_mem_usage(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/udop-large" - model = UdopForConditionalGeneration.from_pretrained(model_name) - self.assertIsNotNone(model) - class UdopEncoderOnlyModelTester: def __init__( @@ -527,13 +515,6 @@ def setUp(self): self.model_tester = UdopEncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=UdopConfig, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip( "Not currently compatible. Fails with - NotImplementedError: Cannot copy out of meta tensor; no data!" ) diff --git a/tests/models/umt5/test_modeling_umt5.py b/tests/models/umt5/test_modeling_umt5.py index 2bb841e65e6512..91cdc2c5c0123f 100644 --- a/tests/models/umt5/test_modeling_umt5.py +++ b/tests/models/umt5/test_modeling_umt5.py @@ -707,13 +707,6 @@ def setUp(self): self.model_tester = UMT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=UMT5Config, d_model=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() diff --git a/tests/models/unispeech/test_modeling_unispeech.py b/tests/models/unispeech/test_modeling_unispeech.py index d0a1d352243b19..36f0c69430e66f 100644 --- a/tests/models/unispeech/test_modeling_unispeech.py +++ b/tests/models/unispeech/test_modeling_unispeech.py @@ -315,6 +315,7 @@ class UniSpeechRobustModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.T ) test_pruning = False test_headmasking = False + pretrained_checkpoint = "microsoft/unispeech-large-1500h-cv" def setUp(self): self.model_tester = UniSpeechModelTester( @@ -322,13 +323,6 @@ def setUp(self): ) self.config_tester = ConfigTester(self, config_class=UniSpeechConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) @@ -538,11 +532,6 @@ def test_mask_time_feature_prob_ctc_single_batch(self): def test_feed_forward_chunking(self): pass - @slow - def 
test_model_from_pretrained(self): - model = UniSpeechModel.from_pretrained("microsoft/unispeech-large-1500h-cv") - self.assertIsNotNone(model) - @require_torch @require_soundfile diff --git a/tests/models/unispeech_sat/test_modeling_unispeech_sat.py b/tests/models/unispeech_sat/test_modeling_unispeech_sat.py index 1aa2da20d5ec85..eef027f06c97de 100644 --- a/tests/models/unispeech_sat/test_modeling_unispeech_sat.py +++ b/tests/models/unispeech_sat/test_modeling_unispeech_sat.py @@ -367,18 +367,12 @@ class UniSpeechSatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Test test_pruning = False test_headmasking = False test_torchscript = False + pretrained_checkpoint = "microsoft/unispeech-sat-base-plus" def setUp(self): self.model_tester = UniSpeechSatModelTester(self) self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) @@ -558,11 +552,6 @@ def test_mask_time_prob_ctc(self): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-base-plus") - self.assertIsNotNone(model) - @require_torch class UniSpeechSatRobustModelTest(ModelTesterMixin, unittest.TestCase): @@ -574,6 +563,7 @@ class UniSpeechSatRobustModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_headmasking = False test_torchscript = False + pretrained_checkpoint = "microsoft/unispeech-sat-large" def setUp(self): self.model_tester = UniSpeechSatModelTester( @@ -581,13 +571,6 @@ def setUp(self): ) self.config_tester = ConfigTester(self, config_class=UniSpeechSatConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) @@ -795,11 +778,6 @@ def test_mask_time_feature_prob_ctc_single_batch(self): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = UniSpeechSatModel.from_pretrained("microsoft/unispeech-sat-large") - self.assertIsNotNone(model) - @require_torch @require_soundfile diff --git a/tests/models/univnet/test_modeling_univnet.py b/tests/models/univnet/test_modeling_univnet.py index f26a423a1a2f5b..c0f6bbe7c589e9 100644 --- a/tests/models/univnet/test_modeling_univnet.py +++ b/tests/models/univnet/test_modeling_univnet.py @@ -130,13 +130,6 @@ def setUp(self): def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/upernet/test_modeling_upernet.py b/tests/models/upernet/test_modeling_upernet.py index 
464061915e8b3d..72c216d7145d58 100644 --- a/tests/models/upernet/test_modeling_upernet.py +++ b/tests/models/upernet/test_modeling_upernet.py @@ -158,6 +158,7 @@ class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) test_head_masking = False test_torchscript = False has_attentions = False + pretrained_checkpoint = "openmmlab/upernet-convnext-tiny" def setUp(self): self.model_tester = UperNetModelTester(self) @@ -169,9 +170,6 @@ def setUp(self): common_properties=["hidden_size"], ) - def test_config(self): - self.config_tester.run_common_tests() - def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @@ -275,12 +273,6 @@ def test_backbone_selection(self): def test_tied_model_weights_key_ignore(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "openmmlab/upernet-convnext-tiny" - model = UperNetForSemanticSegmentation.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of ADE20k def prepare_img(): diff --git a/tests/models/videomae/test_modeling_videomae.py b/tests/models/videomae/test_modeling_videomae.py index 801990331fea53..34e62dc3e02b5b 100644 --- a/tests/models/videomae/test_modeling_videomae.py +++ b/tests/models/videomae/test_modeling_videomae.py @@ -186,6 +186,7 @@ class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase test_torchscript = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "MCG-NJU/videomae-base" def setUp(self): self.model_tester = VideoMAEModelTester(self) @@ -213,9 +214,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="VideoMAE does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -229,20 +227,10 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "MCG-NJU/videomae-base" - model = VideoMAEModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not have attentions") diff --git a/tests/models/vilt/test_modeling_vilt.py b/tests/models/vilt/test_modeling_vilt.py index b7c2c604522e60..c163c9a4797ba3 100644 --- a/tests/models/vilt/test_modeling_vilt.py +++ b/tests/models/vilt/test_modeling_vilt.py @@ -234,6 +234,7 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_headmasking = False test_torchscript = False model_split_percents = [0.5, 0.8, 0.9] + pretrained_checkpoint = "dandelin/vilt-b32-mlm" # ViltForMaskedLM, ViltForQuestionAnswering and ViltForImagesAndTextClassification require special treatment def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -259,13 +260,6 @@ def setUp(self): self.model_tester = ViltModelTester(self) self.config_tester = ConfigTester(self, config_class=ViltConfig, 
hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @@ -532,12 +526,6 @@ def test_retain_grad_hidden_states_attentions(self): self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) - @slow - def test_model_from_pretrained(self): - model_name = "dandelin/vilt-b32-mlm" - model = ViltModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCase): diff --git a/tests/models/visual_bert/test_modeling_visual_bert.py b/tests/models/visual_bert/test_modeling_visual_bert.py index d24ea14b651083..08f644d20390d2 100644 --- a/tests/models/visual_bert/test_modeling_visual_bert.py +++ b/tests/models/visual_bert/test_modeling_visual_bert.py @@ -315,6 +315,7 @@ class VisualBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa pipeline_model_mapping = {"feature-extraction": VisualBertModel} if is_torch_available() else {} test_torchscript = False test_pruning = False + pretrained_checkpoint = "uclanlp/visualbert-vqa" def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) @@ -515,13 +516,6 @@ def check_hidden_states_output(inputs_dict, config, model_class): check_hidden_states_output(inputs_dict, config, model_class) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -548,12 +542,6 @@ def test_model_for_flickr(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_flickr() self.model_tester.create_and_check_for_flickr(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "uclanlp/visualbert-vqa" - model = VisualBertModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) diff --git a/tests/models/vit/test_modeling_flax_vit.py b/tests/models/vit/test_modeling_flax_vit.py index fb53caa3433ac2..71eceaa1889d9c 100644 --- a/tests/models/vit/test_modeling_flax_vit.py +++ b/tests/models/vit/test_modeling_flax_vit.py @@ -134,13 +134,6 @@ def setUp(self) -> None: self.model_tester = FlaxViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) diff --git a/tests/models/vit/test_modeling_vit.py 
b/tests/models/vit/test_modeling_vit.py index cace1d377034fb..5995cd9bbd0a08 100644 --- a/tests/models/vit/test_modeling_vit.py +++ b/tests/models/vit/test_modeling_vit.py @@ -207,6 +207,7 @@ class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "google/vit-base-patch16-224" def setUp(self): self.model_tester = ViTModelTester(self) @@ -219,9 +220,6 @@ def setUp(self): def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -235,10 +233,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) @@ -247,12 +241,6 @@ def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/vit-base-patch16-224" - model = ViTModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/vit_mae/test_modeling_vit_mae.py b/tests/models/vit_mae/test_modeling_vit_mae.py index 6020edca81a7ec..c0984b6c1805ec 100644 --- a/tests/models/vit_mae/test_modeling_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_vit_mae.py @@ -174,14 +174,12 @@ class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_torchscript = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "google/vit-base-patch16-224" def setUp(self): self.model_tester = ViTMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="ViTMAE does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -195,10 +193,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) @@ -278,12 +272,6 @@ def test_model_outputs_equivalence(self): def test_batching_equivalence(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "google/vit-base-patch16-224" - model = ViTMAEModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/vit_msn/test_modeling_vit_msn.py b/tests/models/vit_msn/test_modeling_vit_msn.py index 3d4262d44970fe..5ffef7ef62a182 100644 --- 
a/tests/models/vit_msn/test_modeling_vit_msn.py +++ b/tests/models/vit_msn/test_modeling_vit_msn.py @@ -162,14 +162,12 @@ class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_torchscript = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "facebook/vit-msn-small" def setUp(self): self.model_tester = ViTMSNModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="ViTMSN does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -183,20 +181,10 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/vit-msn-small" - model = ViTMSNModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/vitdet/test_modeling_vitdet.py b/tests/models/vitdet/test_modeling_vitdet.py index a9690eee23b8b6..3f83c40ae5d4a9 100644 --- a/tests/models/vitdet/test_modeling_vitdet.py +++ b/tests/models/vitdet/test_modeling_vitdet.py @@ -197,9 +197,6 @@ def test_disk_offload_safetensors(self): def test_model_parallelism(self): super().test_model_parallelism() - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="VitDet does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -213,10 +210,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) diff --git a/tests/models/vitmatte/test_modeling_vitmatte.py b/tests/models/vitmatte/test_modeling_vitmatte.py index 4f96eb8b11ffb8..027671abe6593e 100644 --- a/tests/models/vitmatte/test_modeling_vitmatte.py +++ b/tests/models/vitmatte/test_modeling_vitmatte.py @@ -143,6 +143,7 @@ class VitMatteModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "hustvl/vitmatte-small-composition-1k" def setUp(self): self.model_tester = VitMatteModelTester(self) @@ -154,9 +155,6 @@ def setUp(self): common_properties=["hidden_size"], ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="VitMatte does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -185,16 +183,6 @@ def test_training_gradient_checkpointing_use_reentrant_false(self): def test_model_get_set_embeddings(self): pass - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - - @slow - def test_model_from_pretrained(self): - 
model_name = "hustvl/vitmatte-small-composition-1k" - model = VitMatteForImageMatting.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="ViTMatte does not support retaining gradient on attention logits") def test_retain_grad_hidden_states_attentions(self): pass diff --git a/tests/models/vits/test_modeling_vits.py b/tests/models/vits/test_modeling_vits.py index 99ba51e35f6663..a4c23b11063337 100644 --- a/tests/models/vits/test_modeling_vits.py +++ b/tests/models/vits/test_modeling_vits.py @@ -173,9 +173,6 @@ def setUp(self): self.model_tester = VitsModelTester(self) self.config_tester = ConfigTester(self, config_class=VitsConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - # TODO: @ydshieh @is_flaky(description="torch 2.2.0 gives `Timeout >120.0s`") def test_pipeline_feature_extraction(self): diff --git a/tests/models/vivit/test_modeling_vivit.py b/tests/models/vivit/test_modeling_vivit.py index 19a179a6a3e030..0d84ad78763dd4 100644 --- a/tests/models/vivit/test_modeling_vivit.py +++ b/tests/models/vivit/test_modeling_vivit.py @@ -169,6 +169,7 @@ class VivitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_torchscript = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "google/vivit-b-16x2-kinetics400" def setUp(self): self.model_tester = VivitModelTester(self) @@ -185,9 +186,6 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="Vivit does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -213,20 +211,10 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "head_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_for_video_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "google/vivit-b-16x2-kinetics400" - model = VivitModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True diff --git a/tests/models/wav2vec2/test_modeling_wav2vec2.py b/tests/models/wav2vec2/test_modeling_wav2vec2.py index ff7a85218d3a00..e0f89ab7c1e4d2 100644 --- a/tests/models/wav2vec2/test_modeling_wav2vec2.py +++ b/tests/models/wav2vec2/test_modeling_wav2vec2.py @@ -505,18 +505,12 @@ class Wav2Vec2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase fx_compatible = True test_pruning = False test_headmasking = False + pretrained_checkpoint = "facebook/wav2vec2-base-960h" def setUp(self): self.model_tester = Wav2Vec2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) @@ 
-718,11 +712,6 @@ def test_mask_time_prob_ctc(self): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") - self.assertIsNotNone(model) - # Wav2Vec2 cannot be torchscripted because of group norm. def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): # TODO: fix it @@ -850,6 +839,7 @@ class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase): ) test_pruning = False test_headmasking = False + pretrained_checkpoint = "facebook/wav2vec2-base-960h" def setUp(self): self.model_tester = Wav2Vec2ModelTester( @@ -857,13 +847,6 @@ def setUp(self): ) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) @@ -1278,11 +1261,6 @@ def get_logits(model, input_features): self.assertTrue(torch.allclose(logits, logits_2, atol=1e-3)) - @slow - def test_model_from_pretrained(self): - model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") - self.assertIsNotNone(model) - @require_torch class Wav2Vec2UtilsTest(unittest.TestCase): diff --git a/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py b/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py index 80237fea9d1e43..0c53c878c0ec14 100644 --- a/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py +++ b/tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py @@ -423,7 +423,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch -# Copied from tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerModelTest with Conformer->Bert, input_values->input_features +# Copied from tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerModelTest with Conformer->Bert,input_values->input_features,facebook/wav2vec2-conformer-rel-pos-large->facebook/w2v-bert-2.0 class Wav2Vec2BertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): # Ignore copy all_model_classes = ( @@ -447,22 +447,16 @@ class Wav2Vec2BertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Test if is_torch_available() else {} ) - test_pruning = False test_headmasking = False test_torchscript = False + pretrained_checkpoint = "facebook/w2v-bert-2.0" + def setUp(self): self.model_tester = Wav2Vec2BertModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2BertConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model(*config_and_inputs) @@ -683,12 +677,6 @@ def test_mask_time_prob_ctc(self): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - # Ignore copy - model = Wav2Vec2BertModel.from_pretrained("facebook/w2v-bert-2.0") - self.assertIsNotNone(model) - @require_torch # Copied from 
tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer.Wav2Vec2ConformerUtilsTest with Conformer->Bert, input_values->input_features diff --git a/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py b/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py index 096d1368ed02cb..74a068b142cd17 100644 --- a/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py +++ b/tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py @@ -428,6 +428,7 @@ class Wav2Vec2ConformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest if is_torch_available() else () ) + pipeline_model_mapping = ( { "audio-classification": Wav2Vec2ConformerForSequenceClassification, @@ -441,17 +442,12 @@ class Wav2Vec2ConformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest test_headmasking = False test_torchscript = False + pretrained_checkpoint = "facebook/wav2vec2-conformer-rel-pos-large" + def setUp(self): self.model_tester = Wav2Vec2ConformerModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2ConformerConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_with_relative(self): config_and_inputs = self.model_tester.prepare_config_and_inputs(position_embeddings_type="relative") self.model_tester.create_and_check_model(*config_and_inputs) @@ -685,11 +681,6 @@ def test_mask_time_prob_ctc(self): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = Wav2Vec2ConformerModel.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") - self.assertIsNotNone(model) - @require_torch class Wav2Vec2ConformerUtilsTest(unittest.TestCase): diff --git a/tests/models/wavlm/test_modeling_wavlm.py b/tests/models/wavlm/test_modeling_wavlm.py index b20792d83545d8..1b8d86b8edf7ef 100644 --- a/tests/models/wavlm/test_modeling_wavlm.py +++ b/tests/models/wavlm/test_modeling_wavlm.py @@ -335,18 +335,12 @@ class WavLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ) test_pruning = False test_headmasking = False + pretrained_checkpoint = "microsoft/wavlm-base-plus" def setUp(self): self.model_tester = WavLMModelTester(self) self.config_tester = ConfigTester(self, config_class=WavLMConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) @@ -480,11 +474,6 @@ def _mock_init_weights(self, module): def test_feed_forward_chunking(self): pass - @slow - def test_model_from_pretrained(self): - model = WavLMModel.from_pretrained("microsoft/wavlm-base-plus") - self.assertIsNotNone(model) - @require_torch @require_torchaudio diff --git a/tests/models/whisper/test_modeling_flax_whisper.py b/tests/models/whisper/test_modeling_flax_whisper.py index 065c6536481d3c..29b60eb338982a 100644 --- a/tests/models/whisper/test_modeling_flax_whisper.py +++ b/tests/models/whisper/test_modeling_flax_whisper.py @@ -206,9 +206,6 @@ def setUp(self): ) self.config_tester = ConfigTester(self, config_class=WhisperConfig) - def test_config(self): - 
self.config_tester.run_common_tests() - # overwrite because of `input_features` def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -842,9 +839,6 @@ def setUp(self): ) self.config_tester = ConfigTester(self, config_class=WhisperConfig) - def test_config(self): - self.config_tester.run_common_tests() - # overwrite because of `input_features` def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index b4e71ca72e56ed..b66adb2c6390f4 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -296,7 +296,7 @@ def get_subsampled_output_lengths(self, input_lengths): return input_lengths - def create_and_check_model_forward(self, config, inputs_dict, freeze_encoder=False): + def create_and_check_model(self, config, inputs_dict, freeze_encoder=False): model = WhisperModel(config=config).to(torch_device).eval() if freeze_encoder: @@ -440,9 +440,6 @@ def setUp(self): self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 - def test_config(self): - self.config_tester.run_common_tests() - def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: @@ -453,13 +450,9 @@ def test_save_load_strict(self): model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) - def test_model_forward(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model_forward(*config_and_inputs) - def test_model_forward_with_frozen_encoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model_forward(*config_and_inputs, freeze_encoder=True) + self.model_tester.create_and_check_model(*config_and_inputs, freeze_encoder=True) def test_requires_grad_with_frozen_encoder(self): config = self.model_tester.get_config() @@ -3490,7 +3483,7 @@ def get_subsampled_output_lengths(self, input_lengths): def encoder_seq_length(self): return self.get_subsampled_output_lengths(self.seq_length) - def create_and_check_model_forward(self, config, inputs_dict, use_weighted_layer_sum=False): + def create_and_check_model(self, config, inputs_dict, use_weighted_layer_sum=False): config.use_weighted_layer_sum = use_weighted_layer_sum model = WhisperForAudioClassification(config=config) model.to(torch_device).eval() @@ -3518,9 +3511,6 @@ def setUp(self): self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 - def test_config(self): - self.config_tester.run_common_tests() - def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -3533,13 +3523,9 @@ def test_forward_signature(self): expected_arg_names = ["input_features", "head_mask", "encoder_outputs"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) - def test_forward_pass(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model_forward(*config_and_inputs) - def test_forward_pass_weighted_layer_sum(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model_forward(*config_and_inputs, use_weighted_layer_sum=True) + 
self.model_tester.create_and_check_model(*config_and_inputs, use_weighted_layer_sum=True) @unittest.skip(reason="Some undefined behavior encountered with tiny versions of this model. Skip for now.") def test_cpu_offload(self): @@ -3874,6 +3860,20 @@ def prepare_config_and_inputs_for_common(self): return config, inputs_dict + def prepare_config_and_inputs_for_decoder(self): + config, input_features = self.prepare_config_and_inputs() + input_ids = input_features["input_ids"] + encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size]) + + return (config, input_ids, encoder_hidden_states) + + def create_and_check_model(self, config, input_values): + model = WhisperDecoder(config=config).to(torch_device).eval() + result = model(**input_values) + self.parent.assertTrue( + result["last_hidden_state"].shape == (self.batch_size, self.seq_length, self.hidden_size) + ) + def create_and_check_decoder_model_past(self, config, input_ids): config.use_cache = True model = WhisperDecoder(config=config).to(torch_device).eval() @@ -3959,9 +3959,6 @@ def setUp(self): self.model_tester = WhisperStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=WhisperConfig) - def test_config(self): - self.config_tester.run_common_tests() - def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config, inputs_dict = config_and_inputs diff --git a/tests/models/x_clip/test_modeling_x_clip.py b/tests/models/x_clip/test_modeling_x_clip.py index 8b91019bae18cc..7d9c6d303ffc10 100644 --- a/tests/models/x_clip/test_modeling_x_clip.py +++ b/tests/models/x_clip/test_modeling_x_clip.py @@ -147,6 +147,7 @@ class XCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "microsoft/xclip-base-patch32" def setUp(self): self.model_tester = XCLIPVisionModelTester(self) @@ -154,9 +155,6 @@ def setUp(self): self, config_class=XCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="X-CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -182,10 +180,6 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -214,12 +208,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/xclip-base-patch32" - model = XCLIPVisionModel.from_pretrained(model_name) - self.assertIsNotNone(model) - def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -410,18 +398,12 @@ class XCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): fx_compatible = False test_pruning = False test_head_masking = False + pretrained_checkpoint = "microsoft/xclip-base-patch32" def setUp(self): self.model_tester = XCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=XCLIPTextConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = 
self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip def test_training(self): pass @@ -454,12 +436,6 @@ def test_save_load_fast_init_from_base(self): def test_save_load_fast_init_to_base(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/xclip-base-patch32" - model = XCLIPTextModel.from_pretrained(model_name) - self.assertIsNotNone(model) - class XCLIPModelTester: def __init__( @@ -544,14 +520,11 @@ class XCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_attention_outputs = False test_torchscript = False maxdiff = None + pretrained_checkpoint = "microsoft/xclip-base-patch32" def setUp(self): self.model_tester = XCLIPModelTester(self) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @@ -684,12 +657,6 @@ def test_load_vision_text_config(self): text_config = XCLIPTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) - @slow - def test_model_from_pretrained(self): - model_name = "microsoft/xclip-base-patch32" - model = XCLIPModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on a spaghetti video def prepare_video(): diff --git a/tests/models/xglm/test_modeling_xglm.py b/tests/models/xglm/test_modeling_xglm.py index 07a1e579c60b1f..8c844e9e6944d0 100644 --- a/tests/models/xglm/test_modeling_xglm.py +++ b/tests/models/xglm/test_modeling_xglm.py @@ -291,14 +291,12 @@ class XGLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin fx_compatible = True test_missing_keys = False test_pruning = False + pretrained_checkpoint = "facebook/xglm-564M" def setUp(self): self.model_tester = XGLMModelTester(self) self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37) - def test_config(self): - self.config_tester.run_common_tests() - def test_xglm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_model(*config_and_inputs) @@ -327,12 +325,6 @@ def test_xglm_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_weight_initialization(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "facebook/xglm-564M" - model = XGLMModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): super().test_model_parallelism() diff --git a/tests/models/xlm/test_modeling_xlm.py b/tests/models/xlm/test_modeling_xlm.py index 268ba79d5931ff..32174fe434378f 100644 --- a/tests/models/xlm/test_modeling_xlm.py +++ b/tests/models/xlm/test_modeling_xlm.py @@ -159,7 +159,7 @@ def get_config(self): bos_token_id=self.bos_token_id, ) - def create_and_check_xlm_model( + def create_and_check_model( self, config, input_ids, @@ -390,6 +390,7 @@ class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, if is_torch_available() else {} ) + pretrained_checkpoint = "FacebookAI/xlm-mlm-en-2048" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -426,13 +427,6 @@ def setUp(self): self.model_tester = XLMModelTester(self) 
self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_xlm_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_xlm_model(*config_and_inputs) - # Copied from tests/models/distilbert/test_modeling_distilbert.py with Distilbert->XLM def test_xlm_model_with_sinusoidal_encodings(self): config = XLMConfig(sinusoidal_embeddings=True) @@ -511,12 +505,6 @@ def _check_hidden_states_for_generate( ) pass - @slow - def test_model_from_pretrained(self): - model_name = "FacebookAI/xlm-mlm-en-2048" - model = XLMModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class XLMModelLanguageGenerationTest(unittest.TestCase): diff --git a/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py b/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py index a73f5618ff7eda..fddd6d4a91af91 100644 --- a/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py +++ b/tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py @@ -403,13 +403,6 @@ def setUp(self): self.model_tester = XLMRobertaXLModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMRobertaXLConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/xlnet/test_modeling_xlnet.py b/tests/models/xlnet/test_modeling_xlnet.py index 0d785d4a1fc85d..05633be7ce007c 100644 --- a/tests/models/xlnet/test_modeling_xlnet.py +++ b/tests/models/xlnet/test_modeling_xlnet.py @@ -170,7 +170,7 @@ def set_seed(self): random.seed(self.seed) torch.manual_seed(self.seed) - def create_and_check_xlnet_base_model( + def create_and_check_model( self, config, input_ids_1, @@ -540,6 +540,7 @@ class XLNetModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixi ) fx_compatible = False test_pruning = False + pretrained_checkpoint = "xlnet/xlnet-base-cased" # TODO: Fix the failed tests def is_pipeline_test_to_skip( @@ -569,14 +570,6 @@ def setUp(self): self.model_tester = XLNetModelTester(self) self.config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_xlnet_base_model(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs) - def test_xlnet_base_model_use_mems(self): # checking that in auto-regressive mode, `use_mems` gives the same results self.model_tester.set_seed() @@ -686,12 +679,6 @@ def _check_attentions_for_generate( [expected_shape] * len(iter_attentions), ) - @slow - def test_model_from_pretrained(self): - model_name = "xlnet/xlnet-base-cased" - model = XLNetModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @require_torch class XLNetModelLanguageGenerationTest(unittest.TestCase): diff --git a/tests/models/xmod/test_modeling_xmod.py b/tests/models/xmod/test_modeling_xmod.py index 0140d007d06897..ac324e53893e2c 100644 --- a/tests/models/xmod/test_modeling_xmod.py +++ b/tests/models/xmod/test_modeling_xmod.py @@ -397,13 +397,6 @@ def 
setUp(self): self.model_tester = XmodModelTester(self) self.config_tester = ConfigTester(self, config_class=XmodConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: diff --git a/tests/models/yolos/test_modeling_yolos.py b/tests/models/yolos/test_modeling_yolos.py index 5c929aeb409944..e7a5621c02811b 100644 --- a/tests/models/yolos/test_modeling_yolos.py +++ b/tests/models/yolos/test_modeling_yolos.py @@ -178,6 +178,7 @@ class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_resize_embeddings = False test_head_masking = False test_torchscript = False + pretrained_checkpoint = "hustvl/yolos-small" # special case for head model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): @@ -203,9 +204,6 @@ def setUp(self): self.model_tester = YolosModelTester(self) self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="YOLOS does not use inputs_embeds") def test_inputs_embeds(self): pass @@ -219,10 +217,6 @@ def test_model_get_set_embeddings(self): x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True @@ -318,12 +312,6 @@ def test_for_object_detection(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - model_name = "hustvl/yolos-small" - model = YolosModel.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/models/yoso/test_modeling_yoso.py b/tests/models/yoso/test_modeling_yoso.py index 4cfb7e22a5d2ff..e9a55ee3b08e6f 100644 --- a/tests/models/yoso/test_modeling_yoso.py +++ b/tests/models/yoso/test_modeling_yoso.py @@ -309,18 +309,12 @@ class YosoModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): if is_torch_available() else {} ) + pretrained_checkpoint = "uw-madison/yoso-4096" def setUp(self): self.model_tester = YosoModelTester(self) self.config_tester = ConfigTester(self, config_class=YosoConfig, hidden_size=37) - def test_config(self): - self.config_tester.run_common_tests() - - def test_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_model(*config_and_inputs) - def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: @@ -347,12 +341,6 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) - @slow - def test_model_from_pretrained(self): - 
model_name = "uw-madison/yoso-4096" - model = YosoModel.from_pretrained(model_name) - self.assertIsNotNone(model) - @unittest.skip(reason="This model does not output attentions") def test_attention_outputs(self): return diff --git a/tests/models/zoedepth/test_modeling_zoedepth.py b/tests/models/zoedepth/test_modeling_zoedepth.py index 571c44f2f47266..ba2f082c36a4dd 100644 --- a/tests/models/zoedepth/test_modeling_zoedepth.py +++ b/tests/models/zoedepth/test_modeling_zoedepth.py @@ -145,6 +145,7 @@ class ZoeDepthModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase test_pruning = False test_resize_embeddings = False test_head_masking = False + pretrained_checkpoint = "Intel/zoedepth-nyu" def setUp(self): self.model_tester = ZoeDepthModelTester(self) @@ -152,9 +153,6 @@ def setUp(self): self, config_class=ZoeDepthConfig, has_text_modality=False, hidden_size=37, common_properties=[] ) - def test_config(self): - self.config_tester.run_common_tests() - @unittest.skip(reason="ZoeDepth with AutoBackbone does not have a base model and hence no input_embeddings") def test_inputs_embeds(self): pass @@ -195,12 +193,6 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass - @slow - def test_model_from_pretrained(self): - model_name = "Intel/zoedepth-nyu" - model = ZoeDepthForDepthEstimation.from_pretrained(model_name) - self.assertIsNotNone(model) - # We will verify our results on an image of cute cats def prepare_img(): diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 4d96b229284089..9fedd6afe029a1 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -188,6 +188,7 @@ class ModelTesterMixin: is_encoder_decoder = False has_attentions = True model_split_percents = [0.5, 0.7, 0.9] + pretrained_checkpoint = None def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) @@ -256,6 +257,13 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): return inputs_dict + def test_config(self): + self.config_tester.run_common_tests() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -4865,6 +4873,14 @@ def test_forward_with_num_logits_to_keep(self): # Assert the last tokens are actually the same (except for the natural fluctuation due to order of FP ops) self.assertTrue(torch.allclose(all_logits[:, -1:, :], last_token_logits, atol=1e-5)) + @slow + def test_model_from_pretrained(self): + self.assertIsNotNone(self.pretrained_checkpoint) + # Just check we can load at least one model from the checkpoint + for model_class in self.all_model_classes[0]: + model = model_class.from_pretrained(self.pretrained_checkpoint) + self.assertIsNotNone(model) + global_rng = random.Random()