From 74511cd3983df8b1fd6d908c5d79dc46e91d047f Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Thu, 19 Dec 2024 17:24:29 +0530 Subject: [PATCH 1/3] misc lora test improvements. --- tests/lora/test_lora_layers_ltx_video.py | 35 ------------------------ 1 file changed, 35 deletions(-) diff --git a/tests/lora/test_lora_layers_ltx_video.py b/tests/lora/test_lora_layers_ltx_video.py index c9c877b202ef..fd211ba90751 100644 --- a/tests/lora/test_lora_layers_ltx_video.py +++ b/tests/lora/test_lora_layers_ltx_video.py @@ -107,41 +107,6 @@ def get_dummy_inputs(self, with_generator=True): return noise, input_ids, pipeline_inputs - @skip_mps - @pytest.mark.xfail( - condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"), - reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.", - strict=True, - ) - def test_lora_fuse_nan(self): - for scheduler_cls in self.scheduler_classes: - components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) - pipe = self.pipeline_class(**components) - pipe = pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - _, _, inputs = self.get_dummy_inputs(with_generator=False) - - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - - self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") - - # corrupt one LoRA weight with `inf` values - with torch.no_grad(): - pipe.transformer.transformer_blocks[0].attn1.to_q.lora_A["adapter-1"].weight += float("inf") - - # with `safe_fusing=True` we should see an Error - with self.assertRaises(ValueError): - pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True) - - # without we should not see an error, but every image will be black - pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False) - - out = pipe( - "test", num_inference_steps=2, max_sequence_length=inputs["max_sequence_length"], output_type="np" - )[0] - - self.assertTrue(np.isnan(out).all()) - def test_simple_inference_with_text_lora_denoiser_fused_multi(self): super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) From 8d0ef11120e3cfb42c01ee64e3c3051a13b9c7fa Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Thu, 19 Dec 2024 18:24:48 +0530 Subject: [PATCH 2/3] updates --- tests/lora/test_lora_layers_flux.py | 76 -------------- tests/lora/test_lora_layers_ltx_video.py | 12 +-- tests/lora/utils.py | 120 +++++++++++++++++++++-- 3 files changed, 116 insertions(+), 92 deletions(-) diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py index b28fdde91574..7ba37164cdfe 100644 --- a/tests/lora/test_lora_layers_flux.py +++ b/tests/lora/test_lora_layers_flux.py @@ -36,7 +36,6 @@ numpy_cosine_similarity_distance, require_big_gpu_with_torch_cuda, require_peft_backend, - require_peft_version_greater, require_torch_gpu, slow, torch_device, @@ -355,81 +354,6 @@ def test_lora_parameter_expanded_shapes(self): with self.assertRaises(NotImplementedError): pipe.load_lora_weights(lora_state_dict, "adapter-1") - @require_peft_version_greater("0.13.2") - def test_lora_B_bias(self): - components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) - pipe = self.pipeline_class(**components) - pipe = pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - # keep track of the bias values of the base layers to perform checks later. 
- bias_values = {} - for name, module in pipe.transformer.named_modules(): - if any(k in name for k in ["to_q", "to_k", "to_v", "to_out.0"]): - if module.bias is not None: - bias_values[name] = module.bias.data.clone() - - _, _, inputs = self.get_dummy_inputs(with_generator=False) - - logger = logging.get_logger("diffusers.loaders.lora_pipeline") - logger.setLevel(logging.INFO) - - original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] - - denoiser_lora_config.lora_bias = False - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - lora_bias_false_output = pipe(**inputs, generator=torch.manual_seed(0))[0] - pipe.delete_adapters("adapter-1") - - denoiser_lora_config.lora_bias = True - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - lora_bias_true_output = pipe(**inputs, generator=torch.manual_seed(0))[0] - - self.assertFalse(np.allclose(original_output, lora_bias_false_output, atol=1e-3, rtol=1e-3)) - self.assertFalse(np.allclose(original_output, lora_bias_true_output, atol=1e-3, rtol=1e-3)) - self.assertFalse(np.allclose(lora_bias_false_output, lora_bias_true_output, atol=1e-3, rtol=1e-3)) - - # for now this is flux control lora specific but can be generalized later and added to ./utils.py - def test_correct_lora_configs_with_different_ranks(self): - components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) - pipe = self.pipeline_class(**components) - pipe = pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - _, _, inputs = self.get_dummy_inputs(with_generator=False) - - original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] - - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - lora_output_same_rank = pipe(**inputs, generator=torch.manual_seed(0))[0] - pipe.transformer.delete_adapters("adapter-1") - - # change the rank_pattern - updated_rank = denoiser_lora_config.r * 2 - denoiser_lora_config.rank_pattern = {"single_transformer_blocks.0.attn.to_k": updated_rank} - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - assert pipe.transformer.peft_config["adapter-1"].rank_pattern == { - "single_transformer_blocks.0.attn.to_k": updated_rank - } - - lora_output_diff_rank = pipe(**inputs, generator=torch.manual_seed(0))[0] - - self.assertTrue(not np.allclose(original_output, lora_output_same_rank, atol=1e-3, rtol=1e-3)) - self.assertTrue(not np.allclose(lora_output_diff_rank, lora_output_same_rank, atol=1e-3, rtol=1e-3)) - pipe.transformer.delete_adapters("adapter-1") - - # similarly change the alpha_pattern - updated_alpha = denoiser_lora_config.lora_alpha * 2 - denoiser_lora_config.alpha_pattern = {"single_transformer_blocks.0.attn.to_k": updated_alpha} - pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") - assert pipe.transformer.peft_config["adapter-1"].alpha_pattern == { - "single_transformer_blocks.0.attn.to_k": updated_alpha - } - - lora_output_diff_alpha = pipe(**inputs, generator=torch.manual_seed(0))[0] - - self.assertTrue(not np.allclose(original_output, lora_output_diff_alpha, atol=1e-3, rtol=1e-3)) - self.assertTrue(not np.allclose(lora_output_diff_alpha, lora_output_same_rank, atol=1e-3, rtol=1e-3)) - def test_lora_expanding_shape_with_normal_lora_raises_error(self): # TODO: This test checks if an error is raised when a lora expands shapes (like control loras) but # another lora with correct shapes is loaded. This is not supported at the moment and should raise an error. 
diff --git a/tests/lora/test_lora_layers_ltx_video.py b/tests/lora/test_lora_layers_ltx_video.py index fd211ba90751..1ed426f6e8dd 100644 --- a/tests/lora/test_lora_layers_ltx_video.py +++ b/tests/lora/test_lora_layers_ltx_video.py @@ -15,8 +15,6 @@ import sys import unittest -import numpy as np -import pytest import torch from transformers import AutoTokenizer, T5EncoderModel @@ -26,18 +24,12 @@ LTXPipeline, LTXVideoTransformer3DModel, ) -from diffusers.utils.testing_utils import ( - floats_tensor, - is_torch_version, - require_peft_backend, - skip_mps, - torch_device, -) +from diffusers.utils.testing_utils import floats_tensor, require_peft_backend sys.path.append(".") -from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set # noqa: E402 +from utils import PeftLoraLoaderMixinTests # noqa: E402 @require_peft_backend diff --git a/tests/lora/utils.py b/tests/lora/utils.py index 73ed17049c1b..4e73e1b196af 100644 --- a/tests/lora/utils.py +++ b/tests/lora/utils.py @@ -89,12 +89,12 @@ class PeftLoraLoaderMixinTests: has_two_text_encoders = False has_three_text_encoders = False - text_encoder_cls, text_encoder_id, text_encoder_subfolder = None, None, None - text_encoder_2_cls, text_encoder_2_id, text_encoder_2_subfolder = None, None, None - text_encoder_3_cls, text_encoder_3_id, text_encoder_3_subfolder = None, None, None - tokenizer_cls, tokenizer_id, tokenizer_subfolder = None, None, None - tokenizer_2_cls, tokenizer_2_id, tokenizer_2_subfolder = None, None, None - tokenizer_3_cls, tokenizer_3_id, tokenizer_3_subfolder = None, None, None + text_encoder_cls, text_encoder_id, text_encoder_subfolder = None, None, "" + text_encoder_2_cls, text_encoder_2_id, text_encoder_2_subfolder = None, None, "" + text_encoder_3_cls, text_encoder_3_id, text_encoder_3_subfolder = None, None, "" + tokenizer_cls, tokenizer_id, tokenizer_subfolder = None, None, "" + tokenizer_2_cls, tokenizer_2_id, tokenizer_2_subfolder = None, None, "" + tokenizer_3_cls, tokenizer_3_id, tokenizer_3_subfolder = None, None, "" unet_kwargs = None transformer_cls = None @@ -1988,3 +1988,111 @@ def test_set_adapters_match_attention_kwargs(self): np.allclose(output_lora_scale_wo_kwargs, output_lora_from_pretrained, atol=1e-3, rtol=1e-3), "Loading from saved checkpoints should give same results as set_adapters().", ) + + @require_peft_version_greater("0.13.2") + def test_lora_B_bias(self): + components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # keep track of the bias values of the base layers to perform checks later. 
+        bias_values = {}
+        denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer
+        for name, module in denoiser.named_modules():
+            if any(k in name for k in ["to_q", "to_k", "to_v", "to_out.0"]):
+                if module.bias is not None:
+                    bias_values[name] = module.bias.data.clone()
+
+        _, _, inputs = self.get_dummy_inputs(with_generator=False)
+
+        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
+        logger.setLevel(logging.INFO)
+
+        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+
+        denoiser_lora_config.lora_bias = False
+        if self.unet_kwargs is not None:
+            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
+        else:
+            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
+        lora_bias_false_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        pipe.delete_adapters("adapter-1")
+
+        denoiser_lora_config.lora_bias = True
+        if self.unet_kwargs is not None:
+            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
+        else:
+            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
+        lora_bias_true_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+
+        self.assertFalse(np.allclose(original_output, lora_bias_false_output, atol=1e-3, rtol=1e-3))
+        self.assertFalse(np.allclose(original_output, lora_bias_true_output, atol=1e-3, rtol=1e-3))
+        self.assertFalse(np.allclose(lora_bias_false_output, lora_bias_true_output, atol=1e-3, rtol=1e-3))
+
+    def test_correct_lora_configs_with_different_ranks(self):
+        components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0])
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+        _, _, inputs = self.get_dummy_inputs(with_generator=False)
+
+        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
+
+        if self.unet_kwargs is not None:
+            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
+        else:
+            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
+
+        lora_output_same_rank = pipe(**inputs, generator=torch.manual_seed(0))[0]
+
+        if self.unet_kwargs is not None:
+            pipe.unet.delete_adapters("adapter-1")
+        else:
+            pipe.transformer.delete_adapters("adapter-1")
+
+        denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer
+        for name, _ in denoiser.named_modules():
+            if "to_k" in name and "attn" in name and "lora" not in name:
+                module_name_to_rank_update = name.replace(".base_layer.", ".")
+                break
+
+        # change the rank_pattern
+        updated_rank = denoiser_lora_config.r * 2
+        denoiser_lora_config.rank_pattern = {module_name_to_rank_update: updated_rank}
+
+        if self.unet_kwargs is not None:
+            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
+            updated_rank_pattern = pipe.unet.peft_config["adapter-1"].rank_pattern
+        else:
+            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
+            updated_rank_pattern = pipe.transformer.peft_config["adapter-1"].rank_pattern
+
+        self.assertTrue(updated_rank_pattern == {module_name_to_rank_update: updated_rank})
+
+        lora_output_diff_rank = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        self.assertTrue(not np.allclose(original_output, lora_output_same_rank, atol=1e-3, rtol=1e-3))
+        self.assertTrue(not np.allclose(lora_output_diff_rank, lora_output_same_rank, atol=1e-3, rtol=1e-3))
+
+        if self.unet_kwargs is not None:
+            pipe.unet.delete_adapters("adapter-1")
+        else:
+            pipe.transformer.delete_adapters("adapter-1")
+
+        # similarly change the alpha_pattern
+        updated_alpha = denoiser_lora_config.lora_alpha * 2
+        denoiser_lora_config.alpha_pattern = {module_name_to_rank_update: updated_alpha}
+        if self.unet_kwargs is not None:
+            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
+            self.assertTrue(
+                pipe.unet.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha}
+            )
+        else:
+            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
+            self.assertTrue(
+                pipe.transformer.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha}
+            )
+
+        lora_output_diff_alpha = pipe(**inputs, generator=torch.manual_seed(0))[0]
+        self.assertTrue(not np.allclose(original_output, lora_output_diff_alpha, atol=1e-3, rtol=1e-3))
+        self.assertTrue(not np.allclose(lora_output_diff_alpha, lora_output_same_rank, atol=1e-3, rtol=1e-3))

From 618d2063c09031ff82d904f2c8567ee63671d4e7 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Mon, 23 Dec 2024 13:27:17 +0530
Subject: [PATCH 3/3] fixes to tests

---
 tests/lora/test_lora_layers_flux.py | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/tests/lora/test_lora_layers_flux.py b/tests/lora/test_lora_layers_flux.py
index 9e7dd74a86f5..b22fbaaed69b 100644
--- a/tests/lora/test_lora_layers_flux.py
+++ b/tests/lora/test_lora_layers_flux.py
@@ -330,7 +330,8 @@ def test_lora_parameter_expanded_shapes(self):
         }
         with CaptureLogger(logger) as cap_logger:
             pipe.load_lora_weights(lora_state_dict, "adapter-1")
-        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
+
+        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

         lora_out = pipe(**inputs, generator=torch.manual_seed(0))[0]

@@ -339,6 +340,7 @@ def test_lora_parameter_expanded_shapes(self):
         self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
         self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))

+        # Testing opposite direction where the LoRA params are zero-padded.
         components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
         pipe = self.pipeline_class(**components)
         pipe = pipe.to(torch_device)
@@ -349,15 +351,21 @@ def test_lora_parameter_expanded_shapes(self):
             "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight,
             "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight,
         }
-        # We should error out because lora input features is less than original. We only
-        # support expanding the module, not shrinking it
-        with self.assertRaises(NotImplementedError):
+        with CaptureLogger(logger) as cap_logger:
             pipe.load_lora_weights(lora_state_dict, "adapter-1")

-    def test_lora_expanding_shape_with_normal_lora_raises_error(self):
-        # TODO: This test checks if an error is raised when a lora expands shapes (like control loras) but
-        # another lora with correct shapes is loaded. This is not supported at the moment and should raise an error.
-        # When we do support it, this test should be removed. Context: https://github.com/huggingface/diffusers/issues/10180
+        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
+
+        lora_out = pipe(**inputs, generator=torch.manual_seed(0))[0]
+
+        self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4))
+        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features)
+        self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
+        self.assertTrue("The following LoRA modules were zero padded to match the state dict of" in cap_logger.out)
+
+    def test_normal_lora_with_expanded_lora_raises_error(self):
+        # Test the following situation. Load a regular LoRA (such as the ones trained on Flux.1-Dev). And then
+        # load shape expanded LoRA (such as Control LoRA).
         components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

         # Change the transformer config to mimic a real use case.