diff --git a/tests/models/idefics3/test_processing_idefics3.py b/tests/models/idefics3/test_processing_idefics3.py
index c204ecdb4151b8..b148d2e9472ceb 100644
--- a/tests/models/idefics3/test_processing_idefics3.py
+++ b/tests/models/idefics3/test_processing_idefics3.py
@@ -92,9 +92,8 @@ def get_splitted_image_expected_tokens(self, processor, image_rows, image_cols):
                 )
             text_split_images += processor.tokenizer("\n", add_special_tokens=False)["input_ids"]
         text_split_images = text_split_images[:-1]  # remove last newline
-        text_split_images += processor.tokenizer("\n\n", add_special_tokens=False)[
-            "input_ids"
-        ]  # add double newline, as it gets its own token
+        # add double newline, as it gets its own token
+        text_split_images += processor.tokenizer("\n\n", add_special_tokens=False)["input_ids"]
         text_split_images += (
             [self.fake_image_token_id]
             + self.global_img_tokens_id