From 7ee2ec81a2b5e1a22433a1b9e2450e7f17081412 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9s=20Marafioti?=
Date: Wed, 18 Sep 2024 14:29:28 +0200
Subject: [PATCH] Update tests/models/idefics3/test_processing_idefics3.py

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
---
 tests/models/idefics3/test_processing_idefics3.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tests/models/idefics3/test_processing_idefics3.py b/tests/models/idefics3/test_processing_idefics3.py
index c204ecdb4151b8..b148d2e9472ceb 100644
--- a/tests/models/idefics3/test_processing_idefics3.py
+++ b/tests/models/idefics3/test_processing_idefics3.py
@@ -92,9 +92,8 @@ def get_splitted_image_expected_tokens(self, processor, image_rows, image_cols):
         )
         text_split_images += processor.tokenizer("\n", add_special_tokens=False)["input_ids"]
         text_split_images = text_split_images[:-1]  # remove last newline
-        text_split_images += processor.tokenizer("\n\n", add_special_tokens=False)[
-            "input_ids"
-        ]  # add double newline, as it gets its own token
+        # add double newline, as it gets its own token
+        text_split_images += processor.tokenizer("\n\n", add_special_tokens=False)["input_ids"]
         text_split_images += (
             [self.fake_image_token_id]
             + self.global_img_tokens_id