From 8e6b55ba6125548c6db5f11f807e20c752952257 Mon Sep 17 00:00:00 2001
From: yonigozlan
Date: Thu, 19 Sep 2024 17:49:54 +0000
Subject: [PATCH] nit

---
 src/transformers/models/paligemma/processing_paligemma.py | 8 +-------
 tests/models/paligemma/test_processor_paligemma.py        | 4 ++--
 2 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/src/transformers/models/paligemma/processing_paligemma.py b/src/transformers/models/paligemma/processing_paligemma.py
index a67869445c01c7..4457b6fe957bf3 100644
--- a/src/transformers/models/paligemma/processing_paligemma.py
+++ b/src/transformers/models/paligemma/processing_paligemma.py
@@ -17,7 +17,6 @@
 """
 
 import logging
-import sys
 from typing import List, Optional, Union
 
 from ...feature_extraction_utils import BatchFeature
@@ -27,6 +26,7 @@
     ProcessingKwargs,
     ProcessorMixin,
     TextKwargs,
+    Unpack,
     _validate_images_text_input_order,
 )
 from ...tokenization_utils_base import (
@@ -36,12 +36,6 @@
 )
 
 
-if sys.version_info >= (3, 11):
-    from typing import Unpack
-else:
-    from typing_extensions import Unpack
-
-
 logger = logging.getLogger(__name__)
 
 IMAGE_TOKEN = "<image>"
diff --git a/tests/models/paligemma/test_processor_paligemma.py b/tests/models/paligemma/test_processor_paligemma.py
index f99258f7b145f1..47810f1832416f 100644
--- a/tests/models/paligemma/test_processor_paligemma.py
+++ b/tests/models/paligemma/test_processor_paligemma.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
+# Copyright 2024 The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 import shutil
 import tempfile
 import unittest
@@ -43,7 +44,6 @@ def setUp(self):
         tokenizer = GemmaTokenizer(SAMPLE_VOCAB, keep_accents=True)
         processor = PaliGemmaProcessor(image_processor=image_processor, tokenizer=tokenizer)
         processor.save_pretrained(self.tmpdirname)
-        print(image_processor)
 
     def tearDown(self):
         shutil.rmtree(self.tmpdirname)