diff --git a/src/transformers/models/mgp_str/processing_mgp_str.py b/src/transformers/models/mgp_str/processing_mgp_str.py
index 7e30a0336b809f..169d8adcec7b8a 100644
--- a/src/transformers/models/mgp_str/processing_mgp_str.py
+++ b/src/transformers/models/mgp_str/processing_mgp_str.py
@@ -78,28 +78,35 @@ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
                 FutureWarning,
             )
             feature_extractor = kwargs.pop("feature_extractor")
-        if "char_tokenizer" in kwargs:
-            warnings.warn(
-                "The `char_tokenizer` argument is deprecated and will be removed in future versions, use `tokenizer`"
-                " instead.",
-                FutureWarning,
-            )
-            char_tokenizer = kwargs.pop("char_tokenizer")
 
         image_processor = image_processor if image_processor is not None else feature_extractor
-        tokenizer = tokenizer if tokenizer is not None else char_tokenizer
         if image_processor is None:
             raise ValueError("You need to specify an `image_processor`.")
         if tokenizer is None:
             raise ValueError("You need to specify a `tokenizer`.")
 
         self.tokenizer = tokenizer
-        self.char_tokenizer = tokenizer  # For backwards compatibility
         self.bpe_tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
         self.wp_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
         super().__init__(image_processor, tokenizer)
 
+    @property
+    def char_tokenizer(self):
+        warnings.warn(
+            "The `char_tokenizer` attribute is deprecated and will be removed in future versions, use `tokenizer` instead.",
+            FutureWarning,
+        )
+        return self.tokenizer
+
+    @char_tokenizer.setter
+    def char_tokenizer(self, value):
+        warnings.warn(
+            "The `char_tokenizer` attribute is deprecated and will be removed in future versions, use `tokenizer` instead.",
+            FutureWarning,
+        )
+        self.tokenizer = value
+
     def __call__(
         self,
         text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
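A quick illustration of the backward-compatibility shim introduced above: reads and writes of `char_tokenizer` are forwarded to `tokenizer` and emit a `FutureWarning`, so old call sites keep working while the deprecation surfaces. The sketch below reproduces the same property pattern on a hypothetical stand-in class (`ProcessorShim` is not part of transformers):

```python
import warnings


class ProcessorShim:
    """Hypothetical stand-in showing the deprecation pattern used in the diff above."""

    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    @property
    def char_tokenizer(self):
        # Reads are forwarded to `tokenizer`, with a FutureWarning as in the patch.
        warnings.warn("`char_tokenizer` is deprecated, use `tokenizer` instead.", FutureWarning)
        return self.tokenizer

    @char_tokenizer.setter
    def char_tokenizer(self, value):
        # Writes also land on `tokenizer`, so legacy assignments still take effect.
        warnings.warn("`char_tokenizer` is deprecated, use `tokenizer` instead.", FutureWarning)
        self.tokenizer = value


shim = ProcessorShim(tokenizer="dummy-tokenizer")
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert shim.char_tokenizer == "dummy-tokenizer"  # read goes through the property
    shim.char_tokenizer = "new-tokenizer"            # write goes through the setter
assert shim.tokenizer == "new-tokenizer"
assert all(w.category is FutureWarning for w in caught)
```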
diff --git a/src/transformers/models/tvp/image_processing_tvp.py b/src/transformers/models/tvp/image_processing_tvp.py
index 4e9618eef17084..7a4c5db004671e 100644
--- a/src/transformers/models/tvp/image_processing_tvp.py
+++ b/src/transformers/models/tvp/image_processing_tvp.py
@@ -50,7 +50,13 @@
 
 # Copied from transformers.models.vivit.image_processing_vivit.make_batched
 def make_batched(videos) -> List[List[ImageInput]]:
-    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
+    if isinstance(videos, np.ndarray) and videos.ndim == 5:
+        return videos
+
+    elif isinstance(videos, np.ndarray) and videos.ndim == 4:
+        return [videos]
+
+    elif isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
         return videos
 
     elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
@@ -305,30 +311,20 @@ def _preprocess_image(
         # All transformations expect numpy arrays.
         image = to_numpy_array(image)
 
-        print(f"{image.shape = }")
-
         if do_resize:
             image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
 
-        print(f"{image.shape = }")
-
         if do_center_crop:
             image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)
 
-        print(f"{image.shape = }")
-
         if do_rescale:
             image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
 
-        print(f"{image.shape = }")
-
         if do_normalize:
             image = self.normalize(
                 image=image.astype(np.float32), mean=image_mean, std=image_std, input_data_format=input_data_format
             )
 
-        print(f"{image.shape = }")
-
         if do_pad:
             image = self.pad_image(
                 image=image,
@@ -338,18 +334,12 @@ def _preprocess_image(
                 input_data_format=input_data_format,
             )
 
-        print(f"{image.shape = }")
-
         # the pretrained checkpoints assume images are BGR, not RGB
         if do_flip_channel_order:
             image = flip_channel_order(image=image, input_data_format=input_data_format)
 
-        print(f"{image.shape = }")
-
         image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
 
-        print(f"{image.shape = }")
-
         return image
 
     @filter_out_non_signature_kwargs()
diff --git a/src/transformers/models/videomae/image_processing_videomae.py b/src/transformers/models/videomae/image_processing_videomae.py
index 413589523aa675..c21210faf6670c 100644
--- a/src/transformers/models/videomae/image_processing_videomae.py
+++ b/src/transformers/models/videomae/image_processing_videomae.py
@@ -47,8 +47,15 @@
 logger = logging.get_logger(__name__)
 
 
+# Copied from transformers.models.vivit.image_processing_vivit.make_batched
 def make_batched(videos) -> List[List[ImageInput]]:
-    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
+    if isinstance(videos, np.ndarray) and videos.ndim == 5:
+        return videos
+
+    elif isinstance(videos, np.ndarray) and videos.ndim == 4:
+        return [videos]
+
+    elif isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
         return videos
 
     elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
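The `make_batched` update above for TVP and VideoMAE (and the identical one for ViViT further down) adds two numpy fast paths: a 5D `np.ndarray` (batch, frames, channels, height, width) passes through untouched, a 4D array is wrapped as a one-video batch, and the existing list/tuple branches are kept. A self-contained sketch of that dispatch order, using a simplified `is_valid_image` stand-in rather than the real helper from `transformers.image_utils`:

```python
from typing import List, Union

import numpy as np


def is_valid_image(img) -> bool:
    # Simplified stand-in for transformers.image_utils.is_valid_image.
    return isinstance(img, np.ndarray)


def make_batched(videos) -> Union[np.ndarray, List]:
    # A batch of videos as one 5D array: return it as-is.
    if isinstance(videos, np.ndarray) and videos.ndim == 5:
        return videos
    # A single video as one 4D array: wrap it into a batch of one.
    elif isinstance(videos, np.ndarray) and videos.ndim == 4:
        return [videos]
    # A batch given as nested lists/tuples of frames: already batched.
    elif isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    # A single video given as a list/tuple of frames: wrap it.
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    raise ValueError(f"Could not make batched video from {videos}")


single_video = np.random.randint(0, 255, size=(4, 3, 30, 400), dtype=np.uint8)  # (frames, C, H, W)
batch = np.random.randint(0, 255, size=(1, 4, 3, 30, 400), dtype=np.uint8)      # (batch, frames, C, H, W)
assert len(make_batched(single_video)) == 1  # wrapped into a one-element batch
assert make_batched(batch).shape[0] == 1     # returned unchanged
```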
diff --git a/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py
index 0d723ed10bf067..ec398e09f8412e 100644
--- a/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py
+++ b/src/transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py
@@ -16,10 +16,24 @@
 Processor class for VisionTextDualEncoder
 """
 
+import sys
 import warnings
+from typing import List, Optional, Union
 
-from ...processing_utils import ProcessorMixin
-from ...tokenization_utils_base import BatchEncoding
+from ...feature_extraction_utils import BatchFeature
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessingKwargs, ProcessorMixin
+from ...tokenization_utils_base import PreTokenizedInput, TextInput
+
+
+if sys.version_info >= (3, 11):
+    from typing import Unpack
+else:
+    from typing_extensions import Unpack
+
+
+class VisionTextDualEncoderProcessorKwargs(ProcessingKwargs, total=False):
+    _defaults = {}
 
 
 class VisionTextDualEncoderProcessor(ProcessorMixin):
@@ -61,7 +75,14 @@ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
         super().__init__(image_processor, tokenizer)
         self.current_processor = self.image_processor
 
-    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
+    def __call__(
+        self,
+        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+        images: Optional[ImageInput] = None,
+        audio=None,
+        videos=None,
+        **kwargs: Unpack[VisionTextDualEncoderProcessorKwargs],
+    ) -> BatchFeature:
         """
         Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
         and `kwargs` arguments to VisionTextDualEncoderTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not
@@ -70,24 +91,16 @@ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
         of the above two methods for more information.
 
         Args:
-            text (`str`, `List[str]`, `List[List[str]]`):
+            text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`, *optional*):
                 The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                 (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                 `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
-            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+            images (`ImageInput`, *optional*):
                 The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                 tensor. Both channels-first and channels-last formats are supported.
 
-            return_tensors (`str` or [`~utils.TensorType`], *optional*):
-                If set, will return tensors of a particular framework. Acceptable values are:
-
-                - `'tf'`: Return TensorFlow `tf.constant` objects.
-                - `'pt'`: Return PyTorch `torch.Tensor` objects.
-                - `'np'`: Return NumPy `np.ndarray` objects.
-                - `'jax'`: Return JAX `jnp.ndarray` objects.
-
         Returns:
-            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
 
             - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
             - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
@@ -99,19 +112,25 @@ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
         if text is None and images is None:
             raise ValueError("You have to specify either text or images. Both cannot be none.")
 
+        output_kwargs = self._merge_kwargs(
+            VisionTextDualEncoderProcessorKwargs,
+            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
+            **kwargs,
+        )
+
         if text is not None:
-            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
+            encodings = self.tokenizer(text, **output_kwargs["text_kwargs"])
 
         if images is not None:
-            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
+            image_features = self.image_processor(images, **output_kwargs["images_kwargs"])
 
+        return_tensors = output_kwargs["common_kwargs"].get("return_tensors")
         if text is not None and images is not None:
-            encoding["pixel_values"] = image_features.pixel_values
-            return encoding
+            return BatchFeature(data=dict(**encodings, **image_features), tensor_type=return_tensors)
         elif text is not None:
-            return encoding
+            return BatchFeature(data=dict(**encodings), tensor_type=return_tensors)
         else:
-            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
+            return BatchFeature(data=dict(**image_features), tensor_type=return_tensors)
 
     def batch_decode(self, *args, **kwargs):
         """
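A rough usage sketch of the uniformized `__call__` above: tokenizer and image-processor kwargs are merged through `_merge_kwargs`, and the return type is always a `BatchFeature`. The checkpoint is the one used in the updated tests; the output keys shown in the comments are illustrative, not asserted by this diff.

```python
import numpy as np
from PIL import Image

from transformers import VisionTextDualEncoderProcessor

# "clip-italian/clip-italian" is the checkpoint the updated test suite loads; any
# compatible vision-text checkpoint should behave the same (requires network access).
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

image = Image.fromarray(np.random.randint(0, 255, size=(224, 224, 3), dtype=np.uint8))
batch = processor(text=["una foto di un gatto"], images=image, padding=True, return_tensors="pt")

# The processor now returns a BatchFeature combining tokenizer and image-processor outputs.
print(type(batch).__name__)   # BatchFeature
print(sorted(batch.keys()))   # e.g. ['attention_mask', 'input_ids', 'pixel_values', ...]
```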
diff --git a/src/transformers/models/vivit/image_processing_vivit.py b/src/transformers/models/vivit/image_processing_vivit.py
index 5f251bbd1b95b9..fb959e9f1eddb2 100644
--- a/src/transformers/models/vivit/image_processing_vivit.py
+++ b/src/transformers/models/vivit/image_processing_vivit.py
@@ -51,7 +51,13 @@
 
 
 def make_batched(videos) -> List[List[ImageInput]]:
-    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
+    if isinstance(videos, np.ndarray) and videos.ndim == 5:
+        return videos
+
+    elif isinstance(videos, np.ndarray) and videos.ndim == 4:
+        return [videos]
+
+    elif isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
         return videos
 
     elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
diff --git a/tests/models/tvp/test_processor_tvp.py b/tests/models/tvp/test_processor_tvp.py
index 8f5e0bd6b5d05d..40d700e0beea15 100644
--- a/tests/models/tvp/test_processor_tvp.py
+++ b/tests/models/tvp/test_processor_tvp.py
@@ -2,8 +2,6 @@
 import tempfile
 import unittest
 
-import numpy as np
-
 from transformers import TvpProcessor
 from transformers.testing_utils import require_torch, require_vision
 
@@ -20,10 +18,6 @@ def setUp(self):
         processor = self.processor_class.from_pretrained(self.from_pretrained_id)
         processor.save_pretrained(self.tmpdirname)
 
-    @require_vision
-    def prepare_video_inputs(self):
-        return [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
-
     @require_torch
     @require_vision
     def test_video_processor_defaults_preserved_by_kwargs(self):
diff --git a/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py b/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py
index aebe723bd5fd7f..d51d0be7b8d4ca 100644
--- a/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py
+++ b/tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py
@@ -20,21 +20,25 @@
 
 import numpy as np
 
-from transformers import BertTokenizerFast
+from transformers import BertTokenizerFast, VisionTextDualEncoderProcessor
 from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
 from transformers.testing_utils import require_tokenizers, require_vision
 from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
 
+from ...test_processing_common import ProcessorTesterMixin
+
 
 if is_vision_available():
     from PIL import Image
 
-    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
+    from transformers import ViTImageProcessor
 
 
 @require_tokenizers
 @require_vision
-class VisionTextDualEncoderProcessorTest(unittest.TestCase):
+class VisionTextDualEncoderProcessorTest(ProcessorTesterMixin, unittest.TestCase):
+    processor_class = VisionTextDualEncoderProcessor
+
     def setUp(self):
         self.tmpdirname = tempfile.mkdtemp()
 
@@ -54,6 +58,9 @@ def setUp(self):
         with open(self.image_processor_file, "w", encoding="utf-8") as fp:
             json.dump(image_processor_map, fp)
 
+        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
+        processor.save_pretrained(self.tmpdirname)
+
     def get_tokenizer(self, **kwargs):
         return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
diff --git a/tests/models/x_clip/test_processor_x_clip.py b/tests/models/x_clip/test_processor_x_clip.py
index e9d0bf4b2539ee..5b34855a67252a 100644
--- a/tests/models/x_clip/test_processor_x_clip.py
+++ b/tests/models/x_clip/test_processor_x_clip.py
@@ -1,8 +1,6 @@
 import tempfile
 import unittest
 
-import numpy as np
-
 from transformers import XCLIPProcessor
 from transformers.testing_utils import require_torch, require_vision
 
@@ -19,10 +17,6 @@ def setUp(self):
         processor = self.processor_class.from_pretrained(self.from_pretrained_id)
         processor.save_pretrained(self.tmpdirname)
 
-    @require_vision
-    def prepare_video_inputs(self):
-        return [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
-
     @require_torch
     @require_vision
     def test_image_processor_defaults_preserved_by_image_kwargs(self):
diff --git a/tests/test_processing_common.py b/tests/test_processing_common.py
index ebb5e6f74f3d07..53cfcf5520c053 100644
--- a/tests/test_processing_common.py
+++ b/tests/test_processing_common.py
@@ -93,7 +93,7 @@ def prepare_image_inputs(self):
 
     @require_vision
     def prepare_video_inputs(self):
-        return [np.random.randint(255, size=(4, 3, 30, 400), dtype=np.uint8)]
+        return np.random.randint(255, size=(1, 4, 3, 30, 400), dtype=np.uint8)
 
     def test_processor_to_json_string(self):
         processor = self.get_processor()
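For context on the last hunk: the shared `prepare_video_inputs` fixture now returns a single 5D array instead of a list with one 4D video, which exercises the new `ndim == 5` branch of `make_batched` and lets the model-specific overrides in the TVP and X-CLIP tests be dropped. A small sanity check of the shapes involved (the dimension names are my reading of the sizes, not documented in the diff):

```python
import numpy as np

# Old fixture: a list containing one 4D video, shape (frames, channels, height, width).
old_inputs = [np.random.randint(255, size=(4, 3, 30, 400), dtype=np.uint8)]

# New fixture: one 5D array, shape (batch, frames, channels, height, width).
new_inputs = np.random.randint(255, size=(1, 4, 3, 30, 400), dtype=np.uint8)

assert old_inputs[0].shape == new_inputs[0].shape  # same underlying video per batch element
assert new_inputs.ndim == 5                        # hits the new `ndim == 5` fast path
```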