diff --git a/src/transformers/models/grounding_dino/processing_grounding_dino.py b/src/transformers/models/grounding_dino/processing_grounding_dino.py
index 00c183338be056..2b576992851884 100644
--- a/src/transformers/models/grounding_dino/processing_grounding_dino.py
+++ b/src/transformers/models/grounding_dino/processing_grounding_dino.py
@@ -17,20 +17,12 @@
 """
 
 import pathlib
-import sys
 from typing import Dict, List, Optional, Tuple, Union
 
 from ...image_processing_utils import BatchFeature
 from ...image_transforms import center_to_corners_format
 from ...image_utils import AnnotationFormat, ImageInput
-from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin
-
-
-if sys.version_info >= (3, 11):
-    from typing import Unpack
-else:
-    from typing_extensions import Unpack
-
+from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
 from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput
 from ...utils import TensorType, is_torch_available
 
diff --git a/src/transformers/models/llava/processing_llava.py b/src/transformers/models/llava/processing_llava.py
index 28a9410e6cbf0b..8a9597892c6021 100644
--- a/src/transformers/models/llava/processing_llava.py
+++ b/src/transformers/models/llava/processing_llava.py
@@ -16,21 +16,15 @@
 Processor class for Llava.
 """
 
-import sys
 from typing import List, Union
 
 from ...feature_extraction_utils import BatchFeature
 from ...image_utils import ImageInput, get_image_size, to_numpy_array
-from ...processing_utils import ProcessingKwargs, ProcessorMixin, _validate_images_text_input_order
+from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, _validate_images_text_input_order
 from ...tokenization_utils_base import PreTokenizedInput, TextInput
 from ...utils import logging
 
 
-if sys.version_info >= (3, 11):
-    from typing import Unpack
-else:
-    from typing_extensions import Unpack
-
 logger = logging.get_logger(__name__)
 
 
diff --git a/src/transformers/models/llava_onevision/processing_llava_onevision.py b/src/transformers/models/llava_onevision/processing_llava_onevision.py
index d4ae02e0bb154c..cfc87f21fa8be3 100644
--- a/src/transformers/models/llava_onevision/processing_llava_onevision.py
+++ b/src/transformers/models/llava_onevision/processing_llava_onevision.py
@@ -18,22 +18,12 @@
 
 import math
 import os
-import sys
 from typing import Iterable, List, Union
 
-
-if sys.version_info >= (3, 11):
-    from typing import Unpack
-else:
-    from typing_extensions import Unpack
-
 from ...feature_extraction_utils import BatchFeature
 from ...image_processing_utils import select_best_resolution
 from ...image_utils import ImageInput, VideoInput, get_image_size, to_numpy_array
-from ...processing_utils import (
-    ProcessingKwargs,
-    ProcessorMixin,
-)
+from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
 from ...tokenization_utils_base import PreTokenizedInput, TextInput
 from ...utils import logging
 from ..auto import AutoImageProcessor
diff --git a/src/transformers/models/pixtral/processing_pixtral.py b/src/transformers/models/pixtral/processing_pixtral.py
index 1b07aa02771dc9..d336760c6d9c66 100644
--- a/src/transformers/models/pixtral/processing_pixtral.py
+++ b/src/transformers/models/pixtral/processing_pixtral.py
@@ -16,21 +16,15 @@
 Processor class for Pixtral.
 """
 
-import sys
 from typing import List, Union
 
 from ...feature_extraction_utils import BatchFeature
 from ...image_utils import ImageInput, is_valid_image, load_image
-from ...processing_utils import ProcessingKwargs, ProcessorMixin, _validate_images_text_input_order
+from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, _validate_images_text_input_order
 from ...tokenization_utils_base import PreTokenizedInput, TextInput
 from ...utils import is_torch_device, is_torch_dtype, is_torch_tensor, logging, requires_backends
 
 
-if sys.version_info >= (3, 11):
-    from typing import Unpack
-else:
-    from typing_extensions import Unpack
-
 logger = logging.get_logger(__name__)
 
 
diff --git a/src/transformers/models/qwen2_vl/processing_qwen2_vl.py b/src/transformers/models/qwen2_vl/processing_qwen2_vl.py
index 591b82f053c8f3..48516e6aa31d7d 100644
--- a/src/transformers/models/qwen2_vl/processing_qwen2_vl.py
+++ b/src/transformers/models/qwen2_vl/processing_qwen2_vl.py
@@ -23,18 +23,9 @@
 
 from typing import List, Union
 
-
-try:
-    from typing import Unpack
-except ImportError:
-    from typing_extensions import Unpack
-
 from ...feature_extraction_utils import BatchFeature
 from ...image_utils import ImageInput, VideoInput
-from ...processing_utils import (
-    ProcessingKwargs,
-    ProcessorMixin,
-)
+from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
 from ...tokenization_utils_base import PreTokenizedInput, TextInput
 from ...utils import logging
 
diff --git a/tests/test_processing_common.py b/tests/test_processing_common.py
index fe17de4eeb5cdf..403d89bad2978b 100644
--- a/tests/test_processing_common.py
+++ b/tests/test_processing_common.py
@@ -18,15 +18,10 @@
 import json
 import tempfile
 
-
-try:
-    from typing import Unpack
-except ImportError:
-    from typing_extensions import Unpack
-
 import numpy as np
 
 from transformers.models.auto.processing_auto import processor_class_from_name
+from transformers.processing_utils import Unpack
 from transformers.testing_utils import (
     check_json_file_has_correct_format,
     require_torch,