[Fix] Fix pre-commit-hook for python3.12
HAOCHENYE committed Nov 1, 2024
1 parent c46684c commit 51681d2
Showing 23 changed files with 66 additions and 49 deletions.
5 changes: 3 additions & 2 deletions .github/workflows/lint.yml
@@ -11,12 +11,13 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.7
- name: Set up Python 3.10.15
uses: actions/setup-python@v2
with:
python-version: 3.7
python-version: '3.10.15'
- name: Install pre-commit hook
run: |
pip install setuptools
pip install pre-commit
pre-commit install
- name: Linting
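A note on the added "pip install setuptools" step, offered as an assumption rather than something stated in the commit: Python 3.12 removed distutils from the standard library and new environments no longer bundle setuptools, so tooling that still imports pkg_resources or distutils needs setuptools installed explicitly. A minimal sketch of a guard for that situation (hypothetical, not part of the workflow):

# Hypothetical guard, not part of this commit: confirm setuptools is importable
# before running tools that still rely on pkg_resources/distutils shims.
import importlib.util
import sys

if sys.version_info >= (3, 12) and importlib.util.find_spec("setuptools") is None:
    raise SystemExit("setuptools is missing; run 'pip install setuptools' first")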
15 changes: 10 additions & 5 deletions .pre-commit-config-zh-cn.yaml
@@ -1,7 +1,11 @@
exclude: ^tests/data/
repos:
- repo: https://gitee.com/openmmlab/mirrors-flake8
rev: 5.0.4
- repo: https://github.com/pre-commit/pre-commit
rev: v4.0.0
hooks:
- id: validate_manifest
- repo: https://github.com/PyCQA/flake8
rev: 7.1.1
hooks:
- id: flake8
- repo: https://gitee.com/openmmlab/mirrors-isort
@@ -13,7 +17,7 @@ repos:
hooks:
- id: yapf
- repo: https://gitee.com/openmmlab/mirrors-pre-commit-hooks
rev: v4.3.0
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: check-yaml
@@ -53,15 +57,16 @@ repos:
hooks:
- id: check-copyright
args: ["mmcv", "tests", "--excludes", "mmcv/ops"]
- repo: https://gitee.com/openmmlab/mirrors-mypy
rev: v0.812
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.2.0
hooks:
- id: mypy
exclude: |-
(?x)(
^test
| ^docs
)
additional_dependencies: ["types-setuptools", "types-requests"]
# - repo: local
# hooks:
# - id: clang-format
13 changes: 9 additions & 4 deletions .pre-commit-config.yaml
@@ -1,7 +1,11 @@
exclude: ^tests/data/
repos:
- repo: https://github.com/pre-commit/pre-commit
rev: v4.0.0
hooks:
- id: validate_manifest
- repo: https://github.com/PyCQA/flake8
rev: 5.0.4
rev: 7.1.1
hooks:
- id: flake8
- repo: https://github.com/PyCQA/isort
@@ -13,7 +17,7 @@ repos:
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.3.0
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: check-yaml
@@ -39,7 +43,7 @@ repos:
- mdformat_frontmatter
- linkify-it-py
- repo: https://github.com/myint/docformatter
rev: v1.3.1
rev: 06907d0
hooks:
- id: docformatter
args: ["--in-place", "--wrap-descriptions", "79"]
@@ -54,14 +58,15 @@ repos:
- id: check-copyright
args: ["mmcv", "tests", "--excludes", "mmcv/ops"]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.812
rev: v1.2.0
hooks:
- id: mypy
exclude: |-
(?x)(
^test
| ^docs
)
additional_dependencies: ["types-setuptools", "types-requests"]
# - repo: local
# hooks:
# - id: clang-format
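The mypy hook in both configs is bumped to v1.2.0 and now declares its stub packages explicitly. Because pre-commit runs mypy in an isolated environment, third-party stubs such as types-requests and types-setuptools must be listed under additional_dependencies. A minimal sketch, using a hypothetical module, of the kind of import that needs them:

# Hypothetical module, not part of this commit. Without "types-requests" in the
# hook's additional_dependencies, mypy reports an error along the lines of:
#     error: Library stubs not installed for "requests"
import requests


def fetch_status(url: str) -> int:
    # With the stubs available, mypy knows that .status_code is an int.
    return requests.get(url, timeout=10).status_code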
3 changes: 2 additions & 1 deletion mmcv/cnn/bricks/drop.py
@@ -13,7 +13,8 @@ def drop_path(x: torch.Tensor,
residual blocks).
We follow the implementation
https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501
https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py
# noqa: E501
"""
if drop_prob == 0. or not training:
return x
8 changes: 4 additions & 4 deletions mmcv/cnn/rfsearch/search.py
@@ -71,7 +71,7 @@ def __init__(self,
self.by_epoch = by_epoch

def init_model(self, model: nn.Module):
"""init model with search ability.
"""Init model with search ability.
Args:
model (nn.Module): pytorch model
@@ -132,7 +132,7 @@ def step(self, model: nn.Module, work_dir: str) -> None:
)

def estimate_and_expand(self, model: nn.Module) -> None:
"""estimate and search for RFConvOp.
"""Estimate and search for RFConvOp.
Args:
model (nn.Module): pytorch model
@@ -146,7 +146,7 @@ def wrap_model(self,
model: nn.Module,
search_op: str = 'Conv2d',
prefix: str = '') -> None:
"""wrap model to support searchable conv op.
"""Wrap model to support searchable conv op.
Args:
model (nn.Module): pytorch model
@@ -187,7 +187,7 @@ def set_model(self,
search_op: str = 'Conv2d',
init_rates: Optional[int] = None,
prefix: str = '') -> None:
"""set model based on config.
"""Set model based on config.
Args:
model (nn.Module): pytorch model
4 changes: 2 additions & 2 deletions mmcv/cnn/rfsearch/utils.py
@@ -4,7 +4,7 @@


def write_to_json(config: dict, filename: str):
"""save config to json file.
"""Save config to json file.
Args:
config (dict): Config to be saved.
@@ -16,7 +16,7 @@ def write_to_json(config: dict, filename: str):


def expand_rates(dilation: tuple, config: dict) -> list:
"""expand dilation rate according to config.
"""Expand dilation rate according to config.
Args:
dilation (int): _description_
6 changes: 3 additions & 3 deletions mmcv/ops/active_rotated_filter.py
@@ -16,9 +16,9 @@ class ActiveRotatedFilterFunction(Function):
"""Encoding the orientation information and generating orientation-
sensitive features.
The details are described in the paper `Align Deep Features for Oriented
Object Detection <https://arxiv.org/abs/2008.09397>_`.
"""
The details are described in the paper
`Align Deep Features for Oriented Object Detection <https://arxiv.org/abs/2008.09397>_`.
""" # noqa: E501

@staticmethod
def forward(ctx, input: torch.Tensor,
2 changes: 1 addition & 1 deletion mmcv/ops/cc_attention.py
@@ -51,7 +51,7 @@ def __init__(self, in_channels: int) -> None:
self.in_channels = in_channels

def forward(self, x: torch.Tensor) -> torch.Tensor:
"""forward function of Criss-Cross Attention.
"""Forward function of Criss-Cross Attention.
Args:
x (torch.Tensor): Input feature with the shape of
3 changes: 2 additions & 1 deletion mmcv/ops/chamfer_distance.py
@@ -16,7 +16,8 @@ class ChamferDistanceFunction(Function):
"""This is an implementation of the 2D Chamfer Distance.
It has been used in the paper `Oriented RepPoints for Aerial Object
Detection (CVPR 2022) <https://arxiv.org/abs/2105.11111>_`.
Detection (CVPR 2022)
<https://arxiv.org/abs/2105.11111>_`.
"""

@staticmethod
2 changes: 1 addition & 1 deletion mmcv/ops/correlation.py
@@ -116,7 +116,7 @@ def _output_size(ctx, input1):


class Correlation(nn.Module):
r"""Correlation operator
r"""Correlation operator.
This correlation operator works for optical flow correlation computation.
4 changes: 3 additions & 1 deletion mmcv/ops/deform_conv.py
@@ -449,7 +449,9 @@ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,

@MODELS.register_module('DCN', force=True)
class DeformConv2dPack_MLU(DeformConv2d):
"""This class is the DCN implementation of the MLU device. The MLU
"""This class is the DCN implementation of the MLU device.
The MLU
backend support of the operator has been implemented in torchvision.
The mmcv registration mechanism is used for multiplexing here. The
torchvision implementation of DCN is called.
2 changes: 1 addition & 1 deletion mmcv/ops/iou3d.py
@@ -42,7 +42,7 @@ def boxes_iou3d(boxes_a: Tensor, boxes_b: Tensor) -> Tensor:
Returns:
torch.Tensor: 3D IoU result with shape (M, N).
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 7,\
assert boxes_a.shape[1] == boxes_b.shape[1] == 7, \
'Input boxes shape should be (N, 7)'

boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)
6 changes: 3 additions & 3 deletions mmcv/ops/rotated_feature_align.py
@@ -17,9 +17,9 @@ class RotatedFeatureAlignFunction(Function):
correspond to the refined rotate anchors and reconstruct the feature maps
in pixel-wise manner to achieve feature alignment.
The details are described in the paper
`R3Det: Refined Single-Stage Detector with Feature Refinement for Rotating
Object <https://arxiv.org/abs/1908.05612>`_.
The details are described in the paper `R3Det: Refined Single-Stage
Detector with Feature Refinement for Rotating Object
<https://arxiv.org/abs/1908.05612>`_.
"""

@staticmethod
2 changes: 1 addition & 1 deletion mmcv/ops/scatter_points.py
@@ -20,7 +20,7 @@ def forward(ctx: Any,
feats: torch.Tensor,
coors: torch.Tensor,
reduce_type: str = 'max') -> Tuple[torch.Tensor, torch.Tensor]:
"""convert kitti points(N, >=3) to voxels.
"""Convert kitti points(N, >=3) to voxels.
Args:
feats (torch.Tensor): [N, C]. Points features to be reduced
13 changes: 6 additions & 7 deletions mmcv/ops/sparse_modules.py
@@ -47,16 +47,15 @@ def _mean_update(vals: Union[int, List], m_vals: Union[int, List],


class SparseModule(nn.Module):
"""place holder, All module subclass from this will take sptensor in
"""Place holder, All module subclass from this will take sptensor in
SparseSequential."""
pass


class SparseSequential(SparseModule):
r"""A sequential container.
Modules will be added to it in the order they are passed in the
constructor.
Alternatively, an ordered dict of modules can also be passed in.
r"""A sequential container. Modules will be added to it in the order they
are passed in the constructor. Alternatively, an ordered dict of modules
can also be passed in.
To make it easier to understand, given is a small example::
@@ -189,14 +188,14 @@ def fused(self):


class ToDense(SparseModule):
"""convert SparseConvTensor to NCHW dense tensor."""
"""Convert SparseConvTensor to NCHW dense tensor."""

def forward(self, x: SparseConvTensor):
return x.dense()


class RemoveGrid(SparseModule):
"""remove pre-allocated grid buffer."""
"""Remove pre-allocated grid buffer."""

def forward(self, x: SparseConvTensor):
x.grid = None
2 changes: 1 addition & 1 deletion mmcv/ops/sparse_structure.py
@@ -6,7 +6,7 @@

def scatter_nd(indices: torch.Tensor, updates: torch.Tensor,
shape: torch.Tensor) -> torch.Tensor:
"""pytorch edition of tensorflow scatter_nd.
"""Pytorch edition of tensorflow scatter_nd.
this function don't contain except handle code. so use this carefully when
indice repeats, don't support repeat add which is supported in tensorflow.
8 changes: 5 additions & 3 deletions mmcv/ops/tin_shift.py
@@ -55,10 +55,12 @@ class TINShift(nn.Module):
Temporal Interlace shift is a differentiable temporal-wise frame shifting
which is proposed in "Temporal Interlacing Network"
Please refer to `Temporal Interlacing Network
<https://arxiv.org/abs/2001.06499>`_ for more details.
Please refer to
`Temporal Interlacing Network <https://arxiv.org/abs/2001.06499>`_
for more details.
Code is modified from https://github.com/mit-han-lab/temporal-shift-module
Code is modified from
https://github.com/mit-han-lab/temporal-shift-module
"""

def forward(self, input, shift):
6 changes: 3 additions & 3 deletions mmcv/ops/upfirdn2d.py
@@ -19,7 +19,7 @@


def _parse_scaling(scaling):
"""parse scaling into list [x, y]"""
"""Parse scaling into list [x, y]"""
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
@@ -30,7 +30,7 @@ def _parse_scaling(scaling):


def _parse_padding(padding):
"""parse padding into list [padx0, padx1, pady0, pady1]"""
"""Parse padding into list [padx0, padx1, pady0, pady1]"""
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
@@ -43,7 +43,7 @@ def _parse_padding(padding):


def _get_filter_size(filter):
"""get width and height of filter kernel."""
"""Get width and height of filter kernel."""
if filter is None:
return 1, 1
assert isinstance(filter, torch.Tensor) and filter.ndim in [1, 2]
1 change: 1 addition & 0 deletions mmcv/transforms/formatting.py
@@ -110,6 +110,7 @@ def __init__(self, keys: dict) -> None:
def transform(self, results: dict) -> dict:
"""Transform function to convert image in results to
:obj:`torch.Tensor` and transpose the channel order.
Args:
results (dict): Result dict contains the image data to convert.
Returns:
2 changes: 1 addition & 1 deletion mmcv/transforms/processing.py
@@ -1457,7 +1457,7 @@ class RandomResize(BaseTransform):
def __init__(
self,
scale: Union[Tuple[int, int], Sequence[Tuple[int, int]]],
ratio_range: Tuple[float, float] = None,
ratio_range: Optional[Tuple[float, float]] = None,
resize_type: str = 'Resize',
**resize_kwargs,
) -> None:
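This signature change, repeated in wrappers.py and image.py below, replaces implicit-Optional annotations with explicit Optional[...]: since mypy 0.990 the no_implicit_optional check is on by default, so "ratio_range: Tuple[float, float] = None" is rejected by the v1.2.0 hook. A minimal sketch of the pattern, with hypothetical names:

from typing import Optional, Tuple


# Rejected by current mypy defaults (no_implicit_optional):
#     def resize(scale: Tuple[int, int], ratio_range: Tuple[float, float] = None): ...
# Accepted: the None default is spelled out with Optional.
def resize(scale: Tuple[int, int],
           ratio_range: Optional[Tuple[float, float]] = None) -> Tuple[int, int]:
    """Hypothetical helper mirroring the RandomResize signature above."""
    if ratio_range is None:
        return scale
    low, high = ratio_range
    return int(scale[0] * low), int(scale[1] * high)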
2 changes: 1 addition & 1 deletion mmcv/transforms/wrappers.py
@@ -177,7 +177,7 @@ class KeyMapper(BaseTransform):
"""

def __init__(self,
transforms: Union[Transform, List[Transform]] = None,
transforms: Union[Transform, List[Transform], None] = None,
mapping: Optional[Dict] = None,
remapping: Optional[Dict] = None,
auto_remap: Optional[bool] = None,
4 changes: 2 additions & 2 deletions mmcv/video/optflow.py
@@ -149,8 +149,8 @@ def dequantize_flow(dx: np.ndarray,
dx, dy = (dequantize(d, -max_val, max_val, 255) for d in [dx, dy])

if denorm:
dx *= dx.shape[1]
dy *= dx.shape[0]
dx *= dx.shape[1] # type: ignore
dy *= dx.shape[0] # type: ignore
flow = np.dstack((dx, dy))
return flow

2 changes: 1 addition & 1 deletion mmcv/visualization/image.py
@@ -91,7 +91,7 @@ def imshow_bboxes(img: Union[str, np.ndarray],
def imshow_det_bboxes(img: Union[str, np.ndarray],
bboxes: np.ndarray,
labels: np.ndarray,
class_names: List[str] = None,
class_names: Optional[List[str]] = None,
score_thr: float = 0,
bbox_color: ColorType = 'green',
text_color: ColorType = 'green',
