diff --git a/curvlinops/_torch_base.py b/curvlinops/_torch_base.py
index 2266b5ad..00e71975 100644
--- a/curvlinops/_torch_base.py
+++ b/curvlinops/_torch_base.py
@@ -191,7 +191,7 @@ def __check_tensor_and_preprocess(
         Raises:
             ValueError: If the input tensor has an invalid shape.
         """
-        if X.ndim > 2 or X.shape[0] != self.shape[1]:  # noqa: PLR2004
+        if X.ndim > 2 or X.shape[0] != self.shape[1]:
             raise ValueError(
                 f"Input tensor must have shape ({self.shape[1]},) or "
                 + f"({self.shape[1]}, K), with K arbitrary. Got {X.shape}."
diff --git a/curvlinops/diagonal/hutchinson.py b/curvlinops/diagonal/hutchinson.py
index 4f3e99c8..b7647a5a 100644
--- a/curvlinops/diagonal/hutchinson.py
+++ b/curvlinops/diagonal/hutchinson.py
@@ -62,7 +62,7 @@ def __init__(self, A: LinearOperator):
         Raises:
             ValueError: If the operator is not square.
         """
-        if len(A.shape) != 2 or A.shape[0] != A.shape[1]:  # noqa: PLR2004
+        if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
             raise ValueError(f"A must be square. Got shape {A.shape}.")

         self._A = A
diff --git a/curvlinops/fisher.py b/curvlinops/fisher.py
index d5bc9d9d..d4a1e5e5 100644
--- a/curvlinops/fisher.py
+++ b/curvlinops/fisher.py
@@ -292,7 +292,7 @@ def sample_grad_output(self, output: Tensor, num_samples: int, y: Tensor) -> Ten
             NotImplementedError: If the prediction does not have two dimensions.
             NotImplementedError: If binary classification labels are not binary.
         """
-        if output.ndim != 2:  # noqa: PLR2004
+        if output.ndim != 2:
             raise NotImplementedError(f"Only 2d outputs supported. Got {output.shape}")

         C = output.shape[1]
diff --git a/curvlinops/kfac.py b/curvlinops/kfac.py
index b0435e33..1f6e488e 100644
--- a/curvlinops/kfac.py
+++ b/curvlinops/kfac.py
@@ -548,7 +548,7 @@ def draw_label(self, output: Tensor) -> Tensor:
             ValueError: If the output is not 2d.
             NotImplementedError: If the loss function is not supported.
         """
-        if output.ndim != 2:  # noqa: PLR2004
+        if output.ndim != 2:
             raise ValueError("Only a 2d output is supported.")

         if isinstance(self._loss_func, MSELoss):
diff --git a/curvlinops/kfac_utils.py b/curvlinops/kfac_utils.py
index 679cbf27..d7061739 100644
--- a/curvlinops/kfac_utils.py
+++ b/curvlinops/kfac_utils.py
@@ -100,7 +100,7 @@ def loss_hessian_matrix_sqrt(
         NotImplementedError: If the loss function is ``BCEWithLogitsLoss`` but the
             target is not binary.
     """
-    if output_one_datum.ndim != 2 or output_one_datum.shape[0] != 1:  # noqa: PLR2004
+    if output_one_datum.ndim != 2 or output_one_datum.shape[0] != 1:
         raise ValueError(
             f"Expected 'output_one_datum' to be 2d with shape [1, C], got "
             f"{output_one_datum.shape}"
diff --git a/curvlinops/trace/hutchinson.py b/curvlinops/trace/hutchinson.py
index 26f5e8a8..a2d1506c 100644
--- a/curvlinops/trace/hutchinson.py
+++ b/curvlinops/trace/hutchinson.py
@@ -58,7 +58,7 @@ def __init__(self, A: LinearOperator):
         Raises:
             ValueError: If the operator is not square.
         """
-        if len(A.shape) != 2 or A.shape[0] != A.shape[1]:  # noqa: PLR2004
+        if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
             raise ValueError(f"A must be square. Got shape {A.shape}.")

         self._A = A
diff --git a/docs/examples/basic_usage/example_benchmark.py b/docs/examples/basic_usage/example_benchmark.py
index 3e3ca718..925febfb 100644
--- a/docs/examples/basic_usage/example_benchmark.py
+++ b/docs/examples/basic_usage/example_benchmark.py
@@ -185,7 +185,7 @@ def setup_problem(
         for m in supported_layers:
             # ignore the last layer of GPT because it has 50k outputs, which
             # will yield an extremely large Kronecker factor
-            if all(d <= 50_000 for d in m.weight.shape):  # noqa: PLR2004
+            if all(d <= 50_000 for d in m.weight.shape):
                 params.extend([p for p in m.parameters() if p.requires_grad])
     else:
         params = [p for p in model.parameters() if p.requires_grad]
diff --git a/docs/examples/basic_usage/example_inverses.py b/docs/examples/basic_usage/example_inverses.py
index b68840c0..849babd3 100644
--- a/docs/examples/basic_usage/example_inverses.py
+++ b/docs/examples/basic_usage/example_inverses.py
@@ -249,7 +249,7 @@
 # of the matrix to be inverted:
 max_eigval = eigsh(damped_GGN, k=1, which="LM", return_eigenvectors=False)[0]
 # eigenvalues (scale * damped_GGN_mat) are in [0; 2)
-scale = 1.0 if max_eigval < 2.0 else 1.99 / max_eigval  # noqa: PLR2004
+scale = 1.0 if max_eigval < 2.0 else 1.99 / max_eigval

 # %%
 #
diff --git a/test/test_inverse.py b/test/test_inverse.py
index 312c6e5e..eae8528b 100644
--- a/test/test_inverse.py
+++ b/test/test_inverse.py
@@ -137,7 +137,7 @@ def test_Neumann_inverse_damped_GGN_matvec(inv_case, delta: float = 1e-2):

     # set scale such that Neumann series converges
     eval_max = eigh(damped_GGN_functorch)[0][-1]
-    scale = 1.0 if eval_max < 2 else 1.9 / eval_max  # noqa: PLR2004
+    scale = 1.0 if eval_max < 2 else 1.9 / eval_max

     # NOTE This may break when other cases are added because slow convergence
     inv_GGN = NeumannInverseLinearOperator(GGN + damping, num_terms=7_000, scale=scale)
diff --git a/test/utils.py b/test/utils.py
index aa913a00..f36210db 100644
--- a/test/utils.py
+++ b/test/utils.py
@@ -250,7 +250,7 @@ def forward(self, x: Tensor) -> Tensor:
         # Example: Transformer for translation: (batch, sequence_length, c)
         # (although second and third dimension would have to be transposed for
         # classification)
-        if x.ndim > 2 and self.loss == "CE":  # noqa: PLR2004
+        if x.ndim > 2 and self.loss == "CE":
             x = rearrange(x, "batch ... c -> batch c ...")
         return x