[REF] Delete noqas of PLR2004
f-dangel committed Jan 9, 2025
1 parent 83cfa3b commit 11b94a6
Showing 10 changed files with 10 additions and 10 deletions.
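For context: PLR2004 is Ruff's magic-value-comparison rule (inherited from Pylint), which flags bare numeric literals used in comparisons. Every deleted `# noqa: PLR2004` comment below suppressed exactly this warning; removing them presumably goes hand in hand with the rule no longer being selected in the project's lint configuration, since otherwise Ruff would start reporting these lines again. A minimal sketch of what the rule reacts to (the tensor here is made up for illustration):

```python
import torch

output = torch.zeros(4, 10)

# With PLR2004 enabled, Ruff flags the literal ``2`` in this comparison as a
# "magic value"; a trailing ``# noqa: PLR2004`` silences that warning.
if output.ndim != 2:
    raise NotImplementedError(f"Only 2d outputs supported. Got {output.shape}")
```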
2 changes: 1 addition & 1 deletion curvlinops/_torch_base.py
@@ -191,7 +191,7 @@ def __check_tensor_and_preprocess(
         Raises:
             ValueError: If the input tensor has an invalid shape.
         """
-        if X.ndim > 2 or X.shape[0] != self.shape[1]:  # noqa: PLR2004
+        if X.ndim > 2 or X.shape[0] != self.shape[1]:
             raise ValueError(
                 f"Input tensor must have shape ({self.shape[1]},) or "
                 + f"({self.shape[1]}, K), with K arbitrary. Got {X.shape}."
2 changes: 1 addition & 1 deletion curvlinops/diagonal/hutchinson.py
@@ -62,7 +62,7 @@ def __init__(self, A: LinearOperator):
         Raises:
             ValueError: If the operator is not square.
         """
-        if len(A.shape) != 2 or A.shape[0] != A.shape[1]:  # noqa: PLR2004
+        if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
             raise ValueError(f"A must be square. Got shape {A.shape}.")
         self._A = A
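For readers unfamiliar with the class touched here: the squareness check exists because Hutchinson-style diagonal estimation multiplies A by a random probe vector and then reads off the diagonal elementwise, which requires input and output dimension to match. A minimal NumPy sketch of the idea, independent of curvlinops' actual API:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 50))

# Hutchinson-style diagonal estimation: for Rademacher probes v with
# E[v v^T] = I, the elementwise product v * (A @ v) has expectation diag(A).
num_samples = 100_000
est = np.zeros(A.shape[0])
for _ in range(num_samples):
    v = rng.choice([-1.0, 1.0], size=A.shape[0])
    est += v * (A @ v)
est /= num_samples

print(np.max(np.abs(est - np.diag(A))))  # shrinks as num_samples grows
```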
2 changes: 1 addition & 1 deletion curvlinops/fisher.py
@@ -292,7 +292,7 @@ def sample_grad_output(self, output: Tensor, num_samples: int, y: Tensor) -> Tensor
             NotImplementedError: If the prediction does not have two dimensions.
             NotImplementedError: If binary classification labels are not binary.
         """
-        if output.ndim != 2:  # noqa: PLR2004
+        if output.ndim != 2:
             raise NotImplementedError(f"Only 2d outputs supported. Got {output.shape}")

         C = output.shape[1]
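The surrounding method draws would-be gradients of the loss w.r.t. the model's output, the building block of a Monte-Carlo Fisher approximation. As a hedged illustration only (not curvlinops' implementation; the function name and shapes are assumptions), here is how such sampling could look for softmax cross-entropy, where the gradient w.r.t. the logits is softmax(output) minus the one-hot label:

```python
import torch

def sample_grad_output_softmax_ce(output: torch.Tensor, num_samples: int) -> torch.Tensor:
    """Sample gradients w.r.t. 2d ``output`` under labels drawn from the model."""
    if output.ndim != 2:
        raise NotImplementedError(f"Only 2d outputs supported. Got {output.shape}")
    N, C = output.shape
    p = output.softmax(dim=1)  # (N, C) predictive distribution
    # Draw labels from the model's own distribution, then form the gradients.
    y = torch.multinomial(p, num_samples, replacement=True)  # (N, num_samples)
    onehot = torch.nn.functional.one_hot(y, num_classes=C).to(p.dtype)
    return p.unsqueeze(1) - onehot  # (N, num_samples, C)
```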
2 changes: 1 addition & 1 deletion curvlinops/kfac.py
@@ -548,7 +548,7 @@ def draw_label(self, output: Tensor) -> Tensor:
             ValueError: If the output is not 2d.
             NotImplementedError: If the loss function is not supported.
         """
-        if output.ndim != 2:  # noqa: PLR2004
+        if output.ndim != 2:
             raise ValueError("Only a 2d output is supported.")

         if isinstance(self._loss_func, MSELoss):
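`draw_label` samples a label from the likelihood implied by the loss function; for `MSELoss` that likelihood is a unit-variance Gaussian centered at the prediction. A sketch of the idea under that assumption (an illustration, not the library's exact code):

```python
import torch
from torch.nn import MSELoss

def draw_label_sketch(loss_func, output: torch.Tensor) -> torch.Tensor:
    if output.ndim != 2:
        raise ValueError("Only a 2d output is supported.")
    if isinstance(loss_func, MSELoss):
        # MSELoss corresponds to a Gaussian likelihood with unit variance,
        # so a sampled label is the prediction plus standard-normal noise.
        return output + torch.randn_like(output)
    raise NotImplementedError("Sketch only covers MSELoss.")
```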
2 changes: 1 addition & 1 deletion curvlinops/kfac_utils.py
@@ -100,7 +100,7 @@ def loss_hessian_matrix_sqrt(
         NotImplementedError: If the loss function is ``BCEWithLogitsLoss`` but the
             target is not binary.
     """
-    if output_one_datum.ndim != 2 or output_one_datum.shape[0] != 1:  # noqa: PLR2004
+    if output_one_datum.ndim != 2 or output_one_datum.shape[0] != 1:
        raise ValueError(
            f"Expected 'output_one_datum' to be 2d with shape [1, C], got "
            f"{output_one_datum.shape}"
2 changes: 1 addition & 1 deletion curvlinops/trace/hutchinson.py
@@ -58,7 +58,7 @@ def __init__(self, A: LinearOperator):
         Raises:
             ValueError: If the operator is not square.
         """
-        if len(A.shape) != 2 or A.shape[0] != A.shape[1]:  # noqa: PLR2004
+        if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
             raise ValueError(f"A must be square. Got shape {A.shape}.")
         self._A = A
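This is the trace counterpart of the diagonal estimator above and needs a square A for the same reason: Hutchinson's estimator averages the quadratic form vᵀAv, whose expectation is tr(A) for probes with E[v vᵀ] = I. A minimal NumPy sketch:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 50))

# Hutchinson's trace estimator: E[v^T A v] = tr(A) for Rademacher probes v.
num_samples = 10_000
samples = []
for _ in range(num_samples):
    v = rng.choice([-1.0, 1.0], size=A.shape[0])
    samples.append(v @ A @ v)

print(np.mean(samples), np.trace(A))  # the two should be close
```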
2 changes: 1 addition & 1 deletion docs/examples/basic_usage/example_benchmark.py
@@ -185,7 +185,7 @@ def setup_problem(
         for m in supported_layers:
             # ignore the last layer of GPT because it has 50k outputs, which
             # will yield an extremely large Kronecker factor
-            if all(d <= 50_000 for d in m.weight.shape):  # noqa: PLR2004
+            if all(d <= 50_000 for d in m.weight.shape):
                 params.extend([p for p in m.parameters() if p.requires_grad])
     else:
         params = [p for p in model.parameters() if p.requires_grad]
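To see why a 50k-output layer is excluded: one of its Kronecker factors would be a dense 50,000 × 50,000 matrix. A quick back-of-the-envelope check (assuming float32, 4 bytes per entry):

```python
# Memory of a dense 50_000 x 50_000 Kronecker factor in float32:
print(f"{50_000**2 * 4 / 1e9:.0f} GB")  # 10 GB for a single factor
```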
2 changes: 1 addition & 1 deletion docs/examples/basic_usage/example_inverses.py
@@ -249,7 +249,7 @@
 # of the matrix to be inverted:
 max_eigval = eigsh(damped_GGN, k=1, which="LM", return_eigenvectors=False)[0]
 # eigenvalues (scale * damped_GGN_mat) are in [0; 2)
-scale = 1.0 if max_eigval < 2.0 else 1.99 / max_eigval  # noqa: PLR2004
+scale = 1.0 if max_eigval < 2.0 else 1.99 / max_eigval

 # %%
 #
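The scale in this hunk enforces the convergence condition of the truncated Neumann series: A⁻¹ = Σₖ (I − A)ᵏ converges only if A's eigenvalues lie in (0, 2), so when the largest eigenvalue exceeds 2, the series is applied to scale · A and the result is multiplied by scale. A self-contained NumPy sketch of the trick (the test matrix is a stand-in for the damped GGN):

```python
import numpy as np

rng = np.random.default_rng(0)
B = rng.standard_normal((20, 20))
A = B @ B.T + np.eye(20)  # symmetric positive definite stand-in

# Neumann series: A^{-1} = sum_k (I - A)^k, convergent iff eigvals(A) in (0, 2).
# Otherwise, invert scale * A instead and multiply the result by scale.
max_eigval = np.linalg.eigvalsh(A)[-1]
scale = 1.0 if max_eigval < 2.0 else 1.99 / max_eigval

inv_approx = np.zeros_like(A)
term = np.eye(A.shape[0])
for _ in range(5_000):
    inv_approx += term
    term = term @ (np.eye(A.shape[0]) - scale * A)
inv_approx *= scale  # A^{-1} = scale * (scale * A)^{-1}

print(np.max(np.abs(inv_approx - np.linalg.inv(A))))  # ~0
```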
2 changes: 1 addition & 1 deletion test/test_inverse.py
@@ -137,7 +137,7 @@ def test_Neumann_inverse_damped_GGN_matvec(inv_case, delta: float = 1e-2):

     # set scale such that Neumann series converges
     eval_max = eigh(damped_GGN_functorch)[0][-1]
-    scale = 1.0 if eval_max < 2 else 1.9 / eval_max  # noqa: PLR2004
+    scale = 1.0 if eval_max < 2 else 1.9 / eval_max

     # NOTE This may break when other cases are added because slow convergence
     inv_GGN = NeumannInverseLinearOperator(GGN + damping, num_terms=7_000, scale=scale)
2 changes: 1 addition & 1 deletion test/utils.py
@@ -250,7 +250,7 @@ def forward(self, x: Tensor) -> Tensor:
         # Example: Transformer for translation: (batch, sequence_length, c)
         # (although second and third dimension would have to be transposed for
         # classification)
-        if x.ndim > 2 and self.loss == "CE":  # noqa: PLR2004
+        if x.ndim > 2 and self.loss == "CE":
             x = rearrange(x, "batch ... c -> batch c ...")
         return x
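The rearrange in this hunk exists because PyTorch's `CrossEntropyLoss` expects the class dimension at position 1 for multi-dimensional predictions, i.e. (batch, C, d1, ...), while the model emits (batch, ..., C). A small self-contained demonstration with made-up shapes:

```python
import torch
from einops import rearrange

x = torch.randn(8, 12, 30)  # e.g. (batch, sequence_length, classes)
y = torch.randint(30, (8, 12))  # class indices per sequence position

# CrossEntropyLoss wants (batch, classes, sequence_length), so move the
# class dimension to position 1 before computing the loss.
x = rearrange(x, "batch ... c -> batch c ...")
loss = torch.nn.functional.cross_entropy(x, y)
print(loss)  # scalar loss
```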
