Test consistency with numpy #78

Open
ev-br opened this issue Mar 4, 2023 · 1 comment
Labels
enhancement (New feature or request)

Comments

@ev-br
Collaborator

ev-br commented Mar 4, 2023

#77 puts in place nice scaffolding for toggling the context of individual tests between the original numpy and torch_np. Let's think about how best to organize this sort of testing.
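For context, a minimal sketch of what that toggle could look like as a pytest fixture (illustrative only, not the actual code from #77):

# conftest.py -- rough sketch of an `np` fixture that runs each test twice,
# once against the original NumPy and once against torch_np
import numpy
import torch_np

import pytest


@pytest.fixture(params=[numpy, torch_np], ids=["numpy", "torch_np"])
def np(request):
    # tests accept `np` as an argument and use it in place of a module-level
    # import, e.g. `np.array(...)` or `np.testing.assert_equal(...)`
    return request.param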

@honno
Contributor

honno commented Mar 4, 2023

Oops, forgot to share the sample diff showing how to do this for test_indexing.py

diff of how you'd modify `test_indexing.py` to utilise the introduced `np` parameter
diff --git a/torch_np/tests/numpy_tests/core/test_indexing.py b/torch_np/tests/numpy_tests/core/test_indexing.py
index ba6ef58..4659813 100644
--- a/torch_np/tests/numpy_tests/core/test_indexing.py
+++ b/torch_np/tests/numpy_tests/core/test_indexing.py
@@ -7,22 +7,22 @@ import re
 import pytest
 from pytest import raises as assert_raises
 
+import numpy
+import torch_np
 import torch_np as np
 # from numpy.core._multiarray_tests import array_indexing  # numpy implements this in C
 from itertools import product
 from torch_np.testing import (
-    assert_, assert_equal, assert_raises_regex,
+    assert_, assert_raises_regex,
     assert_array_equal, assert_warns, HAS_REFCOUNT, IS_WASM
     )
 
 
-xfail_neg_step = pytest.mark.xfail(
-    reason="torch does not support indexing with negative slice steps"
-)
+neg_step_reason = "torch does not support indexing with negative slice steps"
 
 
 class TestIndexing:
-    def test_index_no_floats(self):
+    def test_index_no_floats(self, np):
         a = np.array([[[5]]])
 
         assert_raises(IndexError, lambda: a[0.0])
@@ -58,10 +58,15 @@ class TestIndexing:
         #     TypeError: slice indices must be integers or None or have an
         #     __index__ method
         #
-        assert_raises((IndexError, TypeError), lambda: a[0.0:, 0.0])
-        assert_raises((IndexError, TypeError), lambda: a[0.0:, 0.0,:])
-
-    def test_slicing_no_floats(self):
+        if np is numpy:
+            error = IndexError
+        else:
+            assert np is torch_np  # sanity check
+            error = TypeError
+        assert_raises(error, lambda: a[0.0:, 0.0])
+        assert_raises(error, lambda: a[0.0:, 0.0,:])
+
+    def test_slicing_no_floats(self, np):
         a = np.array([[5]])
 
         # start as float.
@@ -92,8 +97,9 @@ class TestIndexing:
         # should still get the DeprecationWarning if step = 0.
         assert_raises(TypeError, lambda: a[::0.0])
 
-    @pytest.mark.skip(reason="torch allows slicing with non-0d array components")
-    def test_index_no_array_to_index(self):
+    def test_index_no_array_to_index(self, np):
+        if np is torch_np:
+            pytest.skip("torch allows slicing with non-0d array components")
         # No non-scalar arrays.
         a = np.array([[[1]]])
 
@@ -105,107 +111,114 @@ class TestIndexing:
         #     array([], shape=(0, 1, 1), dtype=int64)
         #
 
-    def test_none_index(self):
+    def test_none_index(self, np):
         # `None` index adds newaxis
         a = np.array([1, 2, 3])
-        assert_equal(a[None], a[np.newaxis])
-        assert_equal(a[None].ndim, a.ndim + 1)
+        np.testing.assert_equal(a[None], a[np.newaxis])
+        np.testing.assert_equal(a[None].ndim, a.ndim + 1)
 
-    def test_empty_tuple_index(self):
+    def test_empty_tuple_index(self, np):
         # Empty tuple index creates a view
         a = np.array([1, 2, 3])
-        assert_equal(a[()], a)
+        np.testing.assert_equal(a[()], a)
         assert_(a[()].base is a)
         a = np.array(0)
-        pytest.skip(
-            "torch doesn't have scalar types with distinct instancing behaviours"
-        )
+        if np is torch_np:
+            pytest.skip(
+                "torch doesn't have scalar types with distinct instancing behaviours"
+            )
         assert_(isinstance(a[()], np.int_))
 
-    def test_same_kind_index_casting(self):
+    def test_same_kind_index_casting(self, np):
         # Indexes should be cast with same-kind and not safe, even if that
         # is somewhat unsafe. So test various different code paths.
         index = np.arange(5)
         u_index = index.astype(np.uint8)  # i.e. cast to default uint indexing dtype
         arr = np.arange(10)
 
-        assert_array_equal(arr[index], arr[u_index])
+        np.testing.assert_array_equal(arr[index], arr[u_index])
         arr[u_index] = np.arange(5)
-        assert_array_equal(arr, np.arange(10))
+        np.testing.assert_array_equal(arr, np.arange(10))
 
         arr = np.arange(10).reshape(5, 2)
-        assert_array_equal(arr[index], arr[u_index])
+        np.testing.assert_array_equal(arr[index], arr[u_index])
 
         arr[u_index] = np.arange(5)[:,None]
-        pytest.xfail("XXX: repeat() not implemented")
-        assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
+        if np is torch_np:
+            pytest.xfail("XXX: repeat() not implemented")
+        np.testing.assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
 
         arr = np.arange(25).reshape(5, 5)
-        assert_array_equal(arr[u_index, u_index], arr[index, index])
+        np.testing.assert_array_equal(arr[u_index, u_index], arr[index, index])
 
-    def test_empty_fancy_index(self):
+    def test_empty_fancy_index(self, np):
         # Empty list index creates an empty array
         # with the same dtype (but with weird shape)
         a = np.array([1, 2, 3])
-        assert_equal(a[[]], [])
-        assert_equal(a[[]].dtype, a.dtype)
+        np.testing.assert_equal(a[[]], [])
+        np.testing.assert_equal(a[[]].dtype, a.dtype)
 
         b = np.array([], dtype=np.intp)
-        assert_equal(a[[]], [])
-        assert_equal(a[[]].dtype, a.dtype)
+        np.testing.assert_equal(a[[]], [])
+        np.testing.assert_equal(a[[]].dtype, a.dtype)
 
         b = np.array([])
         assert_raises(IndexError, a.__getitem__, b)
 
-    def test_ellipsis_index(self):
+    def test_ellipsis_index(self, np):
         a = np.array([[1, 2, 3],
                       [4, 5, 6],
                       [7, 8, 9]])
         assert_(a[...] is not a)
-        assert_equal(a[...], a)
+        np.testing.assert_equal(a[...], a)
         # `a[...]` was `a` in numpy <1.9.
         assert_(a[...].base is a)
 
         # Slicing with ellipsis can skip an
         # arbitrary number of dimensions
-        assert_equal(a[0, ...], a[0])
-        assert_equal(a[0, ...], a[0,:])
-        assert_equal(a[..., 0], a[:, 0])
+        np.testing.assert_equal(a[0, ...], a[0])
+        np.testing.assert_equal(a[0, ...], a[0,:])
+        np.testing.assert_equal(a[..., 0], a[:, 0])
 
         # Slicing with ellipsis always results
         # in an array, not a scalar
-        assert_equal(a[0, ..., 1], np.array(2))
+        np.testing.assert_equal(a[0, ..., 1], np.array(2))
 
         # Assignment with `(Ellipsis,)` on 0-d arrays
         b = np.array(1)
         b[(Ellipsis,)] = 2
-        assert_equal(b, 2)
+        np.testing.assert_equal(b, 2)
 
-    def test_single_int_index(self):
+    def test_single_int_index(self, np):
         # Single integer index selects one row
         a = np.array([[1, 2, 3],
                       [4, 5, 6],
                       [7, 8, 9]])
 
-        assert_equal(a[0], [1, 2, 3])
-        assert_equal(a[-1], [7, 8, 9])
+        np.testing.assert_equal(a[0], [1, 2, 3])
+        np.testing.assert_equal(a[-1], [7, 8, 9])
 
         # Index out of bounds produces IndexError
         assert_raises(IndexError, a.__getitem__, 1 << 30)
         # Index overflow produces IndexError
         # Note torch raises RuntimeError here
-        assert_raises((IndexError, RuntimeError), a.__getitem__, 1 << 64)
-
-    def test_single_bool_index(self):
+        if np is numpy:
+            error = IndexError
+        else:
+            assert np is torch_np  # sanity check
+            error = RuntimeError
+        assert_raises(error, a.__getitem__, 1 << 64)
+
+    def test_single_bool_index(self, np):
         # Single boolean index
         a = np.array([[1, 2, 3],
                       [4, 5, 6],
                       [7, 8, 9]])
 
-        assert_equal(a[np.array(True)], a[None])
-        assert_equal(a[np.array(False)], a[None][0:0])
+        np.testing.assert_equal(a[np.array(True)], a[None])
+        np.testing.assert_equal(a[np.array(False)], a[None][0:0])
 
-    def test_boolean_shape_mismatch(self):
+    def test_boolean_shape_mismatch(self, np):
         arr = np.ones((5, 4, 3))
 
         index = np.array([True])
@@ -219,17 +232,17 @@ class TestIndexing:
 
         assert_raises(IndexError, arr.__getitem__, (slice(None), index))
 
-    def test_boolean_indexing_onedim(self):
+    def test_boolean_indexing_onedim(self, np):
         # Indexing a 2-dimensional array with
         # boolean array of length one
         a = np.array([[ 0.,  0.,  0.]])
         b = np.array([ True], dtype=bool)
-        assert_equal(a[b], a)
+        np.testing.assert_equal(a[b], a)
         # boolean assignment
         a[b] = 1.
-        assert_equal(a, [[1., 1., 1.]])
+        np.testing.assert_equal(a, [[1., 1., 1.]])
 
-    def test_boolean_assignment_value_mismatch(self):
+    def test_boolean_assignment_value_mismatch(self, np):
         # A boolean assignment should fail when the shape of the values
         # cannot be broadcast to the subscription. (see also gh-3458)
         a = np.arange(4)
@@ -237,11 +250,16 @@ class TestIndexing:
         def f(a, v):
             a[a > -1] = v
 
-        assert_raises((ValueError, TypeError), f, a, [])
-        assert_raises((ValueError, TypeError), f, a, [1, 2, 3])
-        assert_raises((ValueError, TypeError), f, a[:1], [1, 2, 3])
+        if np is numpy:
+            error = ValueError
+        else:
+            assert np is torch_np  # sanity check
+            error = TypeError
+        assert_raises(error, f, a, [])
+        assert_raises(error, f, a, [1, 2, 3])
+        assert_raises(error, f, a[:1], [1, 2, 3])
 
-    def test_boolean_indexing_twodim(self):
+    def test_boolean_indexing_twodim(self, np):
         # Indexing a 2-dimensional array with
         # 2-dimensional boolean array
         a = np.array([[1, 2, 3],
@@ -250,27 +268,28 @@ class TestIndexing:
         b = np.array([[ True, False,  True],
                       [False,  True, False],
                       [ True, False,  True]])
-        assert_equal(a[b], [1, 3, 5, 7, 9])
-        assert_equal(a[b[1]], [[4, 5, 6]])
-        assert_equal(a[b[0]], a[b[2]])
+        np.testing.assert_equal(a[b], [1, 3, 5, 7, 9])
+        np.testing.assert_equal(a[b[1]], [[4, 5, 6]])
+        np.testing.assert_equal(a[b[0]], a[b[2]])
 
         # boolean assignment
         a[b] = 0
-        assert_equal(a, [[0, 2, 0],
+        np.testing.assert_equal(a, [[0, 2, 0],
                          [4, 0, 6],
                          [0, 8, 0]])
 
-    def test_boolean_indexing_list(self):
+    def test_boolean_indexing_list(self, np):
         # Regression test for #13715. It's a use-after-free bug which the
         # test won't directly catch, but it will show up in valgrind.
         a = np.array([1, 2, 3])
         b = [True, False, True]
         # Two variants of the test because the first takes a fast path
-        assert_equal(a[b], [1, 3])
-        assert_equal(a[None, b], [[1, 3]])
+        np.testing.assert_equal(a[b], [1, 3])
+        np.testing.assert_equal(a[None, b], [[1, 3]])
 
-    @xfail_neg_step
-    def test_reverse_strides_and_subspace_bufferinit(self):
+    def test_reverse_strides_and_subspace_bufferinit(self, np):
+        if np is torch_np:
+            pytest.xfail(neg_step_reason)
         # This tests that the strides are not reversed for simple and
         # subspace fancy indexing.
         a = np.ones(5)
@@ -279,25 +298,26 @@ class TestIndexing:
 
         a[b] = c
         # If the strides are not reversed, the 0 in the arange comes last.
-        assert_equal(a[0], 0)
+        np.testing.assert_equal(a[0], 0)
 
         # This also tests that the subspace buffer is initialized:
         a = np.ones((5, 2))
         c = np.arange(10).reshape(5, 2)[::-1]
         a[b, :] = c
-        assert_equal(a[0], [0, 1])
+        np.testing.assert_equal(a[0], [0, 1])
 
-    @xfail_neg_step
-    def test_reversed_strides_result_allocation(self):
+    def test_reversed_strides_result_allocation(self, np):
+        if np is torch_np:
+            pytest.xfail(neg_step_reason)
         # Test a bug when calculating the output strides for a result array
         # when the subspace size was 1 (and test other cases as well)
         a = np.arange(10)[:, None]
         i = np.arange(10)[::-1]
-        assert_array_equal(a[i], a[i.copy('C')])
+        np.testing.assert_array_equal(a[i], a[i.copy('C')])
 
         a = np.arange(20).reshape(-1, 2)
 
-    def test_uncontiguous_subspace_assignment(self):
+    def test_uncontiguous_subspace_assignment(self, np):
         # During development there was a bug activating a skip logic
         # based on ndim instead of size.
         a = np.full((3, 4, 2), -1)
@@ -306,28 +326,29 @@ class TestIndexing:
         a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
         b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
 
-        assert_equal(a, b)
+        np.testing.assert_equal(a, b)
 
-    @pytest.mark.skip(reason="torch does not limit dims to 32")
-    def test_too_many_fancy_indices_special_case(self):
+    def test_too_many_fancy_indices_special_case(self, np):
+        if np is torch_np:
+            pytest.skip("torch does not limit dims to 32")
         # Just documents behaviour, this is a small limitation.
         a = np.ones((1,) * 32)  # 32 is NPY_MAXDIMS
         assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
 
-    def test_scalar_array_bool(self):
+    def test_scalar_array_bool(self, np):
         # NumPy bools can be used as boolean index (python ones as of yet not)
         a = np.array(1)
-        assert_equal(a[np.bool_(True)], a[np.array(True)])
-        assert_equal(a[np.bool_(False)], a[np.array(False)])
+        np.testing.assert_equal(a[np.bool_(True)], a[np.array(True)])
+        np.testing.assert_equal(a[np.bool_(False)], a[np.array(False)])
 
         # After deprecating bools as integers:
         #a = np.array([0,1,2])
-        #assert_equal(a[True, :], a[None, :])
-        #assert_equal(a[:, True], a[:, None])
+        #np.testing.assert_equal(a[True, :], a[None, :])
+        #np.testing.assert_equal(a[:, True], a[:, None])
         #
         #assert_(not np.may_share_memory(a, a[True, :]))
 
-    def test_everything_returns_views(self):
+    def test_everything_returns_views(self, np):
         # Before `...` would return a itself.
         a = np.arange(5)
 
@@ -335,12 +356,12 @@ class TestIndexing:
         assert_(a is not a[...])
         assert_(a is not a[:])
 
-    def test_broaderrors_indexing(self):
+    def test_broaderrors_indexing(self, np):
         a = np.zeros((5, 5))
         assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
         assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
 
-    def test_trivial_fancy_out_of_bounds(self):
+    def test_trivial_fancy_out_of_bounds(self, np):
         a = np.zeros(5)
         ind = np.ones(20, dtype=np.intp)
         ind[-1] = 10
@@ -351,12 +372,12 @@ class TestIndexing:
         assert_raises(IndexError, a.__getitem__, ind)
         assert_raises((IndexError, RuntimeError), a.__setitem__, ind, 0)
 
-    def test_trivial_fancy_not_possible(self):
+    def test_trivial_fancy_not_possible(self, np):
         # Test that the fast path for trivial assignment is not incorrectly
         # used when the index is not contiguous or 1D, see also gh-11467.
         a = np.arange(6)
         idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
-        assert_array_equal(a[idx], idx)
+        np.testing.assert_array_equal(a[idx], idx)
 
         # this case must not go into the fast path, note that idx is
         # a non-contiuguous none 1D array here.
@@ -364,10 +385,11 @@ class TestIndexing:
         res = np.arange(6)
         res[0] = -1
         res[3] = -1
-        assert_array_equal(a, res)
+        np.testing.assert_array_equal(a, res)
 
-    @pytest.mark.xfail(reason="XXX: recarray stuff is TBD")
-    def test_subclass_writeable(self):
+    def test_subclass_writeable(self, np):
+        if np is torch_np:
+            pytest.xfail("XXX: recarray stuff is TBD")
         d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
                          dtype=[('target', 'S20'), ('V_mag', '>f4')])
         ind = np.array([False,  True,  True], dtype=bool)
@@ -377,7 +399,7 @@ class TestIndexing:
         assert_(d[...].flags.writeable)
         assert_(d[0].flags.writeable)
 
-    def test_memory_order(self):
+    def test_memory_order(self, np):
         # This is not necessary to preserve. Memory layouts for
         # more complex indices are not as simple.
         a = np.arange(10)
@@ -388,7 +410,7 @@ class TestIndexing:
         a = a.reshape(-1, 1)
         assert_(a[b, 0].flags.f_contiguous)
 
-    def test_small_regressions(self):
+    def test_small_regressions(self, np):
         # Reference count of intp for index checks
         a = np.array([0])
         if HAS_REFCOUNT:
@@ -402,9 +424,9 @@ class TestIndexing:
                       np.array([1], dtype=np.uint8), 1)
 
         if HAS_REFCOUNT:
-            assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
+            np.testing.assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
 
-    def test_tuple_subclass(self):
+    def test_tuple_subclass(self, np):
         arr = np.ones((5, 5))
 
         # A tuple subclass should also be an nd-index
@@ -416,32 +438,33 @@ class TestIndexing:
         # Unlike the non nd-index:
         assert_(arr[index,].shape != (1,))
 
-    @pytest.mark.xfail(reason="XXX: low-prio behaviour to support")
-    def test_broken_sequence_not_nd_index(self):
+    def test_broken_sequence_not_nd_index(self, np):
+        if np is torch_np:
+            pytest.xfail("XXX: low-prio behaviour to support")
         # See https://github.com/numpy/numpy/issues/5063
         # If we have an object which claims to be a sequence, but fails
         # on item getting, this should not be converted to an nd-index (tuple)
         # If this object happens to be a valid index otherwise, it should work
         # This object here is very dubious and probably bad though:
         class SequenceLike:
             def __index__(self):
                 return 0
 
             def __len__(self):
                 return 1
 
             def __getitem__(self, item):
                 raise IndexError('Not possible')
 
         arr = np.arange(10)
-        assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
+        np.testing.assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
 
         # also test that field indexing does not segfault
         # for a similar reason, by indexing a structured array
         arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
-        assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
+        np.testing.assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
 
-    def test_indexing_array_weird_strides(self):
+    def test_indexing_array_weird_strides(self, np):
         # See also gh-6221
         # the shapes used here come from the issue and create the correct
         # size for the iterator buffering size.
@@ -451,13 +474,14 @@ class TestIndexing:
         ind = np.broadcast_to(ind, (10, 55, 4, 4))
 
         # single advanced index case
-        assert_array_equal(x[ind], x[ind.copy()])
+        np.testing.assert_array_equal(x[ind], x[ind.copy()])
         # higher dimensional advanced index
         zind = np.zeros(4, dtype=np.intp)
-        assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
+        np.testing.assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
 
-    @xfail_neg_step
-    def test_indexing_array_negative_strides(self):
+    def test_indexing_array_negative_strides(self, np):
+        if np is torch_np:
+            pytest.xfail(neg_step_reason)
         # From gh-8264,
         # core dumps if negative strides are used in iteration
         arro = np.zeros((4, 4))
@@ -465,7 +489,7 @@ class TestIndexing:
 
         slices = (slice(None), [0, 1, 2, 3])
         arr[slices] = 10
-        assert_array_equal(arr, 10.)
+        np.testing.assert_array_equal(arr, 10.)
 
     @pytest.mark.parametrize("index",
             [True, False, np.array([0])])
@@ -475,7 +499,7 @@ class TestIndexing:
         # These are limitations based on the number of arguments we can process.
         # For `num=32` (and all boolean cases), the result is actually define;
         # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
-        if not (isinstance(index, np.ndarray) and original_ndim < num):
+        if np is torch_np and not (isinstance(index, np.ndarray) and original_ndim < num):
             # unskipped cases fail because of assigning too many indices
             pytest.skip("torch does not limit dims to 32")
         arr = np.ones((1,) * original_ndim)
@@ -485,16 +509,17 @@ class TestIndexing:
             arr[(index,) * num] = 1.
 
 
-    def test_nontuple_ndindex(self):
+    def test_nontuple_ndindex(self, np):
         a = np.arange(25).reshape((5, 5))
-        assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
-        assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
-        pytest.skip(
-            "torch happily consumes non-tuple sequences with multi-axis "
-            "indices (i.e. slices) as an index, whereas NumPy invalidates "
-            "them, assumedly to keep things simple. This invalidation "
-            "behaviour is just too niche to bother emulating."
-        )
+        np.testing.assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
+        np.testing.assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
+        if np is torch_np:
+            pytest.skip(
+                "torch happily consumes non-tuple sequences with multi-axis "
+                "indices (i.e. slices) as an index, whereas NumPy invalidates "
+                "them, assumedly to keep things simple. This invalidation "
+                "behaviour is just too niche to bother emulating."
+            )
         assert_raises(IndexError, a.__getitem__, [slice(None)])
 
 
@@ -503,7 +528,7 @@ class TestBroadcastedAssignments:
         a[ind] = val
         return a
 
-    def test_prepending_ones(self):
+    def test_prepending_ones(self, np):
         a = np.zeros((3, 2))
 
         a[...] = np.ones((1, 3, 2))
@@ -513,7 +538,7 @@ class TestBroadcastedAssignments:
         # Fancy without subspace (with broadcasting)
         a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
 
-    def test_prepend_not_one(self):
+    def test_prepend_not_one(self, np):
         assign = self.assign
         s_ = np.s_
         a = np.zeros(5)
@@ -523,7 +548,7 @@ class TestBroadcastedAssignments:
         assert_raises((ValueError, RuntimeError), assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
         assert_raises((ValueError, RuntimeError), assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
 
-    def test_simple_broadcasting_errors(self):
+    def test_simple_broadcasting_errors(self, np):
         assign = self.assign
         s_ = np.s_
         a = np.zeros((5, 1))
@@ -552,16 +577,18 @@ class TestBroadcastedAssignments:
         )
         assert re.search(fr"[\(\[]{r_inner_shape}[\]\)]$", str(e.value))
 
-    @pytest.mark.xfail(reason="XXX: deal with awkward put-like set operations")
-    def test_index_is_larger(self):
+    def test_index_is_larger(self, np):
+        if np is torch_np:
+            pytest.xfail("XXX: deal with awkward put-like set operations")
         # Simple case of fancy index broadcasting of the index.
         a = np.zeros((5, 5))
         a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
 
         assert_((a[:3, :3] == [2, 3, 4]).all())
 
-    @xfail_neg_step
-    def test_broadcast_subspace(self):
+    def test_broadcast_subspace(self, np):
+        if np is torch_np:
+            pytest.xfail(neg_step_reason)
         a = np.zeros((100, 100))
         v = np.arange(100)[:,None]
         b = np.arange(100)[::-1]
@@ -570,10 +597,11 @@ class TestBroadcastedAssignments:
 
 
 class TestFancyIndexingCast:
-    @pytest.mark.xfail(
-        reason="XXX: low-prio to support assigning complex values on floating arrays"
-    )
-    def test_boolean_index_cast_assign(self):
+    def test_boolean_index_cast_assign(self, np):
+        if np is torch_np:
+            pytest.xfail(
+                "XXX: low-prio to support assigning complex values on floating arrays"
+            )
         # Setup the boolean index and float arrays.
         shape = (8, 63)
         bool_index = np.zeros(shape).astype(bool)
@@ -582,17 +610,17 @@ class TestFancyIndexingCast:
 
         # Assigning float is fine.
         zero_array[bool_index] = np.array([1])
-        assert_equal(zero_array[0, 1], 1)
+        np.testing.assert_equal(zero_array[0, 1], 1)
 
         # Fancy indexing works, although we get a cast warning.
         assert_warns(np.ComplexWarning,
                      zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
-        assert_equal(zero_array[0, 1], 2)  # No complex part
+        np.testing.assert_equal(zero_array[0, 1], 2)  # No complex part
 
         # Cast complex to float, throwing away the imaginary portion.
         assert_warns(np.ComplexWarning,
                      zero_array.__setitem__, bool_index, np.array([1j]))
-        assert_equal(zero_array[0, 1], 0)
+        np.testing.assert_equal(zero_array[0, 1], 0)
 
 @pytest.mark.xfail(reason="XXX: requires broadcast() and broadcast_to()")
 class TestMultiIndexingAutomated:
@@ -617,7 +645,7 @@ class TestMultiIndexingAutomated:
 
     """
 
     def setup_method(self):
         self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
         self.b = np.empty((3, 0, 5, 6))
         self.complex_indices = ['skip', Ellipsis,
@@ -904,7 +932,7 @@ class TestMultiIndexingAutomated:
             assert_raises(type(e), arr.__getitem__, index)
             assert_raises(type(e), arr.__setitem__, index, 0)
             if HAS_REFCOUNT:
-                assert_equal(prev_refcount, sys.getrefcount(arr))
+                np.testing.assert_equal(prev_refcount, sys.getrefcount(arr))
             return
 
         self._compare_index_result(arr, index, mimic_get, no_copy)
@@ -928,7 +956,7 @@ class TestMultiIndexingAutomated:
             assert_raises(type(e), arr.__getitem__, index)
             assert_raises(type(e), arr.__setitem__, index, 0)
             if HAS_REFCOUNT:
-                assert_equal(prev_refcount, sys.getrefcount(arr))
+                np.testing.assert_equal(prev_refcount, sys.getrefcount(arr))
             return
 
         self._compare_index_result(arr, index, mimic_get, no_copy)
@@ -939,7 +967,7 @@ class TestMultiIndexingAutomated:
         pytest.skip("torch does not support subclassing")
         arr = arr.copy()
         indexed_arr = arr[index]
-        assert_array_equal(indexed_arr, mimic_get)
+        np.testing.assert_array_equal(indexed_arr, mimic_get)
         # Check if we got a view, unless its a 0-sized or 0-d array.
         # (then its not a view, and that does not matter)
         if indexed_arr.size != 0 and indexed_arr.ndim != 0:
@@ -948,9 +976,9 @@ class TestMultiIndexingAutomated:
             if HAS_REFCOUNT:
                 if no_copy:
                     # refcount increases by one:
-                    assert_equal(sys.getrefcount(arr), 3)
+                    np.testing.assert_equal(sys.getrefcount(arr), 3)
                 else:
-                    assert_equal(sys.getrefcount(arr), 2)
+                    np.testing.assert_equal(sys.getrefcount(arr), 2)
 
         # Test non-broadcast setitem:
         b = arr.copy()
@@ -960,17 +988,17 @@ class TestMultiIndexingAutomated:
         if no_copy and indexed_arr.ndim != 0:
             # change indexed_arr in-place to manipulate original:
             indexed_arr += 1000
-            assert_array_equal(arr, b)
+            np.testing.assert_array_equal(arr, b)
             return
         # Use the fact that the array is originally an arange:
         arr.flat[indexed_arr.ravel()] += 1000
-        assert_array_equal(arr, b)
+        np.testing.assert_array_equal(arr, b)
 
-    def test_boolean(self):
+    def test_boolean(self, np):
         a = np.array(5)
-        assert_equal(a[np.array(True)], 5)
+        np.testing.assert_equal(a[np.array(True)], 5)
         a[np.array(True)] = 1
-        assert_equal(a, 1)
+        np.testing.assert_equal(a, 1)
         # NOTE: This is different from normal broadcasting, as
         # arr[boolean_array] works like in a multi index. Which means
         # it is aligned to the left. This is probably correct for
@@ -983,7 +1011,7 @@ class TestMultiIndexingAutomated:
         self._check_multi_index(
             self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
 
-    def test_multidim(self):
+    def test_multidim(self, np):
         # Automatically test combinations with complex indexes on 2nd (or 1st)
         # spot and the simple ones in one other spot.
         with warnings.catch_warnings():
@@ -1013,7 +1041,7 @@ class TestMultiIndexingAutomated:
         assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
         assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
 
-    def test_1d(self):
+    def test_1d(self, np):
         a = np.arange(10)
         for index in self.complex_indices:
             self._check_single_index(a, index)
@@ -1025,7 +1053,7 @@ class TestFloatNonIntegerArgument:
     and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
 
     """
-    def test_valid_indexing(self):
+    def test_valid_indexing(self, np):
         # These should raise no errors.
         a = np.array([[[5]]])
 
@@ -1035,7 +1063,7 @@ class TestFloatNonIntegerArgument:
         a[:, 0,:]
         a[:,:,:]
 
-    def test_valid_slicing(self):
+    def test_valid_slicing(self, np):
         # These should raise no errors.
         a = np.array([[[5]]])
 
@@ -1048,21 +1076,19 @@ class TestFloatNonIntegerArgument:
         a[:2:2]
         a[1:2:2]
 
-    def test_non_integer_argument_errors(self):
+    def test_non_integer_argument_errors(self, np):
         a = np.array([[5]])
 
         assert_raises(TypeError, np.reshape, a, (1., 1., -1))
         assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
-        pytest.xfail("XXX: take not implemented")
+        if np is torch_np:
+            pytest.xfail("XXX: take not implemented")
         assert_raises(TypeError, np.take, a, [0], 1.)
         assert_raises(TypeError, np.take, a, [0], np.float64(1.))
 
-    @pytest.mark.skip(
-        reason=(
-            "torch doesn't have scalar types with distinct element-wise behaviours"
-        )
-    )
-    def test_non_integer_sequence_multiplication(self):
+    def test_non_integer_sequence_multiplication(self, np):
+        if np is torch_np:
+            pytest.skip("torch doesn't have scalar types with distinct element-wise behaviours")
         # NumPy scalar sequence multiply should not work with non-integers
         def mult(a, b):
             return a * b
@@ -1071,7 +1097,7 @@ class TestFloatNonIntegerArgument:
         # following should be OK
         mult([1], np.int_(3))
 
-    def test_reduce_axis_float_index(self):
+    def test_reduce_axis_float_index(self, np):
         d = np.zeros((3,3,3))
         assert_raises(TypeError, np.min, d, 0.5)
         assert_raises(TypeError, np.min, d, (0.5, 1))
@@ -1081,7 +1107,7 @@ class TestFloatNonIntegerArgument:
 
 class TestBooleanIndexing:
     # Using a boolean as integer argument/indexing is an error.
-    def test_bool_as_int_argument_errors(self):
+    def test_bool_as_int_argument_errors(self, np):
         a = np.array([[[1]]])
 
         assert_raises(TypeError, np.reshape, a, (True, -1))
@@ -1089,21 +1115,23 @@ class TestBooleanIndexing:
         # array is thus also deprecated, but not with the same message:
         assert_warns(DeprecationWarning, operator.index, np.True_)
 
-        pytest.xfail("XXX: take not implemented")
+        if np is torch_np:
+            pytest.xfail("XXX: take not implemented")
         assert_raises(TypeError, np.take, args=(a, [0], False))
 
-        pytest.skip("torch consumes boolean tensors as ints, no bother raising here")
+        if np is torch_np:
+            pytest.skip("torch consumes boolean tensors as ints, no bother raising here")
         assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
         assert_raises(TypeError, operator.index, np.array(True))
 
-    def test_boolean_indexing_weirdness(self):
+    def test_boolean_indexing_weirdness(self, np):
         # Weird boolean indexing things
         a = np.ones((2, 3, 4))
         assert a[False, True, ...].shape == (0, 2, 3, 4)
         assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2)
         assert_raises(IndexError, lambda: a[False, [0, 1], ...])
 
-    def test_boolean_indexing_fast_path(self):
+    def test_boolean_indexing_fast_path(self, np):
         # These used to either give the wrong error, or incorrectly give no
         # error.
         a = np.ones((3, 3))
@@ -1134,20 +1162,23 @@ class TestArrayToIndexDeprecation:
     """Creating an index from array not 0-D is an error.
 
     """
-    def test_array_to_index_error(self):
+    def test_array_to_index_error(self, np):
         # so no exception is expected. The raising is effectively tested above.
         a = np.array([[[1]]])
 
-        pytest.xfail("XXX: take not implemented")
+        if np is torch_np:
+            pytest.xfail("XXX: take not implemented")
         assert_raises(TypeError, np.take, a, [0], a)
 
-        pytest.skip(
-            "Multi-dimensional tensors are indexable just as long as they only "
-            "contain a single element, no bother raising here"
-        )
+        if np is torch_np:
+            pytest.skip(
+                "Multi-dimensional tensors are indexable just as long as they only "
+                "contain a single element, no bother raising here"
+            )
         assert_raises(TypeError, operator.index, np.array([1]))
 
-        pytest.skip("torch consumes tensors as ints, no bother raising here")
+        if np is torch_np:
+            pytest.skip("torch consumes tensors as ints, no bother raising here")
         assert_raises(TypeError, np.reshape, a, (a, -1))
 
 
@@ -1158,14 +1189,13 @@ class TestNonIntegerArrayLike:
     an integer.
 
     """
-    @pytest.mark.skip(
-        reason=(
-            "torch consumes floats by way of falling back on its deprecated "
-            "__index__ behaviour, no bother raising here"
-        )
-    )
     @pytest.mark.filterwarnings("ignore::DeprecationWarning")
-    def test_basic(self):
+    def test_basic(self, np):
+        if np is torch_np:
+            pytest.skip(
+                "torch consumes floats by way of falling back on its deprecated "
+                "__index__ behaviour, no bother raising here"
+            )
         a = np.arange(10)
 
         assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
@@ -1179,13 +1209,12 @@ class TestMultipleEllipsisError:
     """An index can only have a single ellipsis.
 
     """
-    @pytest.mark.xfail(
-        reason=(
-            "torch currently consumes multiple ellipsis, no bother raising "
-            "here. See https://github.com/pytorch/pytorch/issues/59787#issue-917252204"
-        )
-    )
-    def test_basic(self):
+    def test_basic(self, np):
+        if np is torch_np:
+            pytest.xfail(
+                "torch currently consumes multiple ellipsis, no bother raising "
+                "here. See https://github.com/pytorch/pytorch/issues/59787#issue-917252204"
+            )
         a = np.arange(10)
         assert_raises(IndexError, lambda: a[..., ...])
         assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))

As mentioned, it's a bit finicky: you have to check `np is {numpy,torch_np}` for module-specific behaviour, e.g. raised error classes and skips/xfails, and you have to make sure you're using the `np` argument for things like the `{numpy,torch_np}.testing` utils.
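For instance, a condensed illustration of the pattern (a hypothetical test, assuming an `np` fixture parametrized over {numpy, torch_np} as above):

import numpy
from pytest import raises as assert_raises


def test_index_no_floats_condensed(np):
    a = np.array([[[5]]])
    # the two modules raise different error classes for the same bad index,
    # so the expected exception has to be picked per backend
    error = IndexError if np is numpy else TypeError
    assert_raises(error, lambda: a[0.0:, 0.0])
    # assertions likewise go through the fixture's own testing namespace
    np.testing.assert_equal(a[0, 0, 0], 5)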

So IMO it's probably not something we'd want to rework our existing tests around, but rather something to use when we want to sanity-check things as and when desired.

ev-br added the enhancement (New feature or request) label on Mar 16, 2023