Update to SDK 3.3.0 #18

Merged · 2 commits · Jul 6, 2023
2 changes: 1 addition & 1 deletion .devcontainer.dockerfile
@@ -1,4 +1,4 @@
-FROM graphcore/pytorch:3.2.0-ubuntu-20.04
+FROM graphcore/pytorch:3.3.0-ubuntu-20.04
 
 RUN apt-get update \
     && apt-get install -y \
7 changes: 5 additions & 2 deletions .github/workflows/ci.yaml
@@ -13,7 +13,7 @@ concurrency:
 jobs:
   ci:
     runs-on: ubuntu-latest
-    container: graphcore/pytorch:3.2.0-ubuntu-20.04
+    container: graphcore/pytorch:3.3.0-ubuntu-20.04
     timeout-minutes: 10
     steps:
       - uses: actions/checkout@v3
@@ -23,7 +23,10 @@ jobs:
           apt-get install -y clang-format
           pip install -r requirements-dev.txt
       - name: Run CI
-        run: ./dev ci
+        run: |
+          source /opt/setup-bash-env.sh
+          ./dev ci
+        shell: bash
       - name: Publish documentation
         if: ${{github.ref == 'refs/heads/main'}}
         uses: Cecilapp/GitHub-Pages-deploy@v3
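A note on the CI change above: the new `source /opt/setup-bash-env.sh` line presumably activates the Poplar SDK environment that the 3.3.0 `graphcore/pytorch` images defer to a setup script, and `shell: bash` is needed because `source` is a bash builtin and the step is now a multi-line script.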
2 changes: 1 addition & 1 deletion README.md
@@ -8,7 +8,7 @@ A collection of addons to [PopTorch](https://github.com/graphcore/poptorch), wit
 See [documentation](https://graphcore-research.github.io/poptorch-experimental-addons).
 
 ```bash
-# Tested on Poplar SDK 3.2.0+1277, Ubuntu 20.04, Python 3.8
+# Tested on Poplar SDK 3.3.0+7857, Ubuntu 20.04, Python 3.8
 pip install git+https://github.com/graphcore-research/poptorch-experimental-addons
 
 # Run an example
2 changes: 1 addition & 1 deletion poptorch_experimental_addons/_impl/core.py
@@ -57,7 +57,7 @@ def autograd_proxy(fwd: Tensor, proxy: Tensor) -> Tensor:
             example_outputs=[fwd],
         )
     else:
-        y = _AutogradProxy.apply(fwd, proxy)
+        y = _AutogradProxy.apply(fwd, proxy)  # type:ignore[no-untyped-call]
     return y
 
 
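Background on the new `type:ignore[no-untyped-call]`: `torch.autograd.Function.apply` carries no type annotations in PyTorch's stubs, so mypy flags any call to it from annotated code when `disallow_untyped_calls` is enabled (as it presumably is in this project's mypy config). A minimal, self-contained sketch of the proxy pattern, with illustrative names only, not the library's actual implementation:

```python
import torch
from torch import Tensor


class _ProxySketch(torch.autograd.Function):
    """Take the forward value from `fwd`, but route the gradient to `proxy`."""

    @staticmethod
    def forward(ctx, fwd: Tensor, proxy: Tensor) -> Tensor:
        return fwd.clone()  # output value comes from `fwd`

    @staticmethod
    def backward(ctx, grad_output: Tensor):
        return None, grad_output  # no grad for `fwd`; grad flows to `proxy`


x = torch.tensor(2.0, requires_grad=True)
# Function.apply is untyped, hence the ignore under disallow_untyped_calls.
y = _ProxySketch.apply(x * 3, x)  # type:ignore[no-untyped-call]
y.backward()
print(x.grad)  # tensor(1.) -- the gradient followed the `proxy` argument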
1 change: 1 addition & 0 deletions requirements-dev.txt
@@ -3,6 +3,7 @@ black==23.1.0
 flake8==6.0.0
 isort==5.12.0
 mypy==1.0.1
+numpy>=1.24.4
 pdoc3==0.10.0
 pytest==7.2.1
 pytest-cov==4.0.0
4 changes: 1 addition & 3 deletions tests/test_collectives.py
@@ -16,8 +16,6 @@
 
 from . import utils
 
-assert_close = torch.testing.assert_close  # type:ignore[attr-defined]
-
 
 def set_seed(seed: int) -> None:
     torch.manual_seed(seed)
@@ -136,4 +134,4 @@ def test_collective(op: Callable[[torch.Tensor, int], torch.Tensor]) -> None:
     num_ipus = 2
     actual = run_collective(X, op, num_ipus)
     expected = simulate_collective(X, _op_mapping[op], num_ipus)
-    list(map(assert_close, actual, expected))
+    list(map(torch.testing.assert_close, actual, expected))
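The per-file alias `assert_close = torch.testing.assert_close  # type:ignore[attr-defined]` was evidently a workaround for torch stubs that did not yet type the `torch.testing` namespace; with the torch version bundled in SDK 3.3.0 the call type-checks directly, so each test module drops the alias. For reference, a self-contained example of the call as now written:

```python
import torch

actual = torch.tensor([1.0, 2.0])
expected = torch.tensor([1.0, 2.0 + 1e-7])

# Passes: the difference is within the default float32 tolerances.
torch.testing.assert_close(actual, expected)

# Tolerances can be overridden per call, as test_distance_matrix does below.
torch.testing.assert_close(actual, expected, rtol=0.0, atol=1e-6)

# Mismatches raise an AssertionError describing the largest difference.
try:
    torch.testing.assert_close(actual, expected + 1.0)
except AssertionError as e:
    print(e)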
12 changes: 5 additions & 7 deletions tests/test_core.py
@@ -9,8 +9,6 @@
 
 import poptorch_experimental_addons as pea
 
-assert_close = torch.testing.assert_close  # type:ignore[attr-defined]
-
 
 def run_forward_and_backward(
     fn: Callable[..., Tensor],
@@ -57,8 +55,8 @@ def test_autograd_proxy(device: str) -> None:
         patterns=dict(AutogradProxyOpPattern=True),
         device=device,
     )
-    assert_close(outputs["loss"], torch.tensor(6.0))
-    assert_close(outputs["grad_x"], torch.tensor(3.0))
+    torch.testing.assert_close(outputs["loss"], torch.tensor(6.0))
+    torch.testing.assert_close(outputs["grad_x"], torch.tensor(3.0))
 
 
 @pytest.mark.parametrize("dtype", [torch.float32, torch.float16])
@@ -87,19 +85,19 @@ def test_distance_matrix(p: int, dtype: torch.dtype) -> None:
     atol = {torch.float32: 1e-5, torch.float16: 2e-3}[dtype]
     rtol = {torch.float32: 2e-6, torch.float16: 2e-3}[dtype]
 
-    assert_close(
+    torch.testing.assert_close(
         output_ipu["output"],
         output_torch["output"],
         rtol=rtol,
         atol=atol,
     )
-    assert_close(
+    torch.testing.assert_close(
         output_ipu["grad_tensor1"],
         output_torch["grad_tensor1"],
         rtol=rtol,
         atol=atol,
     )
-    assert_close(
+    torch.testing.assert_close(
         output_ipu["grad_tensor2"],
         output_torch["grad_tensor2"],
         rtol=rtol,
6 changes: 2 additions & 4 deletions tests/test_sharded_matmul.py
@@ -16,8 +16,6 @@
 
 from . import utils
 
-assert_close = torch.testing.assert_close  # type:ignore[attr-defined]
-
 
 class Sharding(Enum):
     Replicated = 1
@@ -265,7 +263,7 @@ def test_sharded_matmul(op: Callable[[Any], torch.Tensor]) -> None:
     num_ipus = 2
     actual = run_sharded_matmul(X, Y, op, num_ipus)
     expected = simulate_sharded_matmul(X, Y, op, num_ipus)
-    list(map(assert_close, actual, expected))
+    list(map(torch.testing.assert_close, actual, expected))
 
 
 @pytest.mark.parametrize("op", list(_op_mapping.keys()))
@@ -281,7 +279,7 @@ def test_simulator(op: Callable[[Any], torch.Tensor]) -> None:
         mode=SimulatorMode.Base,
     )
     out_sim, _ = simulator()
-    assert_close(out_sim, out_base)
+    torch.testing.assert_close(out_sim, out_base)
 
 
 if __name__ == "__main__":
8 changes: 3 additions & 5 deletions tests/test_sparse.py
@@ -11,8 +11,6 @@
 
 import poptorch_experimental_addons as pea
 
-assert_close = torch.testing.assert_close  # type:ignore[attr-defined]
-
 
 @dataclass
 class Problem:
@@ -83,7 +81,7 @@ def test_coo_methods(shape: Tuple[int], block_size: int, seed: int) -> None:
     assert torch.sum(dense != 0) == nnz_blocks * block_size ** len(shape)
 
     if len(shape) == 2:
-        assert_close(
+        torch.testing.assert_close(
             pea.sparse.block_coo_to_dense(pea.sparse.block_coo_transpose(array)),
             dense.T,
         )
@@ -141,7 +139,7 @@ def test_block_coo_spmm(
     )(problem.dense)
 
     for name, output in outputs.items():
-        assert_close(
+        torch.testing.assert_close(
             output.float(),
             problem.expected_output,
             rtol=0,
@@ -167,7 +165,7 @@ def test_high_level_api() -> None:
     expected_output = dense_in @ pea.sparse.block_coo_to_dense(sparse).T
 
     def check(output: Tensor) -> None:
-        assert_close(output, expected_output, atol=1e-5, rtol=0)
+        torch.testing.assert_close(output, expected_output, atol=1e-5, rtol=0)
 
     check(pea.sparse.StaticSparseLinear(sparse)(dense_in))
     check((pea.sparse.StaticSparseMatrix(sparse) @ dense_in.T).T)
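Judging by the `expected_output` line above, the two `check(...)` calls exercise equivalent spellings of the same product: `StaticSparseLinear(sparse)` applies the sparse matrix to `dense_in` like a linear layer, while `StaticSparseMatrix(sparse) @ dense_in.T` computes the same result through the matmul operator, transposed back before comparison.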