Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[TorchToLinalg] Fix AtenReflectionPad2dOp lowering to not assert when dimensions unknown #3758

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 32 additions & 9 deletions lib/Conversion/TorchToLinalg/DataMovement.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -403,17 +403,40 @@ class ConvertAtenReflectionPad2dOp
int64_t vDim = numDims - 2;
Value hDimSize = inputShape[hDim];
Value vDimSize = inputShape[vDim];
// Declared once here: the index type is needed both by the runtime
// pad-size checks below and by the index constants further down.
// (Declaring it a second time later in the same scope is a redefinition.)
Type indexType = rewriter.getIndexType();

auto leftPadAssertMsg = "Left padding too large";
auto rightPadAssertMsg = "Right padding too large";
auto topPadAssertMsg = "Top padding too large";
auto bottomPadAssertMsg = "Bottom padding too large";

// Emit a runtime `pad < dimSize` check for a dynamically-sized dimension.
// Mirrors the compile-time asserts used when the dimension is static.
auto addPadDynAssert = [&](int64_t pad, Value dimSize,
                           const llvm::Twine &msg) {
  Value padValue = getConstant(rewriter, loc, pad, indexType);
  Value pred = rewriter.create<arith::CmpIOp>(
      loc, arith::CmpIPredicate::slt, padValue, dimSize);
  rewriter.create<cf::AssertOp>(loc, pred, rewriter.getStringAttr(msg));
};

// Reflection padding requires each pad amount to be strictly smaller than
// the corresponding input dimension. Check statically when the shape is
// known; otherwise defer the check to runtime via cf.assert.
if (inputType.getShape()[hDim] != ShapedType::kDynamic) {
  assert(getHPadArgument(LEFT) < inputType.getShape()[hDim] &&
         leftPadAssertMsg);
  assert(getHPadArgument(RIGHT) < inputType.getShape()[hDim] &&
         rightPadAssertMsg);
} else {
  addPadDynAssert(getHPadArgument(LEFT), hDimSize, leftPadAssertMsg);
  addPadDynAssert(getHPadArgument(RIGHT), hDimSize, rightPadAssertMsg);
}
if (inputType.getShape()[vDim] != ShapedType::kDynamic) {
  assert(getVPadArgument(TOP) < inputType.getShape()[vDim] &&
         topPadAssertMsg);
  assert(getVPadArgument(BOTTOM) < inputType.getShape()[vDim] &&
         bottomPadAssertMsg);
} else {
  addPadDynAssert(getVPadArgument(TOP), vDimSize, topPadAssertMsg);
  addPadDynAssert(getVPadArgument(BOTTOM), vDimSize, bottomPadAssertMsg);
}

Value zero = getConstant(rewriter, loc, 0, indexType);
Value one = getConstant(rewriter, loc, 1, indexType);

Expand Down
23 changes: 23 additions & 0 deletions projects/pt1/python/torch_mlir_e2e_test/test_suite/padding.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,29 @@ def ReflectionPad2dModule_basic(module, tu: TestUtils):
# ==============================================================================


class ReflectionPad2dDynamicSizesModule(torch.nn.Module):
    """Applies reflection padding of 10 on all four sides of a rank-3 input.

    The last two dimensions are annotated as dynamic (-1), so the lowering
    cannot rely on statically known sizes for the padded dimensions.
    """

    def __init__(self):
        super().__init__()

    @export
    @annotate_args(
        [
            None,
            ([1, -1, -1], torch.float32, True),
        ]
    )
    def forward(self, x):
        # Padding order is (left, right, top, bottom).
        padding = (10, 10, 10, 10)
        return torch.ops.aten.reflection_pad2d(x, padding)


@register_test_case(module_factory=lambda: ReflectionPad2dDynamicSizesModule())
def ReflectionPad2dDynamicSizesModule_basic(module, tu: TestUtils):
    # 1x20x20 input: both padded dims (20) are larger than the pad width (10),
    # as required by reflection padding.
    sample = tu.rand(1, 20, 20, low=-1)
    module.forward(sample)


# ==============================================================================


class ReflectionPad2dModuleTop(torch.nn.Module):
def __init__(self):
super().__init__()
Expand Down
Loading