Merged
32 changes: 25 additions & 7 deletions backends/vulkan/op_registry.py
@@ -793,17 +793,35 @@ def register_ported_op_all_packed_dims():


# Ported ops that support their own prepacking.
@update_features(
    [
        exir_ops.edge.aten.embedding.default,
        exir_ops.edge.aten._native_batch_norm_legit_no_training.default,
    ]
)
def register_ported_ops_with_prepacking():
@update_features(exir_ops.edge.aten.embedding.default)
def register_embedding_op():
    return OpFeatures(
        inputs_storage=utils.CHANNELS_PACKED_TEXTURE,
        supports_prepacking=True,
        supports_resize=True,
    )


@update_features(exir_ops.edge.aten._native_batch_norm_legit_no_training.default)
def register_batch_norm_op():
    def check_batch_norm_node(node: torch.fx.Node) -> bool:
        x = node.args[0]
        if not isinstance(x, torch.fx.Node):
            return False
        x_val = x.meta.get("val", None)
        if x_val is None:
            return False
        x_shape = x_val.size()
        # Only support 4-D input tensors since this is a restriction enforced by the
        # operator implementation.
        # TODO(ssjia): Add shape agnostic support for batch norm
        return len(x_shape) == 4

    return OpFeatures(
        inputs_storage=utils.CHANNELS_PACKED_TEXTURE,
        supports_prepacking=True,
        supports_resize=True,
        are_node_inputs_supported_fn=check_batch_norm_node,
    )


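Editorial aside (not part of the PR): check_batch_norm_node only admits batch norm nodes whose input tensor is 4-dimensional, so 2-D inputs such as the output of a Linear layer would presumably be filtered out of the Vulkan partition. A minimal standalone sketch of the shape rule, using plain PyTorch shapes for illustration; the helper name here is hypothetical:

import torch

# Hypothetical illustration of the len(x_shape) == 4 rule in check_batch_norm_node.
conv_out = torch.nn.Conv2d(3, 8, kernel_size=3)(torch.randn(1, 3, 32, 32))  # 4-D: (1, 8, 30, 30)
linear_out = torch.nn.Linear(128, 64)(torch.randn(4, 128))                  # 2-D: (4, 64)

def passes_batch_norm_check(t: torch.Tensor) -> bool:
    # Mirrors the shape condition enforced by the registry check above.
    return len(t.size()) == 4

print(passes_batch_norm_check(conv_out))    # True  -> node would be accepted
print(passes_batch_norm_check(linear_out))  # False -> node would be rejected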
18 changes: 18 additions & 0 deletions backends/vulkan/test/test_vulkan_delegate.py
@@ -1061,6 +1061,24 @@ def forward(self, x):
            sample_inputs,
        )

    def test_vulkan_backend_batch_norm_after_linear(self):
Contributor review comment:
is this a positive case or negative case?

why not add both tests?

        class LinearBatchNormModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(128, 64)
                self.bn = torch.nn.BatchNorm1d(num_features=64)

            def forward(self, x):
                x = self.linear(x)
                return self.bn(x)

        sample_inputs = (torch.randn(size=(4, 128), dtype=torch.float32),)

        self.lower_module_and_test_output(
            LinearBatchNormModule(),
            sample_inputs,
        )
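Following up on the review question above: a complementary positive-case test (batch norm on a 4-D tensor, e.g. after a convolution) might look like the sketch below. This is an illustrative suggestion that reuses the lower_module_and_test_output helper and the pattern of the test above; it is not code from the merged change.

    def test_vulkan_backend_batch_norm_after_conv(self):
        class ConvBatchNormModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)
                self.bn = torch.nn.BatchNorm2d(num_features=8)

            def forward(self, x):
                x = self.conv(x)
                return self.bn(x)

        # 4-D input, so the batch norm node should pass check_batch_norm_node.
        sample_inputs = (torch.randn(size=(1, 3, 32, 32), dtype=torch.float32),)

        self.lower_module_and_test_output(
            ConvBatchNormModule(),
            sample_inputs,
        )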

    def test_vulkan_backend_full(self):
        class FullModule(torch.nn.Module):
            def __init__(self):