From c88e9782ad3237d0b26385b4aa152a8d00c51b70 Mon Sep 17 00:00:00 2001
From: soltanianaref <101413285+soltanianaref@users.noreply.github.com>
Date: Sat, 9 Apr 2022 20:33:31 +0430
Subject: [PATCH] Use deformable convolutions in BasicEncoder._conv2d

Replace nn.Conv2d with a deformable convolution. DeformConv2d.forward()
requires an explicit offset tensor, so a small wrapper module predicts
per-pixel offsets from its input with a zero-initialized regular conv
(zero offsets make it behave exactly like a standard convolution at the
start of training). The wrapper keeps the layer(x) call signature, so no
call sites change.
---
 steganogan/encoders.py | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/steganogan/encoders.py b/steganogan/encoders.py
index d1a93c8..3ab7c1c 100644
--- a/steganogan/encoders.py
+++ b/steganogan/encoders.py
@@ -2,6 +2,27 @@
 
 import torch
 from torch import nn
+from torchvision.ops import DeformConv2d
+
+
+class _DeformableConv2d(nn.Module):
+    """Deformable drop-in for nn.Conv2d: predicts sampling offsets from the input."""
+
+    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1):
+        super().__init__()
+        # One (dy, dx) offset pair per kernel tap, predicted per pixel.
+        self.offset_conv = nn.Conv2d(
+            in_channels, 2 * kernel_size * kernel_size,
+            kernel_size=kernel_size, padding=padding)
+        # Zero-init offsets so training starts as a regular convolution.
+        nn.init.zeros_(self.offset_conv.weight)
+        nn.init.zeros_(self.offset_conv.bias)
+        self.deform_conv = DeformConv2d(
+            in_channels, out_channels,
+            kernel_size=kernel_size, padding=padding)
+
+    def forward(self, x):
+        return self.deform_conv(x, self.offset_conv(x))
 
 
 class BasicEncoder(nn.Module):
@@ -16,9 +37,10 @@ class BasicEncoder(nn.Module):
     add_image = False
 
     def _conv2d(self, in_channels, out_channels):
-        return nn.Conv2d(
+        # Deformable convolution; the wrapper supplies offsets internally.
+        return _DeformableConv2d(
             in_channels=in_channels,
             out_channels=out_channels,
             kernel_size=3,
             padding=1
         )