padding bug #151

Open
krgy12138 opened this issue May 27, 2023 · 1 comment

@krgy12138
Describe the bug
I get an error during conversion whenever a convolution uses padding != 0.

To Reproduce
This is my model:

import torch
import torch.nn as nn


class Generator(nn.Module):

  def __init__(self):
    super(Generator, self).__init__()

    # Encoder: conv2 and conv3 downsample by stride 2
    self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=1, padding=1, bias=True)
    self.in1 = nn.InstanceNorm2d(64, affine=True)
    self.relu = nn.ReLU(inplace=True)
    self.conv2 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=True)
    self.in2 = nn.InstanceNorm2d(128, affine=True)
    self.conv3 = nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias=True)
    self.in3 = nn.InstanceNorm2d(256, affine=True)

    # Decoder: transposed convolutions upsample by stride 2
    self.deconv1 = nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True)
    self.in4 = nn.InstanceNorm2d(128, affine=True)
    self.deconv2 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, bias=True)
    self.in5 = nn.InstanceNorm2d(64, affine=True)
    self.conv4 = nn.Conv2d(64, 1, kernel_size=7, stride=1, padding=3, bias=True)

  def forward(self, x):
    out = self.conv1(x)
    out = self.in1(out)
    out = self.relu(out)
    out = self.conv2(out)
    out = self.in2(out)
    out = self.relu(out)
    out = self.conv3(out)
    out = self.in3(out)
    out = self.relu(out)

    out = self.deconv1(out)
    out = self.in4(out)
    out = self.relu(out)
    out = self.deconv2(out)
    out = self.in5(out)
    out = self.relu(out)
    out = self.conv4(out)
    out = torch.tanh(out)

    return out
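For completeness, a minimal conversion call that triggers the failure might look like the sketch below. It assumes pytorch2keras's documented pytorch_to_keras API; the channels-first input shape (1, 64, 64) is taken from the log that follows.

# Hedged repro sketch: convert the Generator above with pytorch2keras.
import torch
from pytorch2keras import pytorch_to_keras

G = Generator().eval()
input_var = torch.randn(1, 1, 64, 64)  # NCHW, batch of 1, matching the log

# verbose=True produces the DEBUG output shown under Logs.
k_model = pytorch_to_keras(G, input_var, [(1, 64, 64)], verbose=True)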

Logs
INFO:pytorch2keras:Converter is called.
WARNING:pytorch2keras:Custom shapes isn't supported now.
DEBUG:pytorch2keras:Input_names:
DEBUG:pytorch2keras:['input_0']
DEBUG:pytorch2keras:Output_names:
DEBUG:pytorch2keras:['output_0']
Exported graph: graph(%input_0 : Float(1, 1, 64, 64, strides=[4096, 4096, 64, 1], requires_grad=0, device=cpu),
%deconv1.weight : Float(256, 128, 5, 5, strides=[3200, 25, 5, 1], requires_grad=1, device=cpu),
%deconv1.bias : Float(128, strides=[1], requires_grad=1, device=cpu),
%bn4.weight : Float(128, strides=[1], requires_grad=1, device=cpu),
%bn4.bias : Float(128, strides=[1], requires_grad=1, device=cpu),
%bn4.running_mean : Float(128, strides=[1], requires_grad=0, device=cpu),
%bn4.running_var : Float(128, strides=[1], requires_grad=0, device=cpu),
%deconv2.weight : Float(128, 64, 5, 5, strides=[1600, 25, 5, 1], requires_grad=1, device=cpu),
%deconv2.bias : Float(64, strides=[1], requires_grad=1, device=cpu),
%bn5.weight : Float(64, strides=[1], requires_grad=1, device=cpu),
%bn5.bias : Float(64, strides=[1], requires_grad=1, device=cpu),
%bn5.running_mean : Float(64, strides=[1], requires_grad=0, device=cpu),
%bn5.running_var : Float(64, strides=[1], requires_grad=0, device=cpu),
%deconv3.weight : Float(64, 1, 5, 5, strides=[25, 25, 5, 1], requires_grad=1, device=cpu),
%deconv3.bias : Float(1, strides=[1], requires_grad=1, device=cpu),
%onnx::Conv_56 : Float(64, 1, 5, 5, strides=[25, 25, 5, 1], requires_grad=0, device=cpu),
%onnx::Conv_57 : Float(64, strides=[1], requires_grad=0, device=cpu),
%onnx::Conv_59 : Float(128, 64, 5, 5, strides=[1600, 25, 5, 1], requires_grad=0, device=cpu),
%onnx::Conv_60 : Float(128, strides=[1], requires_grad=0, device=cpu),
%onnx::Conv_62 : Float(256, 128, 5, 5, strides=[3200, 25, 5, 1], requires_grad=0, device=cpu),
%onnx::Conv_63 : Float(256, strides=[1], requires_grad=0, device=cpu)):
%/conv1/Conv_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/conv1/Conv"](%input_0, %onnx::Conv_56, %onnx::Conv_57), scope: pt.mymodel.Generator::/torch.nn.modules.conv.Conv2d::conv1 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0
%/relu/Relu_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/relu/Relu"](%/conv1/Conv_output_0), scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0
%/conv2/Conv_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/conv2/Conv"](%/relu/Relu_output_0, %onnx::Conv_59, %onnx::Conv_60), scope: pt.mymodel.Generator::/torch.nn.modules.conv.Conv2d::conv2 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0
%/relu_1/Relu_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/relu_1/Relu"](%/conv2/Conv_output_0), scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0
%/conv3/Conv_output_0 : Float(1, 256, 64, 64, strides=[1048576, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/conv3/Conv"](%/relu_1/Relu_output_0, %onnx::Conv_62, %onnx::Conv_63), scope: pt.mymodel.Generator::/torch.nn.modules.conv.Conv2d::conv3 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0
%/relu_2/Relu_output_0 : Float(1, 256, 64, 64, strides=[1048576, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/relu_2/Relu"](%/conv3/Conv_output_0), scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0
%/deconv1/ConvTranspose_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=0, device=cpu) = onnx::ConvTranspose[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/deconv1/ConvTranspose"](%/relu_2/Relu_output_0, %deconv1.weight, %deconv1.bias), scope: pt.mymodel.Generator::/torch.nn.modules.conv.ConvTranspose2d::deconv1 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:953:0
%/bn4/BatchNormalization_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::BatchNormalization[epsilon=1.0000000000000001e-05, momentum=0.90000000000000002, training_mode=0, onnx_name="/bn4/BatchNormalization"](%/deconv1/ConvTranspose_output_0, %bn4.weight, %bn4.bias, %bn4.running_mean, %bn4.running_var), scope: pt.mymodel.Generator::/torch.nn.modules.batchnorm.BatchNorm2d::bn4 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:2435:0
%/relu_3/Relu_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/relu_3/Relu"](%/bn4/BatchNormalization_output_0), scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0
%/deconv2/ConvTranspose_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=0, device=cpu) = onnx::ConvTranspose[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/deconv2/ConvTranspose"](%/relu_3/Relu_output_0, %deconv2.weight, %deconv2.bias), scope: pt.mymodel.Generator::/torch.nn.modules.conv.ConvTranspose2d::deconv2 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:953:0
%/bn5/BatchNormalization_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::BatchNormalization[epsilon=1.0000000000000001e-05, momentum=0.90000000000000002, training_mode=0, onnx_name="/bn5/BatchNormalization"](%/deconv2/ConvTranspose_output_0, %bn5.weight, %bn5.bias, %bn5.running_mean, %bn5.running_var), scope: pt.mymodel.Generator::/torch.nn.modules.batchnorm.BatchNorm2d::bn5 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:2435:0
%/relu_4/Relu_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Relu[onnx_name="/relu_4/Relu"](%/bn5/BatchNormalization_output_0), scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0
%/deconv3/ConvTranspose_output_0 : Float(1, 1, 64, 64, strides=[4096, 4096, 64, 1], requires_grad=0, device=cpu) = onnx::ConvTranspose[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/deconv3/ConvTranspose"](%/relu_4/Relu_output_0, %deconv3.weight, %deconv3.bias), scope: pt.mymodel.Generator::/torch.nn.modules.conv.ConvTranspose2d::deconv3 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:953:0
%output_0 : Float(1, 1, 64, 64, strides=[4096, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Tanh[onnx_name="/tanh/Tanh"](%/deconv3/ConvTranspose_output_0), scope: pt.mymodel.Generator::/torch.nn.modules.activation.Tanh::tanh # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/activation.py:358:0
return (%output_0)

INFO:onnx2keras:Converter is called.
DEBUG:onnx2keras:List input shapes:
DEBUG:onnx2keras:[(1, 64, 64)]
DEBUG:onnx2keras:List inputs:
DEBUG:onnx2keras:Input 0 -> input_0.
DEBUG:onnx2keras:List outputs:
DEBUG:onnx2keras:Output 0 -> output_0.
DEBUG:onnx2keras:Gathering weights to dictionary.
DEBUG:onnx2keras:Found weight deconv1.weight with shape (256, 128, 5, 5).
DEBUG:onnx2keras:Found weight deconv1.bias with shape (128,).
DEBUG:onnx2keras:Found weight bn4.weight with shape (128,).
DEBUG:onnx2keras:Found weight bn4.bias with shape (128,).
DEBUG:onnx2keras:Found weight bn4.running_mean with shape (128,).
DEBUG:onnx2keras:Found weight bn4.running_var with shape (128,).
DEBUG:onnx2keras:Found weight deconv2.weight with shape (128, 64, 5, 5).
DEBUG:onnx2keras:Found weight deconv2.bias with shape (64,).
DEBUG:onnx2keras:Found weight bn5.weight with shape (64,).
DEBUG:onnx2keras:Found weight bn5.bias with shape (64,).
DEBUG:onnx2keras:Found weight bn5.running_mean with shape (64,).
DEBUG:onnx2keras:Found weight bn5.running_var with shape (64,).
DEBUG:onnx2keras:Found weight deconv3.weight with shape (64, 1, 5, 5).
DEBUG:onnx2keras:Found weight deconv3.bias with shape (1,).
DEBUG:onnx2keras:Found weight onnx::Conv_56 with shape (64, 1, 5, 5).
DEBUG:onnx2keras:Found weight onnx::Conv_57 with shape (64,).
DEBUG:onnx2keras:Found weight onnx::Conv_59 with shape (128, 64, 5, 5).
DEBUG:onnx2keras:Found weight onnx::Conv_60 with shape (128,).
DEBUG:onnx2keras:Found weight onnx::Conv_62 with shape (256, 128, 5, 5).
DEBUG:onnx2keras:Found weight onnx::Conv_63 with shape (256,).
DEBUG:onnx2keras:Found input input_0 with shape (1, 64, 64)
DEBUG:onnx2keras:######
DEBUG:onnx2keras:...
DEBUG:onnx2keras:Converting ONNX operation
DEBUG:onnx2keras:type: Conv
DEBUG:onnx2keras:node_name: /conv1/Conv_output_0
DEBUG:onnx2keras:node_params: {'dilations': [1, 1], 'group': 1, 'kernel_shape': [5, 5], 'pads': [2, 2, 2, 2], 'strides': [1, 1], 'change_ordering': False, 'name_policy': None}
DEBUG:onnx2keras:...
DEBUG:onnx2keras:Check if all inputs are available:
DEBUG:onnx2keras:Check input 0 (name input_0).
DEBUG:onnx2keras:Check input 1 (name onnx::Conv_56).
DEBUG:onnx2keras:The input not found in layers / model inputs.
DEBUG:onnx2keras:Found in weights, add as a numpy constant.
DEBUG:onnx2keras:Check input 2 (name onnx::Conv_57).
DEBUG:onnx2keras:The input not found in layers / model inputs.
DEBUG:onnx2keras:Found in weights, add as a numpy constant.
DEBUG:onnx2keras:... found all, continue
DEBUG:onnx2keras:conv:Conv with bias
DEBUG:onnx2keras:conv:2D convolution
DEBUG:onnx2keras:conv:Paddings exist, add ZeroPadding layer
Traceback (most recent call last):
File "", line 1, in
File "/home/duyangfan/nxy/pytorch2keras/pytorch2keras/converter.py", line 72, in pytorch_to_keras
k_model = onnx_to_keras(onnx_model=onnx_model, input_names=['input_0'],
File "/home/duyangfan/miniconda3/lib/python3.10/site-packages/onnx2keras/converter.py", line 175, in onnx_to_keras
AVAILABLE_CONVERTERS[node_type](
File "/home/duyangfan/miniconda3/lib/python3.10/site-packages/onnx2keras/convolution_layers.py", line 91, in convert_conv
layers[padding_name] = input_0 = padding_layer(input_0)
File "/home/duyangfan/miniconda3/lib/python3.10/site-packages/keras/utils/traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/home/duyangfan/miniconda3/lib/python3.10/contextlib.py", line 135, in enter
return next(self.gen)
ValueError: '/conv1/Conv_output_0_pad/' is not a valid root scope name. A root scope name has to match the following pattern: ^[A-Za-z0-9.][A-Za-z0-9_.\/>-]*$
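
Editorial note on the failure: onnx2keras inserts a ZeroPadding2D layer whenever an ONNX Conv node has nonzero pads (see "Paddings exist, add ZeroPadding layer" above) and names it after the node, here /conv1/Conv_output_0_pad. Keras rejects that name because a root scope name must start with an alphanumeric character or '.', not '/'. With padding=0 no ZeroPadding layer is created, which is why only padded convolutions trigger the error. A possible workaround, assuming onnx2keras's documented name_policy option (it appears as 'name_policy': None in the node_params above), is to let the converter renumber layers instead of reusing the slash-prefixed ONNX names:

# Hedged workaround sketch: name_policy='renumerate' tells onnx2keras to
# generate sequential layer names instead of the slash-prefixed ONNX node
# names that Keras rejects. Whether this resolves the crash is untested here.
import onnx
from onnx2keras import onnx_to_keras

onnx_model = onnx.load('model.onnx')
k_model = onnx_to_keras(onnx_model, ['input_0'],
                        input_shapes=[(1, 64, 64)],
                        name_policy='renumerate')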

torch.onnx.export(G,input_var,'model.onnx',input_names=['input_0'],output_names=['output_0'],dynamic_axes={'input_0':[0],'output_0':[0]} )
/home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/onnx/utils.py:2040: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input input_0
warnings.warn(
/home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/onnx/utils.py:2040: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input output_0
warnings.warn(
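
Side note, unrelated to the crash: the two UserWarnings above can be silenced by naming the dynamic axes explicitly, using the standard torch.onnx.export dict form (the axis name 'batch' here is arbitrary):

# Naming each dynamic axis avoids the "No names were found ..." warnings.
torch.onnx.export(
    G, input_var, 'model.onnx',
    input_names=['input_0'], output_names=['output_0'],
    dynamic_axes={'input_0': {0: 'batch'}, 'output_0': {0: 'batch'}},
)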

@krgy12138 krgy12138 added the bug label May 27, 2023
@balisujohn

uh try this, #147 (comment)
