Skip to content

Commit

Permalink
Added UpSampling3D ONNX tests
Browse files Browse the repository at this point in the history
  • Loading branch information
chavicoski committed May 18, 2021
1 parent 49fe323 commit df2c240
Show file tree
Hide file tree
Showing 4 changed files with 240 additions and 1 deletion.
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# Export script: trains a small Keras Conv3D + UpSampling3D autoencoder on
# synthetic data and exports it to ONNX (used by the EDDL ONNX import tests).
import argparse

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Conv3D, UpSampling3D, MaxPooling3D
import keras2onnx

# Training settings
parser = argparse.ArgumentParser(description='Keras Conv3D+Upsampling encoder decoder with synthetic data Example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
                    help='input batch size for training (default: 2)')
parser.add_argument('--epochs', type=int, default=5, metavar='N',
                    help='number of epochs to train (default: 5)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--output-path', type=str, default="onnx_models/upsample3D_enc_dec_synthetic.onnx",
                    help='Output path to store the onnx file')
parser.add_argument('--output-metric', type=str, default="",
                    help='Output file path to store the metric value obtained in test set')
args = parser.parse_args()

# Create synthetic data: a deterministic linspace ramp in [0, 1], so the
# exported model's training data is reproducible across runs.
n_samples = 6
# Shape: (n_samples, ch=3, depth=16, height=16, width=16)
x_train = np.linspace(0, 1, n_samples*3*16*16*16)
x_train = x_train.reshape((n_samples, 3, 16, 16, 16)).astype(np.float32)
# (B, C, D, H, W) -> (B, D, H, W, C)
x_train = np.transpose(x_train, (0, 2, 3, 4, 1))  # Set channel last

print("Train data shape:", x_train.shape)

# Define the autoencoder model (channels-last input: 16x16x16 volumes, 3 channels)
model = Sequential()
model.add(Input(shape=(16, 16, 16, 3)))
# Encoder: two conv + 2x2x2 max-pool stages (spatial dims 16 -> 8 -> 4)
model.add(Conv3D(32, 3, padding="same", activation="relu"))
model.add(MaxPooling3D(2, 2))
model.add(Conv3D(64, 3, padding="same", activation="relu"))
model.add(MaxPooling3D(2, 2))
# Decoder: mirror of the encoder using UpSampling3D (spatial dims 4 -> 8 -> 16),
# sigmoid output to match the [0, 1] range of the input data
model.add(Conv3D(64, 3, padding="same", activation="relu"))
model.add(UpSampling3D((2, 2, 2)))
model.add(Conv3D(32, 3, padding="same", activation="relu"))
model.add(UpSampling3D((2, 2, 2)))
model.add(Conv3D(3, 1, padding="valid", activation="sigmoid"))

model.compile(loss='mse',
              optimizer="adam",
              metrics=[])

model.summary()

# Training: the model is fit to reconstruct its own input (autoencoder)
model.fit(x_train, x_train, batch_size=args.batch_size, epochs=args.epochs)

# Evaluation on the same synthetic set (no separate test split for this test fixture)
eval_loss = model.evaluate(x_train, x_train)
print("Evaluation result: Loss:", eval_loss)

# In case of providing output metric file, store the test mse value
if args.output_metric != "":
    with open(args.output_metric, 'w') as ofile:
        ofile.write(str(eval_loss))

# Convert to ONNX
onnx_model = keras2onnx.convert_keras(model, "upsample3D_synthetic", debug_mode=1)
# Save ONNX to file
keras2onnx.save_model(onnx_model, args.output_path)
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ def test(model, device, test_loader):
def main():
# Training settings
parser = argparse.ArgumentParser(
description='PyTorch ConvT2D encoder-decoder MNIST Example')
description='PyTorch ConvT3D encoder-decoder with synthetic data example')
parser.add_argument('--batch-size', type=int, default=2, metavar='N',
help='input batch size for training (default: 2)')
parser.add_argument('--epochs', type=int, default=5, metavar='N',
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
from __future__ import print_function
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np


class Net(nn.Module):
    """3D convolutional autoencoder for (B, 3, 16, 16, 16) volumes.

    The encoder halves each spatial dimension twice (16 -> 8 -> 4); the
    decoder mirrors it with nearest-neighbour Upsample layers and ends in a
    sigmoid, so reconstructions lie in [0, 1] like the synthetic inputs.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Encoder: two conv + 2x2x2 max-pool stages, channels 3 -> 32 -> 64
        encoder_layers = [
            nn.Conv3d(3, 32, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool3d(2, 2),
            nn.Conv3d(32, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool3d(2, 2),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        # Decoder: conv + upsample stages back to 3 channels at full resolution
        decoder_layers = [
            nn.Conv3d(64, 64, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv3d(64, 32, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv3d(32, 3, 1, stride=1, padding=0),
            nn.Sigmoid(),
        ]
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x):
        """Encode then decode, returning a reconstruction of the input volume."""
        return self.decoder(self.encoder(x))


# Prepare data loader
# Prepare data loader
class Dummy_datagen:
    """Minimal batch generator over a deterministic synthetic dataset.

    Yields float32 numpy batches of shape (batch_size, 3, 16, 16, 16) built
    from a [0, 1] linspace ramp. Trailing samples that do not fill a whole
    batch are dropped (drop-last semantics, matching __len__).
    """

    def __init__(self, batch_size=2, n_samples=6):
        # Shape: (n_samples, ch=3, depth=16, height=16, width=16)
        self.samples = np.linspace(0, 1, n_samples*3*16*16*16).reshape((n_samples, 3, 16, 16, 16)).astype(np.float32)
        self.curr_idx = 0  # Current index of the batch
        self.bs = batch_size

    def __iter__(self):
        # Rewind before handing out the iterator so the generator can be
        # iterated multiple times (e.g. once per epoch) without requiring a
        # manual reset() call between loops. Previously a second `for` loop
        # would silently yield nothing.
        self.curr_idx = 0
        return self

    def __len__(self):
        # Number of full batches (partial trailing batch is not counted)
        return self.samples.shape[0] // self.bs

    def __next__(self):
        start = self.curr_idx
        self.curr_idx += self.bs
        # Only serve the batch if a full batch_size worth of samples remains
        if start <= self.samples.shape[0] - self.bs:
            return self.samples[start:start + self.bs]
        raise StopIteration

    def reset(self):
        '''Reset the iterator (kept for backward compatibility with callers
        that rewind explicitly between epochs).'''
        self.curr_idx = 0


def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch of the autoencoder.

    Batches come from the loader as numpy arrays; the reconstruction target
    is the input itself. The reported loss is the summed MSE normalized by
    the number of elements per sample, averaged over the samples seen so far.
    """
    model.train()
    running_loss = 0
    seen_samples = 0
    for step, batch in enumerate(train_loader):
        inputs = torch.from_numpy(batch).to(device)
        _, ch, depth, height, width = inputs.size()
        elems_per_sample = ch * depth * height * width
        optimizer.zero_grad()
        reconstruction = model(inputs)
        batch_loss = F.mse_loss(reconstruction, inputs, reduction='sum')
        batch_loss.backward()
        # Normalize by per-sample element count so the running loss is comparable
        # across shapes; accumulate before the optimizer step like a running mean.
        running_loss += batch_loss.item() / elems_per_sample
        seen_samples += inputs.size(0)
        optimizer.step()
        if step % 10 == 0:
            print('\rTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, step * len(inputs), len(train_loader.samples),
                100. * step / len(train_loader), running_loss / seen_samples))


def test(model, device, test_loader):
model.eval()
test_loss = 0
current_samples = 0
with torch.no_grad():
for data in test_loader:
data = torch.from_numpy(data)
data = data.to(device)
output = model(data)
b, c, d, h, w = data.size()
data_el_size = c * d * h * w
test_loss += F.mse_loss(output, data, reduction='sum').item() / data_el_size
current_samples += data.size(0)

test_loss = test_loss / current_samples
print(f'\nTest set: Average loss: {test_loss:.4f}\n')

return test_loss


def main():
    """Train the PyTorch 3D autoencoder on synthetic data and export to ONNX.

    Side effects: trains the model, prints per-epoch losses, optionally writes
    the final test MSE to --output-metric, and writes the ONNX model to
    --output-path.
    """
    # Training settings
    parser = argparse.ArgumentParser(
        description='PyTorch Conv3D+Upsample encoder-decoder with synthetic data example')
    parser.add_argument('--batch-size', type=int, default=2, metavar='N',
                        help='input batch size for training (default: 2)')
    parser.add_argument('--epochs', type=int, default=5, metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--output-path', type=str, default="onnx_models/upsample3D_enc_dec_synthetic.onnx",
                        help='Output path to store the onnx file')
    parser.add_argument('--output-metric', type=str, default="",
                        help='Output file path to store the metric value obtained in test set')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Create data generators (train and test share the same synthetic data)
    train_loader = Dummy_datagen(args.batch_size)
    test_loader = Dummy_datagen(args.batch_size)

    # Train
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test_loss = test(model, device, test_loader)
        train_loader.reset()
        test_loader.reset()

    # In case of providing an output metric file, store the test MSE loss value
    if args.output_metric != "":
        with open(args.output_metric, 'w') as ofile:
            ofile.write(str(test_loss))

    # Save to ONNX file using the public export API (torch.onnx._export is an
    # internal/private function and may break across PyTorch releases)
    dummy_input = torch.randn(args.batch_size, 3, 16, 16, 16, device=device)
    torch.onnx.export(model, dummy_input, args.output_path,
                      keep_initializers_as_inputs=True)


if __name__ == '__main__':
    main()
8 changes: 8 additions & 0 deletions scripts/tests/run_onnx_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@ scripts_to_run+=("EDDL_to_EDDL_conv3D;test_onnx_conv3D;test_onnx_conv3D")
scripts_to_run+=("EDDL_to_EDDL_convT2D;test_onnx_convT2D;test_onnx_convT2D")
scripts_to_run+=("EDDL_to_EDDL_convT3D;test_onnx_convT3D;test_onnx_convT3D")
scripts_to_run+=("EDDL_to_EDDL_upsample2D;test_onnx_upsample2D;test_onnx_upsample2D")
scripts_to_run+=("EDDL_to_EDDL_upsample3D;test_onnx_upsample3D;test_onnx_upsample3D")
scripts_to_run+=("EDDL_to_EDDL_GRU_imdb;test_onnx_gru_imdb;test_onnx_gru_imdb")
scripts_to_run+=("EDDL_to_EDDL_LSTM_imdb;test_onnx_lstm_imdb;test_onnx_lstm_imdb")
scripts_to_run+=("EDDL_to_EDDL_GRU_mnist;test_onnx_gru_mnist;test_onnx_gru_mnist")
Expand All @@ -123,6 +124,7 @@ scripts_to_run+=("EDDL_to_EDDL_conv3D_CPU;test_onnx_conv3D_cpu;test_onnx_conv3D,
#scripts_to_run+=("EDDL_to_EDDL_convT2D_CPU;test_onnx_convT2D_cpu;test_onnx_convT2D,--cpu") ConvT2D not available in CPU
#scripts_to_run+=("EDDL_to_EDDL_convT3D_CPU;test_onnx_convT3D_cpu;test_onnx_convT3D,--cpu") ConvT3D not available in CPU
scripts_to_run+=("EDDL_to_EDDL_upsample2D_CPU;test_onnx_upsample2D_cpu;test_onnx_upsample2D,--cpu")
scripts_to_run+=("EDDL_to_EDDL_upsample3D_CPU;test_onnx_upsample3D_cpu;test_onnx_upsample3D,--cpu")
scripts_to_run+=("EDDL_to_EDDL_GRU_imdb_CPU;test_onnx_gru_imdb_cpu;test_onnx_gru_imdb,--cpu")
scripts_to_run+=("EDDL_to_EDDL_LSTM_imdb_CPU;test_onnx_lstm_imdb_cpu;test_onnx_lstm_imdb,--cpu")
scripts_to_run+=("EDDL_to_EDDL_GRU_mnist_CPU;test_onnx_gru_mnist_cpu;test_onnx_gru_mnist,--cpu")
Expand Down Expand Up @@ -200,6 +202,7 @@ then
eddl2onnxrt+=("EDDL_to_ONNXRT_convT2D;test_onnx_convT2D;onnxruntime_enc_dec_mnist.py")
eddl2onnxrt+=("EDDL_to_ONNXRT_convT3D;test_onnx_convT3D;onnxruntime_enc_dec_synthetic3D.py")
eddl2onnxrt+=("EDDL_to_ONNXRT_upsample2D;test_onnx_upsample2D;onnxruntime_enc_dec_mnist.py")
eddl2onnxrt+=("EDDL_to_ONNXRT_upsample3D;test_onnx_upsample3D;onnxruntime_enc_dec_synthetic3D.py")
eddl2onnxrt+=("EDDL_to_ONNXRT_GRU_imdb;test_onnx_gru_imdb;onnxruntime_imdb_keras.py,--unsqueeze-input")
eddl2onnxrt+=("EDDL_to_ONNXRT_LSTM_imdb;test_onnx_lstm_imdb;onnxruntime_imdb_keras.py,--unsqueeze-input")
eddl2onnxrt+=("EDDL_to_ONNXRT_LSTM_enc_dec;test_onnx_lstm_enc_dec;onnxruntime_recurrent_enc_dec_mnist.py")
Expand All @@ -212,6 +215,7 @@ then
#eddl2onnxrt+=("EDDL_to_ONNXRT_convT2D_CPU;test_onnx_convT2D_cpu;onnxruntime_enc_dec_mnist.py") ConvT2D not available in CPU
#eddl2onnxrt+=("EDDL_to_ONNXRT_convT3D_CPU;test_onnx_convT3D_cpu;onnxruntime_enc_dec_synthetic3D.py") ConvT3D not available in CPU
eddl2onnxrt+=("EDDL_to_ONNXRT_upsample2D_CPU;test_onnx_upsample2D_cpu;onnxruntime_enc_dec_mnist.py")
eddl2onnxrt+=("EDDL_to_ONNXRT_upsample3D_CPU;test_onnx_upsample3D_cpu;onnxruntime_enc_dec_synthetic3D.py")
eddl2onnxrt+=("EDDL_to_ONNXRT_GRU_imdb_CPU;test_onnx_gru_imdb_cpu;onnxruntime_imdb_keras.py,--unsqueeze-input")
eddl2onnxrt+=("EDDL_to_ONNXRT_LSTM_imdb_CPU;test_onnx_lstm_imdb_cpu;onnxruntime_imdb_keras.py,--unsqueeze-input")
eddl2onnxrt+=("EDDL_to_ONNXRT_LSTM_enc_dec_CPU;test_onnx_lstm_enc_dec_cpu;onnxruntime_recurrent_enc_dec_mnist.py")
Expand Down Expand Up @@ -331,6 +335,7 @@ then
pytorch2eddl+=("Pytorch_to_EDDL_convT2D;test_onnx_pytorch_convT2D;export_scripts/convT2D_enc_dec_mnist_pytorch_export.py;test_onnx_convT2D,--import")
pytorch2eddl+=("Pytorch_to_EDDL_convT3D;test_onnx_pytorch_convT3D;export_scripts/convT3D_enc_dec_synthetic_pytorch_export.py;test_onnx_convT3D,--import")
pytorch2eddl+=("Pytorch_to_EDDL_upsample2D;test_onnx_pytorch_upsample2D;export_scripts/upsample2D_enc_dec_mnist_pytorch_export.py;test_onnx_upsample2D,--import")
pytorch2eddl+=("Pytorch_to_EDDL_upsample3D;test_onnx_pytorch_upsample3D;export_scripts/upsample3D_enc_dec_synthetic_pytorch_export.py;test_onnx_upsample3D,--import")
pytorch2eddl+=("Pytorch_to_EDDL_LSTM_IMDB;test_onnx_pytorch_LSTM_imdb;export_scripts/lstm_pytorch_export.py;test_onnx_lstm_imdb,--import")
pytorch2eddl+=("Pytorch_to_EDDL_GRU_IMDB;test_onnx_pytorch_GRU_imdb;export_scripts/gru_pytorch_export.py;test_onnx_gru_imdb,--import")
pytorch2eddl+=("Pytorch_to_EDDL_LSTM_MNIST;test_onnx_pytorch_LSTM_mnist;export_scripts/lstm_mnist_pytorch_export.py;test_onnx_lstm_mnist,--import")
Expand All @@ -347,6 +352,7 @@ then
#pytorch2eddl+=("Pytorch_to_EDDL_convT2D_CPU;test_onnx_pytorch_convT2D;none;test_onnx_convT2D,--import,--cpu") ConvT2D not available in CPU
#pytorch2eddl+=("Pytorch_to_EDDL_convT3D_CPU;test_onnx_pytorch_convT3D;none;test_onnx_convT3D,--import,--cpu") ConvT3D not available in CPU
pytorch2eddl+=("Pytorch_to_EDDL_upsample2D_CPU;test_onnx_pytorch_upsample2D;none;test_onnx_upsample2D,--import,--cpu")
pytorch2eddl+=("Pytorch_to_EDDL_upsample3D_CPU;test_onnx_pytorch_upsample3D;none;test_onnx_upsample3D,--import,--cpu")
pytorch2eddl+=("Pytorch_to_EDDL_LSTM_IMDB_CPU;test_onnx_pytorch_LSTM_imdb;none;test_onnx_lstm_imdb,--import,--cpu")
pytorch2eddl+=("Pytorch_to_EDDL_GRU_IMDB_CPU;test_onnx_pytorch_GRU_imdb;none;test_onnx_gru_imdb,--import,--cpu")
pytorch2eddl+=("Pytorch_to_EDDL_LSTM_MNIST_CPU;test_onnx_pytorch_LSTM_mnist;none;test_onnx_lstm_mnist,--import,--cpu")
Expand Down Expand Up @@ -427,6 +433,7 @@ then
keras2eddl+=("Keras_to_EDDL_convT2D;test_onnx_keras_convT2D;export_scripts/convT2D_enc_dec_mnist_keras_export.py;test_onnx_convT2D,--import")
keras2eddl+=("Keras_to_EDDL_convT3D;test_onnx_keras_convT3D;export_scripts/convT3D_enc_dec_synthetic_keras_export.py;test_onnx_convT3D,--import,--channels-last")
keras2eddl+=("Keras_to_EDDL_upsample2D;test_onnx_keras_upsample2D;export_scripts/upsample2D_enc_dec_mnist_keras_export.py;test_onnx_upsample2D,--import")
keras2eddl+=("Keras_to_EDDL_upsample3D;test_onnx_keras_upsample3D;export_scripts/upsample3D_enc_dec_synthetic_keras_export.py;test_onnx_upsample3D,--import")
keras2eddl+=("Keras_to_EDDL_LSTM_IMDB;test_onnx_keras_LSTM_imdb;export_scripts/lstm_keras_export.py;test_onnx_lstm_imdb,--import")
keras2eddl+=("Keras_to_EDDL_GRU_IMDB;test_onnx_keras_GRU_imdb;export_scripts/gru_keras_export.py;test_onnx_gru_imdb,--import")
keras2eddl+=("Keras_to_EDDL_LSTM_MNIST;test_onnx_keras_LSTM_mnist;export_scripts/lstm_mnist_keras_export.py;test_onnx_lstm_mnist,--import")
Expand All @@ -443,6 +450,7 @@ then
#keras2eddl+=("Keras_to_EDDL_convT2D_CPU;test_onnx_keras_convT2D;none;test_onnx_convT2D,--import,--cpu") ConvT2D not available in CPU
#keras2eddl+=("Keras_to_EDDL_convT3D_CPU;test_onnx_keras_convT3D;none;test_onnx_convT3D,--import,--cpu,--channels-last") ConvT3D not available in CPU
keras2eddl+=("Keras_to_EDDL_upsample2D_CPU;test_onnx_keras_upsample2D;none;test_onnx_upsample2D,--import,--cpu")
keras2eddl+=("Keras_to_EDDL_upsample3D_CPU;test_onnx_keras_upsample3D;none;test_onnx_upsample3D,--import,--cpu")
keras2eddl+=("Keras_to_EDDL_LSTM_IMDB_CPU;test_onnx_keras_LSTM_imdb;none;test_onnx_lstm_imdb,--import,--cpu")
keras2eddl+=("Keras_to_EDDL_GRU_IMDB_CPU;test_onnx_keras_GRU_imdb;none;test_onnx_gru_imdb,--import,--cpu")
keras2eddl+=("Keras_to_EDDL_LSTM_MNIST_CPU;test_onnx_keras_LSTM_mnist;none;test_onnx_lstm_mnist,--import,--cpu")
Expand Down

0 comments on commit df2c240

Please sign in to comment.