ConvTranspose2d Operator (#91)
Add ConvTranspose2d operator
Coderitter-GmbH authored Mar 11, 2021
1 parent 52b2eb4 commit 7108b7b
Showing 10 changed files with 779 additions and 3 deletions.
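
For reference, the arithmetic this operator implements can be sketched independently of the repository's data structures. The following standalone C program (every name and size in it is illustrative and not part of this commit) scatters a 2x2 single-channel input through a 2x2 kernel with stride 2, no padding and no dilation, into a 4x4 output, using the same scatter-accumulate pattern as the executer below.

#include <stdio.h>

/* Minimal reference sketch of a 2D transpose convolution: single input and
 * output channel, no padding, no dilation. All names are illustrative and
 * independent of the repository's API. */
static void conv_transpose_2d_ref(const float *input, int inH, int inW,
                                  const float *kernel, int kH, int kW,
                                  int strideY, int strideX,
                                  float *output, int outH, int outW)
{
    /* Start from a zeroed output and scatter each input element through the
     * kernel into its output window. */
    for (int i = 0; i < outH * outW; i++) output[i] = 0.0f;
    for (int iy = 0; iy < inH; iy++) {
        for (int ix = 0; ix < inW; ix++) {
            for (int ky = 0; ky < kH; ky++) {
                for (int kx = 0; kx < kW; kx++) {
                    int oy = iy * strideY + ky;
                    int ox = ix * strideX + kx;
                    output[oy * outW + ox] += input[iy * inW + ix] * kernel[ky * kW + kx];
                }
            }
        }
    }
}

int main(void)
{
    const float input[4]  = { 1, 2, 3, 4 };  /* 2x2 input, row-major */
    const float kernel[4] = { 1, 0, 0, 1 };  /* 2x2 kernel */
    float output[16];                        /* 4x4 output: (2 - 1) * 2 + 2 = 4 */

    conv_transpose_2d_ref(input, 2, 2, kernel, 2, 2, 2, 2, output, 4, 4);

    for (int y = 0; y < 4; y++) {
        for (int x = 0; x < 4; x++) {
            printf("%4.0f", output[y * 4 + x]);
        }
        printf("\n");
    }
    return 0;
}

Each input value, scaled by the kernel, lands in its own 2x2 block of the output; dilation spaces out the kernel taps and padding crops the borders of the result, exactly what the bounds checks in the executer handle.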
@@ -0,0 +1,143 @@
//this file was generated by ../../../../../../scripts/onnx_generator/OperatorTemplate.py
#include "operator__ai_onnx__convtranspose__11.h"
#include "tracing.h"
#include "utils.h"

// transforms a 3D position into a flat 1D array index
static inline int calcArrayPos3D(int x, int y, int outputChannel, int width, int height) {
    return outputChannel * height * width + y * width + x;
}
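
// Example: for a 3x2 (W x H) feature map, calcArrayPos3D(1, 0, 2, 3, 2)
// returns 2*2*3 + 0*3 + 1 = 13, i.e. element (x=1, y=0) of channel 2 in the
// flat CHW buffer.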

// transforms a 4D position into a flat 1D array index
static inline int calcArrayPos4D(int x, int y, int outputChannel, int inputChannel, int width, int height, int nOfOutputChannels) {
    return inputChannel * nOfOutputChannels * height * width
         + outputChannel * height * width
         + y * width
         + x;
}
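
// Example: for a weight tensor of shape [C_in, C_out, kH, kW] = [2, 3, 2, 2]
// (the ONNX ConvTranspose layout with group == 1),
// calcArrayPos4D(1, 1, 2, 1, 2, 2, 3) returns 1*3*2*2 + 2*2*2 + 1*2 + 1 = 23,
// i.e. the last element of the flat buffer.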

operator_status
execute_operator__ai_onnx__convtranspose__11__T_tensor_float(
node_context *ctx
)
{
TRACE_ENTRY(1);

TRACE_NODE(2, true, ctx->onnx_node);

/* UNCOMMENT AS NEEDED */

Onnx__TensorProto *i_X = searchInputByName(ctx, 0);
Onnx__TensorProto *i_W = searchInputByName(ctx, 1);
Onnx__TensorProto *i_B = searchInputByName(ctx, 2);
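
// The tensors follow the ONNX ConvTranspose conventions: X is [N, C_in, H, W],
// W is [C_in, C_out / group, kH, kW] and the optional B is [C_out]. The dims
// are read below under the assumption that group == 1 (the group attribute is
// left commented out).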

TRACE_TENSOR(2, true, i_X);
TRACE_TENSOR(2, true, i_W);
TRACE_TENSOR(2, i_B != NULL, i_B); // trace the optional bias only when it is present

context_operator__ai_onnx__convtranspose__11 *op_ctx = ctx->executer_context;

// size is not needed, because this operator works for one fixed size only

// char* auto_pad = op_ctx->auto_pad;
size_t n_dilations = op_ctx->n_dilations;
int64_t* dilations = op_ctx->dilations;
// int64_t group = op_ctx->group;
// size_t n_kernel_shape = op_ctx->n_kernel_shape;
// int64_t* kernel_shape = op_ctx->kernel_shape;
// size_t n_output_padding = op_ctx->n_output_padding;
// int64_t* output_padding = op_ctx->output_padding;
// size_t n_output_shape = op_ctx->n_output_shape;
// int64_t* output_shape = op_ctx->output_shape;
size_t n_pads = op_ctx->n_pads;
int64_t* pads = op_ctx->pads;
size_t n_strides = op_ctx->n_strides;
int64_t* strides = op_ctx->strides;

// TRACE_VAR(2, true, auto_pad, "\"%s\"");
TRACE_ARRAY(2, true, dilations, , n_dilations, "%" PRId64);
// TRACE_VAR(2, true, group, "%" PRId64);
// TRACE_ARRAY(2, true, kernel_shape, , n_kernel_shape, "%" PRId64);
// TRACE_ARRAY(2, true, output_padding, , n_output_padding, "%" PRId64);
// TRACE_ARRAY(2, true, output_shape, , n_output_shape, "%" PRId64);
TRACE_ARRAY(2, true, pads, , n_pads, "%" PRId64);
TRACE_ARRAY(2, true, strides, , n_strides, "%" PRId64);

Onnx__TensorProto *o_Y = searchOutputByName(ctx, 0);

// TRACE_TENSOR(2, true, o_Y);

/* DO CALCULATION HERE */

const int inputSizeX = i_X->dims[3];
const int inputSizeY = i_X->dims[2];
const int inputChannels = i_X->dims[1];

const float *input = i_X->float_data;
const float *weights = i_W->float_data;

const int kernelSizeX = i_W->dims[3];
const int kernelSizeY = i_W->dims[2];
const int outputChannels = i_W->dims[1];

const int strideX = strides[1];
const int strideY = strides[0];

const int dilationsX = dilations[1];
const int dilationsY = dilations[0];

const int padStartY = pads[0];
const int padStartX = pads[1];

// padEnd values are not needed here: the bounds check against the output size
// below already accounts for end padding
//const int padEndY = pads[2];
//const int padEndX = pads[3];

// Y is laid out as [N, C_out, H_out, W_out], so dims[3] is the width and dims[2] the height
const int outputSizeX = o_Y->dims[3];
const int outputSizeY = o_Y->dims[2];
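
// For reference, the ONNX spec ties these sizes to the attributes via
//   output_size = stride * (input_size - 1) + output_padding
//                 + ((kernel_size - 1) * dilation + 1) - pad_start - pad_end
// per spatial axis; here the precomputed dims of Y are used directly.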

float* output = o_Y->float_data;
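
// Note: there is no loop over the batch dimension, so this kernel effectively
// processes a single batch element (N == 1).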

// fill with bias
for(int c = 0; c < outputChannels; c++) {
    float bias = i_B ? i_B->float_data[c] : 0;
    for(int y = 0; y < outputSizeY; y++) {
        for(int x = 0; x < outputSizeX; x++) {
            output[calcArrayPos3D(x, y, c, outputSizeX, outputSizeY)] = bias;
        }
    }
}

//actual transpose convolution
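// Transpose convolution is computed as a scatter: every input element is
// multiplied by each kernel weight and accumulated into the output position it
// maps to, the adjoint of the gather pattern of a regular convolution.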
for(int i = 0; i < inputChannels; i++) {
    for(int c = 0; c < outputChannels; c++) {
        for(int inputPosY = 0; inputPosY < inputSizeY; inputPosY++) {
            for(int inputPosX = 0; inputPosX < inputSizeX; inputPosX++) {
                float _input = input[calcArrayPos3D(inputPosX, inputPosY, i, inputSizeX, inputSizeY)];

                for(int kernelPosX = 0; kernelPosX < kernelSizeX; kernelPosX++) {
                    int x = inputPosX * strideX + kernelPosX * dilationsX - padStartX;
                    if(x < 0 || x >= outputSizeX) {
                        continue;
                    }

                    for(int kernelPosY = 0; kernelPosY < kernelSizeY; kernelPosY++) {
                        int y = inputPosY * strideY + kernelPosY * dilationsY - padStartY;
                        if(y < 0 || y >= outputSizeY) {
                            continue;
                        }

                        const float _weight = weights[calcArrayPos4D(kernelPosX, kernelPosY, c, i, kernelSizeX, kernelSizeY, outputChannels)];
                        output[calcArrayPos3D(x, y, c, outputSizeX, outputSizeY)] += _input * _weight;
                    }
                }
            }
        }
    }
}

TRACE_EXIT(1);

/* CHANGE RETURN CODE IF THIS EXECUTER IS VALID */
return OP_OK;
}
@@ -0,0 +1,78 @@
//this file was generated by ../../../../../../scripts/onnx_generator/OperatorTemplate.py
#include "operator__ai_onnx__convtranspose__11.h"
#include "tracing.h"
#include "utils.h"

void
free_operator__ai_onnx__convtranspose__11(
node_context *ctx
)
{
TRACE_ENTRY(1);

TRACE_NODE(2, true, ctx->onnx_node);

/* UNCOMMENT AS NEEDED */

// Onnx__TensorProto *i_X = searchInputByName(ctx, 0);
// Onnx__TensorProto *i_W = searchInputByName(ctx, 1);
// Onnx__TensorProto *i_B = searchInputByName(ctx, 2);

// TRACE_TENSOR(2, true, i_X);
// TRACE_TENSOR(2, true, i_W);
// TRACE_TENSOR(2, B, i_B);

// Onnx__AttributeProto *a_auto_pad = searchAttributeNyName(ctx->onnx_node->n_attribute,ctx->onnx_node->attribute,"auto_pad");
// Onnx__AttributeProto *a_dilations = searchAttributeNyName(ctx->onnx_node->n_attribute,ctx->onnx_node->attribute,"dilations");
// Onnx__AttributeProto *a_group = searchAttributeNyName(ctx->onnx_node->n_attribute,ctx->onnx_node->attribute,"group");
// Onnx__AttributeProto *a_kernel_shape = searchAttributeNyName(ctx->onnx_node->n_attribute,ctx->onnx_node->attribute,"kernel_shape");
// Onnx__AttributeProto *a_output_padding = searchAttributeNyName(ctx->onnx_node->n_attribute,ctx->onnx_node->attribute,"output_padding");
// Onnx__AttributeProto *a_output_shape = searchAttributeNyName(ctx->onnx_node->n_attribute,ctx->onnx_node->attribute,"output_shape");
// Onnx__AttributeProto *a_pads = searchAttributeNyName(ctx->onnx_node->n_attribute,ctx->onnx_node->attribute,"pads");
// Onnx__AttributeProto *a_strides = searchAttributeNyName(ctx->onnx_node->n_attribute,ctx->onnx_node->attribute,"strides");

// TRACE_ATTRIBUTE(2, a_auto_pad, a_auto_pad);
// TRACE_ATTRIBUTE(2, a_dilations, a_dilations);
// TRACE_ATTRIBUTE(2, a_group, a_group);
// TRACE_ATTRIBUTE(2, a_kernel_shape, a_kernel_shape);
// TRACE_ATTRIBUTE(2, a_output_padding, a_output_padding);
// TRACE_ATTRIBUTE(2, a_output_shape, a_output_shape);
// TRACE_ATTRIBUTE(2, a_pads, a_pads);
// TRACE_ATTRIBUTE(2, a_strides, a_strides);

Onnx__TensorProto *o_Y = searchOutputByName(ctx, 0);

// TRACE_TENSOR(2, true, o_Y);

/* FREE CONTEXT HERE IF NEEDED */

context_operator__ai_onnx__convtranspose__11 *op_ctx = ctx->executer_context;

// TRACE_VAR(2, true, op_ctx->auto_pad, "\"%s\"");
TRACE_ARRAY(2, true, op_ctx->dilations, , op_ctx->n_dilations, "%" PRId64);
// TRACE_VAR(2, true, op_ctx->group, "%" PRId64);
// TRACE_ARRAY(2, true, op_ctx->kernel_shape, , op_ctx->n_kernel_shape, "%" PRId64);
// TRACE_ARRAY(2, true, op_ctx->output_padding, , op_ctx->n_output_padding, "%" PRId64);
// TRACE_ARRAY(2, true, op_ctx->output_shape, , op_ctx->n_output_shape, "%" PRId64);
TRACE_ARRAY(2, true, op_ctx->pads, , op_ctx->n_pads, "%" PRId64);
TRACE_ARRAY(2, true, op_ctx->strides, , op_ctx->n_strides, "%" PRId64);

// free(op_ctx->auto_pad);
free(op_ctx->dilations);
// free(op_ctx->kernel_shape);
// free(op_ctx->output_padding);
// free(op_ctx->output_shape);
free(op_ctx->pads);
free(op_ctx->strides);

free(op_ctx);


/* FREE OUTPUT DATA_TYPE AND SHAPE HERE */
/* DO NOT FREE THE TENSOR ITSELF */

// freeTensorData(o_Y);
free(o_Y->dims);

TRACE_EXIT(1);
}
@@ -0,0 +1,152 @@
//this file was generated by ../../../../../../scripts/onnx_generator/OperatorInfo.py
#include "operators/operator_info.h"
#include "operator__ai_onnx__convtranspose__11.h"

/* attributes */
static
operator_info_attribute
attributes[] = {
    {
        .name = "auto_pad",
        .optional = true,
        .type = ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRING
    },
    {
        .name = "dilations",
        .optional = true,
        .type = ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS
    },
    {
        .name = "group",
        .optional = true,
        .type = ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INT
    },
    {
        .name = "kernel_shape",
        .optional = true,
        .type = ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS
    },
    {
        .name = "output_padding",
        .optional = true,
        .type = ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS
    },
    {
        .name = "output_shape",
        .optional = true,
        .type = ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS
    },
    {
        .name = "pads",
        .optional = true,
        .type = ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS
    },
    {
        .name = "strides",
        .optional = true,
        .type = ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS
    }
};

/* input tensors */
static
uint32_t
input_tensor_type_X[] = {
ONNX__TENSOR_PROTO__DATA_TYPE__DOUBLE,
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT,
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT16
};

static
uint32_t
input_tensor_type_W[] = {
ONNX__TENSOR_PROTO__DATA_TYPE__DOUBLE,
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT,
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT16
};

static
uint32_t
input_tensor_type_B[] = {
ONNX__TENSOR_PROTO__DATA_TYPE__DOUBLE,
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT,
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT16
};

static
operator_info_tensor
inputs[] = {
    {
        .name = "X",
        .optional = false,
        .variadic = false,
        .homogeneous = true,
        .constraint = "T",
        .n_types = 3,
        .types = input_tensor_type_X
    },
    {
        .name = "W",
        .optional = false,
        .variadic = false,
        .homogeneous = true,
        .constraint = "T",
        .n_types = 3,
        .types = input_tensor_type_W
    },
    {
        .name = "B",
        .optional = true,
        .variadic = true,
        .homogeneous = true,
        .constraint = "T",
        .n_types = 3,
        .types = input_tensor_type_B
    }
};

/* output tensors */
static
uint32_t
output_tensor_type_Y[] = {
ONNX__TENSOR_PROTO__DATA_TYPE__DOUBLE,
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT,
ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT16
};

static
operator_info_tensor
outputs[] = {
    {
        .name = "Y",
        .optional = false,
        .variadic = false,
        .homogeneous = true,
        .constraint = "T",
        .n_types = 3,
        .types = output_tensor_type_Y
    }
};

/* constraints */
static
operator_info_constraint
constraints[] = {
{ "T" }
};

/* operator info */
operator_info
info_operator__ai_onnx__convtranspose__11 = {
.name = "ConvTranspose",
.range_input = { 2, 3 },
.range_output = { 1, 1 },
.n_attribute = 8,
.attribute = attributes,
.n_input = 3,
.input = inputs,
.n_output = 1,
.output = outputs,
.n_constraint = 1,
.constraint = constraints
};