From 4ebdee27c75dbf9cb0eb76f628542a1ac5d9f828 Mon Sep 17 00:00:00 2001
From: Alexander Yermolovich
Date: Fri, 5 Apr 2019 17:46:51 -0700
Subject: [PATCH 1/2] ONNX doesn't support extract_image_patches, but does
 support SpaceToDepth. For this special case (kernel sizes equal to the
 strides, unit rates, 'VALID' padding) the two ops are equivalent.

---
 darkflow/net/ops/convolution.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/darkflow/net/ops/convolution.py b/darkflow/net/ops/convolution.py
index 167b0fd78..d25626eeb 100644
--- a/darkflow/net/ops/convolution.py
+++ b/darkflow/net/ops/convolution.py
@@ -24,8 +24,8 @@ def _forward(self):
     def forward(self):
         inp = self.inp.out
         s = self.lay.stride
-        self.out = tf.extract_image_patches(
-            inp, [1,s,s,1], [1,s,s,1], [1,1,1,1], 'VALID')
+        self.out = tf.space_to_depth(
+            inp, s)
 
     def speak(self):
         args = [self.lay.stride] * 2

From 793f264b8cc586f590559ad8eb1f403e457473b8 Mon Sep 17 00:00:00 2001
From: Alexander Yermolovich
Date: Wed, 25 Sep 2019 12:37:19 -0700
Subject: [PATCH 2/2] Reverted SpaceToDepth back to extract_image_patches;
 added fused batch norm.

---
 darkflow/net/ops/convolution.py | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/darkflow/net/ops/convolution.py b/darkflow/net/ops/convolution.py
index d25626eeb..0aee76a67 100644
--- a/darkflow/net/ops/convolution.py
+++ b/darkflow/net/ops/convolution.py
@@ -2,6 +2,7 @@
 from .baseop import BaseOp
 import tensorflow as tf
 import numpy as np
+import math
 
 class reorg(BaseOp):
     def _forward(self):
@@ -24,8 +25,9 @@ def _forward(self):
     def forward(self):
         inp = self.inp.out
         s = self.lay.stride
-        self.out = tf.space_to_depth(
-            inp, s)
+        # reverted: extract_image_patches gives higher mAP here
+        self.out = tf.extract_image_patches(
+            inp, [1,s,s,1], [1,s,s,1], [1,1,1,1], 'VALID')
 
     def speak(self):
         args = [self.lay.stride] * 2
@@ -75,17 +77,22 @@ def forward(self):
         self.out = tf.nn.bias_add(temp, self.lay.w['biases'])
 
     def batchnorm(self, layer, inp):
         if not self.var:
-            temp = (inp - layer.w['moving_mean'])
-            temp /= (np.sqrt(layer.w['moving_variance']) + 1e-5)
-            temp *= layer.w['gamma']
-            return temp
+            tensor, mean, variance = tf.nn.fused_batch_norm(
+                inp, layer.w['gamma'],
+                np.zeros([np.size(layer.w['gamma'])], dtype=np.float32),
+                mean=layer.w['moving_mean'],
+                variance=layer.w['moving_variance'],
+                epsilon=1e-5,
+                is_training=False)
+            return tensor
         else:
             args = dict({
                 'center' : False, 'scale' : True,
                 'epsilon': 1e-5, 'scope' : self.scope,
                 'updates_collections' : None,
                 'is_training': layer.h['is_training'],
-                'param_initializers': layer.w
+                'param_initializers': layer.w,
+                'fused' : True
             })
             return slim.batch_norm(inp, **args)
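
Note on PATCH 1/2 (not part of the patch series): the equivalence claim in the
commit message can be checked numerically. A minimal TF1-style sketch, assuming
an NHWC input whose spatial dimensions are divisible by the stride; the shapes
below are made up for illustration:

    import numpy as np
    import tensorflow as tf

    s = 2
    x = np.random.rand(1, 4, 6, 3).astype(np.float32)  # NHWC, H and W divisible by s
    inp = tf.constant(x)

    # With ksizes == strides == [1,s,s,1], rates == [1,1,1,1], and 'VALID'
    # padding, each output pixel holds one full s*s*C patch flattened in
    # row-major order, which is exactly what space_to_depth produces.
    patches = tf.extract_image_patches(
        inp, [1, s, s, 1], [1, s, s, 1], [1, 1, 1, 1], 'VALID')
    s2d = tf.space_to_depth(inp, s)

    with tf.Session() as sess:
        a, b = sess.run([patches, s2d])
    print(a.shape, np.allclose(a, b))  # (1, 2, 3, 12) True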
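Note on PATCH 2/2 (not part of the patch series): tf.nn.fused_batch_norm with a
zero offset covers the frozen-inference path the old code computed by hand, but
the two differ slightly in where epsilon enters: the old expression adds 1e-5
after the square root, while fused_batch_norm computes
gamma * (x - mean) / sqrt(variance + epsilon). A minimal sketch of the
comparison, with made-up shapes and random statistics:

    import numpy as np
    import tensorflow as tf

    c = 16
    x = np.random.rand(2, 8, 8, c).astype(np.float32)
    gamma = np.random.rand(c).astype(np.float32)
    mean = np.random.rand(c).astype(np.float32)
    var = np.random.rand(c).astype(np.float32) + 0.5

    # Old code path: gamma * (x - mean) / (sqrt(var) + eps)
    manual = gamma * (x - mean) / (np.sqrt(var) + 1e-5)

    # New code path: zero beta/offset, frozen statistics, eps inside the sqrt
    fused, _, _ = tf.nn.fused_batch_norm(
        tf.constant(x), tf.constant(gamma), tf.zeros([c]),
        mean=tf.constant(mean), variance=tf.constant(var),
        epsilon=1e-5, is_training=False)

    with tf.Session() as sess:
        f = sess.run(fused)
    print(np.max(np.abs(manual - f)))  # tiny but nonzero difference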