From 2356c6232c9ccad97ae4d2c88dfecac26e3de25d Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 4 Aug 2016 19:57:33 +0200 Subject: [PATCH] Fix Adam optimizer Make the bias correction divide (rather than multiply) by the correction factor, following the Adam paper, v8 (https://arxiv.org/pdf/1412.6980v8.pdf), Algorithm 1 (p. 2). Behaves significantly better when running the trainer demo on MNIST (even better when changing the Adam learning rate to the recommended 0.001). --- src/convnet_trainers.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/convnet_trainers.js b/src/convnet_trainers.js index 760efe4c..b550e703 100644 --- a/src/convnet_trainers.js +++ b/src/convnet_trainers.js @@ -96,8 +96,8 @@ // adam update gsumi[j] = gsumi[j] * this.beta1 + (1- this.beta1) * gij; // update biased first moment estimate xsumi[j] = xsumi[j] * this.beta2 + (1-this.beta2) * gij * gij; // update biased second moment estimate - var biasCorr1 = gsumi[j] * (1 - Math.pow(this.beta1, this.k)); // correct bias first moment estimate - var biasCorr2 = xsumi[j] * (1 - Math.pow(this.beta2, this.k)); // correct bias second moment estimate + var biasCorr1 = gsumi[j] / (1 - Math.pow(this.beta1, this.k)); // correct bias first moment estimate + var biasCorr2 = xsumi[j] / (1 - Math.pow(this.beta2, this.k)); // correct bias second moment estimate + var dx = - this.learning_rate * biasCorr1 / (Math.sqrt(biasCorr2) + this.eps); p[j] += dx; } else if(this.method === 'adagrad') {