From 24f7615920c4ca169cd6f8610585dcbf7130ed7b Mon Sep 17 00:00:00 2001
From: njellinas
Date: Fri, 26 Jul 2019 11:26:48 +0300
Subject: [PATCH] Add masked MSE loss

Signed-off-by: njellinas
---
 loss_function.py | 14 +++++++++++---
 train.py         |  4 ++--
 2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/loss_function.py b/loss_function.py
index 99cae952b..8547e439f 100644
--- a/loss_function.py
+++ b/loss_function.py
@@ -1,11 +1,19 @@
 from torch import nn
+import torch.nn.functional as F
 
 
 class Tacotron2Loss(nn.Module):
     def __init__(self):
         super(Tacotron2Loss, self).__init__()
 
-    def forward(self, model_output, targets):
+    @staticmethod
+    def masked_l2_loss(out, target, lengths):
+        num_not_padded = lengths.sum() * out.size(1)
+        loss = F.mse_loss(out, target, reduction="sum")
+        loss = loss / num_not_padded
+        return loss
+
+    def forward(self, model_output, targets, output_lengths):
         mel_target, gate_target = targets[0], targets[1]
         mel_target.requires_grad = False
         gate_target.requires_grad = False
@@ -13,7 +21,7 @@ def forward(self, model_output, targets):
 
         mel_out, mel_out_postnet, gate_out, _ = model_output
         gate_out = gate_out.view(-1, 1)
-        mel_loss = nn.MSELoss()(mel_out, mel_target) + \
-            nn.MSELoss()(mel_out_postnet, mel_target)
+        mel_loss = self.masked_l2_loss(mel_out, mel_target, output_lengths) + \
+            self.masked_l2_loss(mel_out_postnet, mel_target, output_lengths)
         gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
         return mel_loss + gate_loss
diff --git a/train.py b/train.py
index e93917bbc..40e4203ed 100644
--- a/train.py
+++ b/train.py
@@ -132,7 +132,7 @@ def validate(model, criterion, valset, iteration, batch_size, n_gpus,
         for i, batch in enumerate(val_loader):
             x, y = model.parse_batch(batch)
             y_pred = model(x)
-            loss = criterion(y_pred, y)
+            loss = criterion(y_pred, y, x[-1])
             if distributed_run:
                 reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
             else:
@@ -214,7 +214,7 @@ def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
             x, y = model.parse_batch(batch)
             y_pred = model(x)
 
-            loss = criterion(y_pred, y)
+            loss = criterion(y_pred, y, x[-1])
             if hparams.distributed_run:
                 reduced_loss = reduce_tensor(loss.data, n_gpus).item()
             else:
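
Note, not part of the patch: masked_l2_loss above sums the squared error over
every element, padded frames included, and only divides by the number of
non-padded elements. Below is a minimal sketch of a variant that also zeroes
the padded frames before summing, assuming mel tensors shaped
(batch, n_mels, T) and output_lengths holding the number of valid frames per
batch item; the helper name fully_masked_l2_loss is illustrative only.

import torch

def fully_masked_l2_loss(out, target, lengths):
    # Hypothetical helper (not in the patch): mask out padded frames before
    # summing, then normalize by the count of non-padded elements.
    max_len = out.size(2)
    # (batch, T) boolean mask of valid frames, built from per-item lengths.
    mask = torch.arange(max_len, device=out.device)[None, :] < lengths[:, None]
    mask = mask.unsqueeze(1)                      # (batch, 1, T), broadcasts over mel channels
    sq_err = (out - target) ** 2 * mask           # padded frames contribute zero
    num_not_padded = lengths.sum() * out.size(1)  # same normalizer as the patch
    return sq_err.sum() / num_not_padded

# Example call, mirroring the patched forward():
# mel_loss = fully_masked_l2_loss(mel_out, mel_target, output_lengths) + \
#            fully_masked_l2_loss(mel_out_postnet, mel_target, output_lengths)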