Commit 620e864

Format code
1 parent b22bcb2 commit 620e864

3 files changed: 7 additions, 2 deletions

3 files changed

+7
-2
lines changed

include/fdeep/layers/batch_normalization_layer.hpp

Lines changed: 2 additions & 0 deletions

@@ -48,9 +48,11 @@ class batch_normalization_layer : public layer
     tensors apply_impl(const tensors& inputs) const override
     {
         const auto input = single_tensor_from_tensors(inputs);
+
         std::vector<std::size_t> dims(5, 1);
         dims[rank_aligned_axis_to_absolute_axis(input.shape().rank(), axis_) - 1] = moving_mean_->size();
         const tensor_shape params_shape = create_tensor_shape_from_dims(dims);
+
         return {batch_normalization(
             input,
             broadcast(tensor(params_shape, moving_mean_), input.shape()),
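For reference (not part of this commit): the dims vector in the hunk above places the length-C moving statistics on the normalized axis of an all-ones rank-5 shape, so that broadcast() can then expand them over the remaining axes. A minimal stand-alone sketch of that shape construction, with a hypothetical helper name and assuming a 1-based absolute axis as in the code above:

#include <cstddef>
#include <vector>

// Hypothetical helper (illustration only): build the all-ones rank-5 shape
// whose single non-1 entry is the normalized axis, sized to the parameter count.
std::vector<std::size_t> params_dims(std::size_t absolute_axis, std::size_t param_count)
{
    std::vector<std::size_t> dims(5, 1);    // rank-5 shape of ones
    dims[absolute_axis - 1] = param_count;  // e.g. axis 5, 32 channels -> {1, 1, 1, 1, 32}
    return dims;
}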

include/fdeep/layers/layer_normalization_layer.hpp

Lines changed: 3 additions & 1 deletion

@@ -37,8 +37,9 @@ class layer_normalization_layer : public layer
 
     tensors apply_impl(const tensors& inputs) const override
    {
-        // https://github.com/keras-team/keras/blob/v2.14.0/keras/layers/normalization/layer_normalization.py#L291-L304
         const auto& input = single_tensor_from_tensors(inputs);
+
+        // https://github.com/keras-team/keras/blob/v2.14.0/keras/layers/normalization/layer_normalization.py#L291-L304
         const auto& input_moments = moments(input, axes_);
         const auto& mean = input_moments.first;
         const auto& variance = input_moments.second;
@@ -52,6 +53,7 @@ class layer_normalization_layer : public layer
             dims[pos] = input_shape_dimensions[pos];
         }
         const tensor_shape params_shape = create_tensor_shape_from_dims(dims);
+
         return {batch_normalization(
             input,
             mean,
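As the hunks above suggest, layer normalization reuses the same batch_normalization tensor op, but feeds it a mean and variance computed from the current input via moments(input, axes_) instead of stored moving statistics. A minimal stand-alone sketch of such per-sample moments, using a plain std::vector stand-in rather than fdeep tensors:

#include <cstddef>
#include <utility>
#include <vector>

// Illustration only: mean and (biased) variance over one flat vector of values.
std::pair<float, float> moments_1d(const std::vector<float>& xs)
{
    float mean = 0.0f;
    for (const float x : xs)
        mean += x;
    mean /= static_cast<float>(xs.size());

    float variance = 0.0f;
    for (const float x : xs)
        variance += (x - mean) * (x - mean);
    variance /= static_cast<float>(xs.size());

    return {mean, variance};
}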

include/fdeep/tensor.hpp

Lines changed: 2 additions & 1 deletion

@@ -814,7 +814,9 @@ inline tensor broadcast(const tensor& t, const tensor_shape& shape)
         (t.shape().width_ == 1 || t.shape().width_ == shape.width_) &&
         (t.shape().depth_ == 1 || t.shape().depth_ == shape.depth_),
         "Invalid shapes for combining tensors.");
+
     tensor out_tensor = tensor(shape, static_cast<float_type>(0));
+
     loop_over_all_dims(out_tensor.shape(), [&](
         std::size_t dim5, std::size_t dim4, std::size_t y, std::size_t x, std::size_t z)
         {
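The assertion in the broadcast() hunk above encodes the usual broadcasting rule: every dimension of the source shape must be either 1 or equal to the corresponding target dimension. A minimal stand-alone sketch of that check (illustration only, not the fdeep API):

#include <cstddef>
#include <vector>

// Illustration only: NumPy-style compatibility check for same-rank shapes.
bool broadcastable(const std::vector<std::size_t>& src,
                   const std::vector<std::size_t>& target)
{
    if (src.size() != target.size())
        return false;
    for (std::size_t i = 0; i < src.size(); ++i)
        if (src[i] != 1 && src[i] != target[i])
            return false;
    return true;
}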
@@ -907,7 +909,6 @@ inline tensor batch_normalization(
             transform_tensor(
                 fplus::add_to(variance_epsilon), variance)),
         scale);
-
     return add_tensors(
         mult_tensors(x, inv),
         subtract_tensors(
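The batch_normalization hunk above shows only the tail of the usual fused formulation; the offset and mean terms are partly cut off by the diff context, so the parameter names below are inferred from the standard Keras formula rather than taken verbatim from the file. A minimal scalar sketch of that arithmetic, with inv = scale / sqrt(variance + epsilon) and out = x * inv + (offset - mean * inv):

#include <cmath>

// Illustration only: element-wise batch normalization on scalars.
float batch_norm_scalar(float x, float mean, float variance,
                        float offset, float scale, float epsilon)
{
    const float inv = scale / std::sqrt(variance + epsilon);  // pre-fused scale
    return x * inv + (offset - mean * inv);                   // normalize, then shift
}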
