
Merge pull request #293 from Dobiasd/new-activations
Add activations Exponential, Gelu, and Softsign
Dobiasd authored Aug 7, 2021
2 parents 22ae914 + e98c33d commit c2e7b1a
Showing 11 changed files with 150 additions and 48 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -6,7 +6,7 @@ list(APPEND CMAKE_MODULE_PATH "${FDEEP_TOP_DIR}/cmake")

include(cmake/hunter.cmake) # default off

-project(frugally-deep VERSION 0.15.8)
+project(frugally-deep VERSION 0.15.9)

message(STATUS "===( ${PROJECT_NAME} ${PROJECT_VERSION} )===")

2 changes: 1 addition & 1 deletion INSTALL.md
@@ -63,7 +63,7 @@ Just add a *conanfile.txt* with frugally-deep as a requirement and choose the gen

```
[requires]
-frugally-deep/v0.15.8-p0@dobiasd/stable
+frugally-deep/v0.15.9-p0@dobiasd/stable
[generators]
cmake
3 changes: 2 additions & 1 deletion README.md
@@ -52,8 +52,9 @@ Layer types typically used in image recognition/generation are supported, making
* `MaxPooling1D/2D`, `GlobalMaxPooling1D/2D`
* `ELU`, `LeakyReLU`, `ReLU`, `SeLU`, `PReLU`
* `Sigmoid`, `Softmax`, `Softplus`, `Tanh`
+* `Exponential`, `GELU`, `Softsign`
* `UpSampling1D/2D`
-* `Reshape`, `Permute`
+* `Reshape`, `Permute`, `RepeatVector`
* `Embedding`


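For reference, models containing any of the layers above — including the newly added activations — are loaded and run through the same API as before. A minimal usage sketch along the lines of the project README; the model file name and input shape are illustrative only:

```cpp
// Minimal sketch: load a model exported by keras_export/convert_model.py
// and run one forward pass. "fdeep_model.json" and the 4-element input
// are hypothetical placeholders.
#include <fdeep/fdeep.hpp>
#include <iostream>

int main()
{
    const auto model = fdeep::load_model("fdeep_model.json");
    const auto result = model.predict(
        {fdeep::tensor(fdeep::tensor_shape(static_cast<std::size_t>(4)),
                       std::vector<float>{1, 2, 3, 4})});
    std::cout << fdeep::show_tensors(result) << std::endl;
}
```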
42 changes: 0 additions & 42 deletions include/fdeep/fdeep.hpp
@@ -17,48 +17,6 @@
#include "fdeep/tensor_shape.hpp"
#include "fdeep/tensor_shape_variable.hpp"
#include "fdeep/recurrent_ops.hpp"
#include "fdeep/layers/add_layer.hpp"
#include "fdeep/layers/average_layer.hpp"
#include "fdeep/layers/average_pooling_2d_layer.hpp"
#include "fdeep/layers/batch_normalization_layer.hpp"
#include "fdeep/layers/concatenate_layer.hpp"
#include "fdeep/layers/conv_2d_layer.hpp"
#include "fdeep/layers/cropping_2d_layer.hpp"
#include "fdeep/layers/dense_layer.hpp"
#include "fdeep/layers/depthwise_conv_2d_layer.hpp"
#include "fdeep/layers/elu_layer.hpp"
#include "fdeep/layers/flatten_layer.hpp"
#include "fdeep/layers/global_average_pooling_2d_layer.hpp"
#include "fdeep/layers/global_max_pooling_2d_layer.hpp"
#include "fdeep/layers/hard_sigmoid_layer.hpp"
#include "fdeep/layers/input_layer.hpp"
#include "fdeep/layers/layer.hpp"
#include "fdeep/layers/leaky_relu_layer.hpp"
#include "fdeep/layers/prelu_layer.hpp"
#include "fdeep/layers/linear_layer.hpp"
#include "fdeep/layers/max_pooling_2d_layer.hpp"
#include "fdeep/layers/maximum_layer.hpp"
#include "fdeep/layers/model_layer.hpp"
#include "fdeep/layers/multiply_layer.hpp"
#include "fdeep/layers/pooling_2d_layer.hpp"
#include "fdeep/layers/relu_layer.hpp"
#include "fdeep/layers/repeat_vector_layer.hpp"
#include "fdeep/layers/reshape_layer.hpp"
#include "fdeep/layers/separable_conv_2d_layer.hpp"
#include "fdeep/layers/selu_layer.hpp"
#include "fdeep/layers/sigmoid_layer.hpp"
#include "fdeep/layers/softmax_layer.hpp"
#include "fdeep/layers/softplus_layer.hpp"
#include "fdeep/layers/subtract_layer.hpp"
#include "fdeep/layers/swish_layer.hpp"
#include "fdeep/layers/tanh_layer.hpp"
#include "fdeep/layers/upsampling_1d_layer.hpp"
#include "fdeep/layers/upsampling_2d_layer.hpp"
#include "fdeep/layers/zero_padding_2d_layer.hpp"
#include "fdeep/layers/lstm_layer.hpp"
#include "fdeep/layers/gru_layer.hpp"
#include "fdeep/layers/bidirectional_layer.hpp"
#include "fdeep/layers/time_distributed_layer.hpp"

#include "fdeep/import_model.hpp"

30 changes: 29 additions & 1 deletion include/fdeep/import_model.hpp
@@ -26,6 +26,7 @@
#endif

#include "fdeep/common.hpp"

#include "fdeep/layers/add_layer.hpp"
#include "fdeep/layers/average_layer.hpp"
#include "fdeep/layers/average_pooling_2d_layer.hpp"
@@ -37,7 +38,9 @@
#include "fdeep/layers/dense_layer.hpp"
#include "fdeep/layers/depthwise_conv_2d_layer.hpp"
#include "fdeep/layers/elu_layer.hpp"
#include "fdeep/layers/exponential_layer.hpp"
#include "fdeep/layers/flatten_layer.hpp"
#include "fdeep/layers/gelu_layer.hpp"
#include "fdeep/layers/global_average_pooling_1d_layer.hpp"
#include "fdeep/layers/global_max_pooling_1d_layer.hpp"
#include "fdeep/layers/global_average_pooling_2d_layer.hpp"
@@ -65,6 +68,7 @@
#include "fdeep/layers/sigmoid_layer.hpp"
#include "fdeep/layers/softmax_layer.hpp"
#include "fdeep/layers/softplus_layer.hpp"
#include "fdeep/layers/softsign_layer.hpp"
#include "fdeep/layers/subtract_layer.hpp"
#include "fdeep/layers/swish_layer.hpp"
#include "fdeep/layers/tanh_layer.hpp"
@@ -814,6 +818,27 @@ inline activation_layer_ptr create_selu_layer(
return std::make_shared<selu_layer>(name);
}

+inline activation_layer_ptr create_exponential_layer(
+const get_param_f&, const nlohmann::json&,
+const std::string& name)
+{
+return std::make_shared<exponential_layer>(name);
+}
+
+inline activation_layer_ptr create_gelu_layer(
+const get_param_f&, const nlohmann::json&,
+const std::string& name)
+{
+return std::make_shared<gelu_layer>(name);
+}
+
+inline activation_layer_ptr create_softsign_layer(
+const get_param_f&, const nlohmann::json&,
+const std::string& name)
+{
+return std::make_shared<softsign_layer>(name);
+}
+
inline activation_layer_ptr create_leaky_relu_layer(
const get_param_f&, const nlohmann::json& data,
const std::string& name)
@@ -896,7 +921,10 @@ inline activation_layer_ptr create_activation_layer_type_name(
{"hard_sigmoid", create_hard_sigmoid_layer},
{"relu", create_relu_layer},
{"selu", create_selu_layer},
{"elu", create_elu_layer}
{"elu", create_elu_layer},
{"exponential", create_exponential_layer},
{"gelu", create_gelu_layer},
{"softsign", create_softsign_layer}
};

return fplus::throw_on_nothing(
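The last hunk above wires the three new creator functions into the name-to-factory map on which `create_activation_layer_type_name` dispatches. A simplified, self-contained sketch of that idiom follows — plain `std::map`, `std::function`, and an exception stand in for fdeep's internal types and its `fplus::throw_on_nothing` error path:

```cpp
// Self-contained sketch of the name-to-factory dispatch idiom; the
// layer structs here are simplified stand-ins, not fdeep's internals.
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct layer { virtual ~layer() = default; };
struct exponential_layer : layer {};
struct gelu_layer : layer {};
struct softsign_layer : layer {};

using layer_ptr = std::shared_ptr<layer>;
using creator = std::function<layer_ptr()>;

// Map a Keras activation name to its factory; unknown names throw,
// mirroring what fplus::throw_on_nothing achieves in the real code.
layer_ptr create_activation_layer(const std::string& type)
{
    static const std::map<std::string, creator> creators = {
        {"exponential", [] { return std::make_shared<exponential_layer>(); }},
        {"gelu",        [] { return std::make_shared<gelu_layer>(); }},
        {"softsign",    [] { return std::make_shared<softsign_layer>(); }},
    };
    const auto it = creators.find(type);
    if (it == creators.end())
        throw std::runtime_error("unknown activation type: " + type);
    return it->second();
}
```

Registering a new activation then only requires one map entry plus a creator function, which is exactly the shape of this commit's change.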
32 changes: 32 additions & 0 deletions include/fdeep/layers/exponential_layer.hpp
@@ -0,0 +1,32 @@
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
// https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"
#include "fdeep/recurrent_ops.hpp"

#include <limits>
#include <string>

namespace fdeep { namespace internal
{

class exponential_layer : public activation_layer
{
public:
explicit exponential_layer(const std::string& name)
: activation_layer(name)
{
}
protected:
tensor transform_input(const tensor& in_vol) const override
{
return transform_tensor(exponential_activation, in_vol);
}
};

} } // namespace fdeep, namespace internal
32 changes: 32 additions & 0 deletions include/fdeep/layers/gelu_layer.hpp
@@ -0,0 +1,32 @@
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
// https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"
#include "fdeep/recurrent_ops.hpp"

#include <limits>
#include <string>

namespace fdeep { namespace internal
{

class gelu_layer : public activation_layer
{
public:
explicit gelu_layer(const std::string& name)
: activation_layer(name)
{
}
protected:
tensor transform_input(const tensor& in_vol) const override
{
return transform_tensor(gelu_activation, in_vol);
}
};

} } // namespace fdeep, namespace internal
32 changes: 32 additions & 0 deletions include/fdeep/layers/softsign_layer.hpp
@@ -0,0 +1,32 @@
// Copyright 2016, Tobias Hermann.
// https://github.com/Dobiasd/frugally-deep
// Distributed under the MIT License.
// (See accompanying LICENSE file or at
// https://opensource.org/licenses/MIT)

#pragma once

#include "fdeep/layers/activation_layer.hpp"
#include "fdeep/recurrent_ops.hpp"

#include <limits>
#include <string>

namespace fdeep { namespace internal
{

class softsign_layer : public activation_layer
{
public:
explicit softsign_layer(const std::string& name)
: activation_layer(name)
{
}
protected:
tensor transform_input(const tensor& in_vol) const override
{
return transform_tensor(softsign_activation, in_vol);
}
};

} } // namespace fdeep, namespace internal
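Each of the three new classes only overrides `transform_input`, delegating to `transform_tensor`, which applies a scalar activation function to every element of the input tensor. A rough standalone illustration of that elementwise mapping, using a flat `std::vector` as a stand-in for fdeep's tensor type:

```cpp
// Conceptual stand-in for transform_tensor(softsign_activation, in_vol):
// apply a scalar function to every value of a flat buffer. Not fdeep's
// actual tensor implementation.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

using float_type = float;

// Same formula the new softsign_layer uses via recurrent_ops.hpp.
inline float_type softsign_activation(float_type x)
{
    return x / (std::abs(x) + static_cast<float_type>(1));
}

std::vector<float_type> transform(float_type (*f)(float_type),
                                  std::vector<float_type> values)
{
    std::transform(values.begin(), values.end(), values.begin(), f);
    return values;
}

int main()
{
    for (float_type v : transform(softsign_activation, {-2.0f, 0.0f, 2.0f}))
        std::printf("%f\n", v); // -0.666667, 0.000000, 0.666667
}
```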
17 changes: 17 additions & 0 deletions include/fdeep/recurrent_ops.hpp
@@ -56,6 +56,23 @@ inline float_type selu_activation(float_type x)
return scale * (x >= 0 ? x : alpha * (std::exp(x) - 1));
}

+inline float_type exponential_activation(float_type x)
+{
+return static_cast<float_type>(std::exp(x));
+}
+
+inline float_type gelu_activation(float_type x)
+{
+return static_cast<float_type>(0.5) * x *
+(static_cast<float_type>(1) +
+static_cast<float_type>(std::erf(x / std::sqrt(static_cast<float_type>(2)))));
+}
+
+inline float_type softsign_activation(float_type x)
+{
+return x / (std::abs(x) + static_cast<float_type>(1));
+}
+
inline float_type elu_activation(float_type x)
{
return x >= 0 ? x : std::exp(x) - 1;
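As a quick sanity check on these formulas, the following standalone snippet evaluates each activation at x = 1; the expected values are exp(1) ≈ 2.71828, 0.5 · (1 + erf(1/√2)) ≈ 0.84134 for GELU, and 1/(1 + 1) = 0.5 for softsign. The function bodies are copied from the hunk above, with `float_type` fixed to `double` purely for this test:

```cpp
// Standalone numeric check of the three new activation formulas.
#include <cmath>
#include <cstdio>

using float_type = double;

float_type exponential_activation(float_type x) { return std::exp(x); }

float_type gelu_activation(float_type x)
{
    return 0.5 * x * (1.0 + std::erf(x / std::sqrt(2.0)));
}

float_type softsign_activation(float_type x)
{
    return x / (std::abs(x) + 1.0);
}

int main()
{
    std::printf("exp(1)      = %.5f\n", exponential_activation(1.0)); // 2.71828
    std::printf("gelu(1)     = %.5f\n", gelu_activation(1.0));        // 0.84134
    std::printf("softsign(1) = %.5f\n", softsign_activation(1.0));    // 0.50000
}
```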
4 changes: 3 additions & 1 deletion keras_export/generate_test_models.py
@@ -361,9 +361,11 @@ def get_test_model_exhaustive():
Activation('sigmoid')(inputs[25]),
Activation('softplus')(inputs[25]),
Activation('softmax')(inputs[25]),
-Activation('softmax')(inputs[25]),
Activation('relu')(inputs[25]),
Activation('swish')(inputs[25]),
+Activation('exponential')(inputs[25]),
+Activation('gelu')(inputs[25]),
+Activation('softsign')(inputs[25]),
LeakyReLU()(inputs[25]),
ELU()(inputs[25]),
PReLU()(inputs[24]),
2 changes: 1 addition & 1 deletion test/Dockerfile
@@ -29,7 +29,7 @@ WORKDIR /
RUN git clone -b 'v0.2.15-p0' --single-branch --depth 1 https://github.com/Dobiasd/FunctionalPlus && cd FunctionalPlus && mkdir -p build && cd build && cmake .. && make && make install
RUN git clone -b '3.3.9' --single-branch --depth 1 https://gitlab.com/libeigen/eigen.git && cd eigen && mkdir -p build && cd build && cmake .. && make && make install && ln -s /usr/local/include/eigen3/Eigen /usr/local/include/Eigen
RUN git clone -b 'v3.9.1' --single-branch --depth 1 https://github.com/nlohmann/json && cd json && mkdir -p build && cd build && cmake -DBUILD_TESTING=OFF .. && make && make install
-RUN git clone -b 'v0.15.8-p0' --single-branch --depth 1 https://github.com/Dobiasd/frugally-deep && cd frugally-deep && mkdir -p build && cd build && cmake .. && make && make install
+RUN git clone -b 'v0.15.9-p0' --single-branch --depth 1 https://github.com/Dobiasd/frugally-deep && cd frugally-deep && mkdir -p build && cd build && cmake .. && make && make install

# To have downloaded the Keras models already
RUN pip install numpy --upgrade
