Rename getyxz to get_yxz, setyxz to set_yxz
Dobiasd committed Oct 7, 2018
1 parent d8df3b6 commit fa8fd77
Showing 15 changed files with 37 additions and 37 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -6,7 +6,7 @@ list(APPEND CMAKE_MODULE_PATH "${FDEEP_TOP_DIR}/cmake")
 
 include(cmake/hunter.cmake) # default off
 
-project(frugally-deep VERSION 0.5.0)
+project(frugally-deep VERSION 0.5.1)
 
 message(STATUS "===( ${PROJECT_NAME} ${PROJECT_VERSION} )===")
2 changes: 1 addition & 1 deletion INSTALL.md
@@ -68,7 +68,7 @@ Just add a *conanfile.txt* with frugally-deep as a requirement and chose the generator
 
 ```
 [requires]
-frugally-deep/v0.5.0-p0@dobiasd/stable
+frugally-deep/v0.5.1-p0@dobiasd/stable
 [generators]
 cmake
4 changes: 2 additions & 2 deletions include/fdeep/convolution.hpp
@@ -48,7 +48,7 @@ inline im2col_filter_matrix generate_im2col_filter_matrix(
 {
 for (std::size_t zf = 0; zf < fz; ++zf)
 {
-b(b_y, b_x++) = filter.getyxz(yf, xf, zf);
+b(b_y, b_x++) = filter.get_yxz(yf, xf, zf);
 }
 }
 }
@@ -94,7 +94,7 @@ inline tensor3 convolve_im2col(
 {
 for (std::size_t zf = 0; zf < fz; ++zf)
 {
-a(a_y++, a_x) = in_padded.getyxz(
+a(a_y++, a_x) = in_padded.get_yxz(
 offset_y + strides_y * y + yf,
 offset_x + strides_x * x + xf,
 zf);
4 changes: 2 additions & 2 deletions include/fdeep/filter.hpp
@@ -36,9 +36,9 @@ class filter
 {
 return m_;
 }
-float_type getyxz(std::size_t y, size_t x, std::size_t z) const
+float_type get_yxz(std::size_t y, size_t x, std::size_t z) const
 {
-return m_.getyxz(y, x, z);
+return m_.get_yxz(y, x, z);
 }
 float_type get_bias() const
 {
6 changes: 3 additions & 3 deletions include/fdeep/import_model.hpp
@@ -948,7 +948,7 @@ inline void check_test_outputs(float_type epsilon,
 for (std::size_t x = 0; x < output.shape().width_; ++x)
 {
 if (!fplus::is_in_closed_interval_around(epsilon,
-target.getyxz(y, x, z), output.getyxz(y, x, z)))
+target.get_yxz(y, x, z), output.get_yxz(y, x, z)))
 {
 const std::string msg =
 std::string("test failed: ") +
@@ -957,8 +957,8 @@
 fplus::show(y) + "," +
 fplus::show(x) + "," +
 fplus::show(z) + " " +
-"value=" + fplus::show(output.getyxz(y, x, z)) + " "
-"target=" + fplus::show(target.getyxz(y, x, z));
+"value=" + fplus::show(output.get_yxz(y, x, z)) + " "
+"target=" + fplus::show(target.get_yxz(y, x, z));
 internal::raise_error(msg);
 }
 }
2 changes: 1 addition & 1 deletion include/fdeep/layers/average_pooling_2d_layer.hpp
@@ -59,7 +59,7 @@ FDEEP_FORCE_INLINE tensor3 average_pool_2d(
 }
 }
 }
-out.setyxz(y, x, z, val / static_cast<float_type>(divisor));
+out.set_yxz(y, x, z, val / static_cast<float_type>(divisor));
 }
 }
 }
4 changes: 2 additions & 2 deletions include/fdeep/layers/batch_normalization_layer.hpp
@@ -66,14 +66,14 @@ class batch_normalization_layer : public layer
 {
 for (std::size_t x = 0; x < output.shape().width_; ++x)
 {
-float_type val = input.getyxz(y, x, z);
+float_type val = input.get_yxz(y, x, z);
 val -= moving_mean_[z];
 if (use_gamma)
 val *= gamma_[z];
 val /= denom;
 if (use_beta)
 val += beta_[z];
-output.setyxz(y, x, z, val);
+output.set_yxz(y, x, z, val);
 }
 }
 }
4 changes: 2 additions & 2 deletions include/fdeep/layers/global_average_pooling_2d_layer.hpp
@@ -31,10 +31,10 @@ class global_average_pooling_2d_layer : public global_pooling_2d_layer
 {
 for (std::size_t x = 0; x < in.shape().width_; ++x)
 {
-val += in.getyxz(y, x, z);
+val += in.get_yxz(y, x, z);
 }
 }
-out.setyxz(0, 0, z, val /
+out.set_yxz(0, 0, z, val /
 static_cast<float_type>(in.shape().without_depth().area()));
 }
 return out;
4 changes: 2 additions & 2 deletions include/fdeep/layers/global_max_pooling_2d_layer.hpp
@@ -33,10 +33,10 @@ class global_max_pooling_2d_layer : public global_pooling_2d_layer
 {
 for (std::size_t x = 0; x < in.shape().width_; ++x)
 {
-val = std::max(val, in.getyxz(y, x, z));
+val = std::max(val, in.get_yxz(y, x, z));
 }
 }
-out.setyxz(0, 0, z, val);
+out.set_yxz(0, 0, z, val);
 }
 return out;
 }
2 changes: 1 addition & 1 deletion include/fdeep/layers/max_pooling_2d_layer.hpp
@@ -55,7 +55,7 @@ FDEEP_FORCE_INLINE tensor3 max_pool_2d(
 val = std::max(val, current);
 }
 }
-out.setyxz(y, x, z, val);
+out.set_yxz(y, x, z, val);
 }
 }
 }
8 changes: 4 additions & 4 deletions include/fdeep/layers/prelu_layer.hpp
@@ -59,9 +59,9 @@ class prelu_layer : public layer
 {
 for (std::size_t x = 0; x < out.shape().width_; ++x)
 {
-if (input[0].getyxz(y, x, z) > 0)
+if (input[0].get_yxz(y, x, z) > 0)
 {
-out.setyxz(y, x, z, input[0].getyxz(y, x, z));
+out.set_yxz(y, x, z, input[0].get_yxz(y, x, z));
 }
 else
 {
@@ -72,8 +72,8 @@
 y_temp * width * depth +
 x_temp * depth +
 z_temp;
-out.setyxz(y, x, z, (*alpha_)[pos] *
-input[0].getyxz(y, x, z));
+out.set_yxz(y, x, z, (*alpha_)[pos] *
+input[0].get_yxz(y, x, z));
 }
 }
 }
4 changes: 2 additions & 2 deletions include/fdeep/layers/softmax_layer.hpp
@@ -41,7 +41,7 @@ class softmax_layer : public activation_layer
 float_type sum = 0.0f;
 for (size_t z_class = 0; z_class < input.shape().depth_; ++z_class)
 {
-sum += output.getyxz(y, x, z_class);
+sum += output.get_yxz(y, x, z_class);
 }
 assertion(sum != 0 && !std::isinf(sum), "Invalid divisor in softmax layer. Try using\n"
 "#define FDEEP_FLOAT_TYPE double\n"
@@ -51,7 +51,7 @@
 // Divide the unnormalized values of each pixel by the stacks sum.
 for (size_t z_class = 0; z_class < input.shape().depth_; ++z_class)
 {
-output.setyxz(y, x, z_class, output.getyxz(y, x, z_class) / sum);
+output.set_yxz(y, x, z_class, output.get_yxz(y, x, z_class) / sum);
 }
 }
 }
2 changes: 1 addition & 1 deletion include/fdeep/layers/upsampling_2d_layer.hpp
@@ -49,7 +49,7 @@ class upsampling_2d_layer : public layer
 for (std::size_t x = 0; x < out_vol.shape().width_; ++x)
 {
 std::size_t x_in = x / scale_factor_.width_;
-out_vol.setyxz(y, x, z, in_vol.getyxz(y_in, x_in, z));
+out_vol.set_yxz(y, x, z, in_vol.get_yxz(y_in, x_in, z));
 }
 }
 }
2 changes: 1 addition & 1 deletion include/fdeep/model.hpp
@@ -83,7 +83,7 @@ class model
 const auto output_shape = outputs.front().shape();
 internal::assertion(output_shape.volume() == 1,
 "invalid output shape");
-return outputs.front().getyxz(0, 0, 0);
+return outputs.front().get_yxz(0, 0, 0);
 }
 
 const std::vector<shape_hwc_variable>& get_input_shapes() const
24 changes: 12 additions & 12 deletions include/fdeep/tensor3.hpp
@@ -51,7 +51,7 @@ class tensor3
 {
 return (*values_)[idx(pos)];
 }
-float_type getyxz(std::size_t y, std::size_t x, std::size_t z) const
+float_type get_yxz(std::size_t y, std::size_t x, std::size_t z) const
 {
 return get(tensor3_pos_yxz(y, x, z));
 }
@@ -70,7 +70,7 @@
 {
 (*values_)[idx(pos)] = value;
 }
-void setyxz(std::size_t y, std::size_t x, std::size_t z, float_type value)
+void set_yxz(std::size_t y, std::size_t x, std::size_t z, float_type value)
 {
 set(tensor3_pos_yxz(y, x, z), value);
 }
@@ -135,7 +135,7 @@ inline tensor3 tensor3_from_depth_slices(const std::vector<tensor2>& ms)
 {
 for (std::size_t x = 0; x < m.shape().width_; ++x)
 {
-m.setyxz(y, x, z, ms[z].get(y, x));
+m.set_yxz(y, x, z, ms[z].get(y, x));
 }
 }
 }
@@ -157,7 +157,7 @@ inline std::vector<tensor2> tensor3_to_tensor_2_depth_slices(const tensor3& m)
 {
 for (std::size_t x = 0; x < m.shape().width_; ++x)
 {
-ms[z].set(y, x, m.getyxz(y, x, z));
+ms[z].set(y, x, m.get_yxz(y, x, z));
 }
 }
 }
@@ -183,7 +183,7 @@ inline std::pair<tensor3_pos_yxz, tensor3_pos_yxz> tensor3_min_max_pos(
 {
 for (std::size_t x = 0; x < vol.shape().width_; ++x)
 {
-auto current_value = vol.getyxz(y, x, z);
+auto current_value = vol.get_yxz(y, x, z);
 if (current_value > value_max)
 {
 result_max = tensor3_pos_yxz(y, x, z);
@@ -217,7 +217,7 @@ inline tensor3 tensor3_swap_depth_and_height(const tensor3& in)
 {
 for (std::size_t x = 0; x < in.shape().width_; ++x)
 {
-result.setyxz(z, x, y, in.getyxz(y, x, z));
+result.set_yxz(z, x, y, in.get_yxz(y, x, z));
 }
 }
 }
@@ -236,7 +236,7 @@ inline tensor3 tensor3_swap_depth_and_width(const tensor3& in)
 {
 for (std::size_t x = 0; x < in.shape().width_; ++x)
 {
-result.setyxz(y, z, x, in.getyxz(y, x, z));
+result.set_yxz(y, z, x, in.get_yxz(y, x, z));
 }
 }
 }
@@ -310,7 +310,7 @@ inline tensor3 flatten_tensor3(const tensor3& vol)
 {
 for (std::size_t z = 0; z < vol.shape().depth_; ++z)
 {
-values.push_back(vol.getyxz(y, x, z));
+values.push_back(vol.get_yxz(y, x, z));
 }
 }
 }
@@ -332,7 +332,7 @@ inline tensor3 pad_tensor3(float_type val,
 {
 for (std::size_t x = 0; x < in.shape().width_; ++x)
 {
-result.setyxz(y + top_pad, x + left_pad, z, in.getyxz(y, x, z));
+result.set_yxz(y + top_pad, x + left_pad, z, in.get_yxz(y, x, z));
 }
 }
 }
@@ -354,7 +354,7 @@ inline tensor3 crop_tensor3(
 {
 for (std::size_t x = 0; x < result.shape().width_; ++x)
 {
-result.setyxz(y, x, z, in.getyxz(y + top_crop, x + left_crop, z));
+result.set_yxz(y, x, z, in.get_yxz(y + top_crop, x + left_crop, z));
 }
 }
 }
@@ -375,11 +375,11 @@ inline tensor3 dilate_tensor3(const shape_hw& dilation_rate, const tensor3& in)
 {
 for (std::size_t x = 0; x < in.shape().width_; ++x)
 {
-result.setyxz(
+result.set_yxz(
 y * dilation_rate.height_,
 x * dilation_rate.width_,
 z,
-in.getyxz(y, x, z));
+in.get_yxz(y, x, z));
 }
 }
 }
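The rename is purely mechanical (37 additions, 37 deletions, no behavioral change) and brings the accessors in line with the snake_case names already used elsewhere in the API, such as get_bias and get_input_shapes above. For context, the accessors follow fdeep's y/x/z (height, width, depth) indexing order. The sketch below is a hypothetical, simplified stand-in, not the real fdeep::tensor3; it only illustrates the renamed get_yxz/set_yxz interface and the flat row-major layout implied by the pos computation in the prelu_layer diff:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for fdeep::tensor3 (the real class differs):
// values are stored flat and indexed as y (height), x (width), z (depth).
class tensor3_sketch
{
public:
    tensor3_sketch(std::size_t height, std::size_t width, std::size_t depth)
        : height_(height), width_(width), depth_(depth),
          values_(height * width * depth, 0.0f)
    {}
    // Renamed from getyxz in this commit.
    float get_yxz(std::size_t y, std::size_t x, std::size_t z) const
    {
        return values_[idx(y, x, z)];
    }
    // Renamed from setyxz in this commit.
    void set_yxz(std::size_t y, std::size_t x, std::size_t z, float value)
    {
        values_[idx(y, x, z)] = value;
    }
private:
    // Row-major layout with z varying fastest, matching the
    // y * width * depth + x * depth + z pattern in the prelu_layer diff.
    std::size_t idx(std::size_t y, std::size_t x, std::size_t z) const
    {
        return y * width_ * depth_ + x * depth_ + z;
    }
    std::size_t height_, width_, depth_;
    std::vector<float> values_;
};

int main()
{
    tensor3_sketch t(2, 3, 4);
    t.set_yxz(1, 2, 3, 42.0f);
    assert(t.get_yxz(1, 2, 3) == 42.0f);
}
```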
