Fix layer_norm flatten issue (#4031)
* fix layer_norm flatten issue

* update unit test

* check output values

* fix style

* apply tolerance
baojun-nervana authored and diyessi committed Dec 10, 2019
1 parent e03289a commit 6fb1c5e
Showing 4 changed files with 62 additions and 4 deletions.
4 changes: 2 additions & 2 deletions src/ngraph/op/fused/layer_norm.cpp
@@ -170,7 +170,7 @@ shared_ptr<Node> op::LayerNorm::copy_with_new_args(const NodeVector& new_args) c
     }
 }
 
-void op::LayerNorm::validate_and_infer_types()
+void op::LayerNorm::pre_validate_and_infer_types()
 {
     element::Type input_element_type = get_input_element_type(0);
 
@@ -509,7 +509,7 @@ shared_ptr<Node> op::LayerNormBackprop::copy_with_new_args(const NodeVector& new
     }
 }
 
-void op::LayerNormBackprop::validate_and_infer_types()
+void op::LayerNormBackprop::pre_validate_and_infer_types()
 {
     element::Type input_element_type = get_input_element_type(0);
 
4 changes: 2 additions & 2 deletions src/ngraph/op/fused/layer_norm.hpp
@@ -56,7 +56,7 @@ namespace ngraph
 
                 virtual NodeVector decompose_op() const override;
 
-                void validate_and_infer_types() override;
+                void pre_validate_and_infer_types() override;
 
                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;
@@ -121,7 +121,7 @@ namespace ngraph
 
                 virtual NodeVector decompose_op() const override;
 
-                void validate_and_infer_types() override;
+                void pre_validate_and_infer_types() override;
 
                 virtual std::shared_ptr<Node>
                     copy_with_new_args(const NodeVector& new_args) const override;
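
The rename above is the core of the fix: LayerNorm and LayerNormBackprop derive from nGraph's op::util::FusedOp, and moving the op-specific checks into the pre_validate_and_infer_types hook lets the base class finish shape inference by decomposing the op into primitives, so a consumer such as op::Reshape can flatten one of the op's outputs (as the new test below does). A minimal sketch of that flow, assuming the usual FusedOp pattern; this is illustrative, not the verbatim nGraph implementation:

// Simplified sketch of the op::util::FusedOp validation flow (illustrative;
// names follow nGraph's FusedOp, but this is not the verbatim source).
void FusedOp::validate_and_infer_types()
{
    // Op-specific checks (element types, rank constraints) run first.
    // LayerNorm/LayerNormBackprop now override this hook instead of
    // overriding validate_and_infer_types() itself.
    pre_validate_and_infer_types();

    // The fused op is then expanded into primitive ops, and the subgraph's
    // output types and shapes become this node's inferred outputs.
    NodeVector subgraph_outputs = decompose_op();
    for (size_t i = 0; i < subgraph_outputs.size(); ++i)
    {
        set_output_type(i,
                        subgraph_outputs[i]->get_element_type(),
                        subgraph_outputs[i]->get_shape());
    }
}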
1 change: 1 addition & 0 deletions src/ngraph/runtime/plaidml/unit_test.manifest
@@ -316,6 +316,7 @@ random_uniform_dynamic_shapes
 layer_norm_affine_stats
 layer_norm_bprop_affine_stats
 layer_norm_bprop_affine
+layer_norm_bprop_4d_input
 
 # Another fused op decomposition pass required after the downgrade pass
 model_split_equal_parts_default
57 changes: 57 additions & 0 deletions test/backend/layer_norm.in.cpp
@@ -194,3 +194,60 @@ NGRAPH_TEST(${BACKEND_NAME}, layer_norm_bprop_affine)
     EXPECT_TRUE(test::all_close_f(exp_d_scale, read_vector<float>(d_scale)));
     EXPECT_TRUE(test::all_close_f(exp_d_bias, read_vector<float>(d_bias)));
 }
+
+NGRAPH_TEST(${BACKEND_NAME}, layer_norm_bprop_4d_input)
+{
+    auto p_data = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
+    auto p_delta = make_shared<op::Parameter>(element::f32, Shape{2, 3, 4, 5});
+    auto p_mean = make_shared<op::Parameter>(element::f32, Shape{2});
+    auto p_variance = make_shared<op::Parameter>(element::f32, Shape{2});
+    auto p_scale = make_shared<op::Parameter>(element::f32, Shape{60});
+    auto lnb = make_shared<op::LayerNormBackprop>(p_data, p_delta, p_mean, p_variance, p_scale);
+
+    auto output_data = lnb->output(0);
+    auto output_scale = lnb->output(1);
+    auto output_bias = lnb->output(2);
+
+    // flatten output_scale
+    auto output_scale_shape = output_scale.get_shape();
+    auto flattened_output_scale = make_shared<op::Reshape>(
+        output_scale, get_default_order(output_scale_shape), Shape{shape_size(output_scale_shape)});
+
+    auto f = make_shared<Function>(OutputVector{output_data, flattened_output_scale, output_bias},
+                                   ParameterVector{p_data, p_delta, p_mean, p_variance, p_scale});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create tensors for input
+    auto data = backend->create_tensor(element::f32, Shape{2, 3, 4, 5});
+    auto delta = backend->create_tensor(element::f32, Shape{2, 3, 4, 5});
+    auto mean = backend->create_tensor(element::f32, Shape{2});
+    auto variance = backend->create_tensor(element::f32, Shape{2});
+    auto scale = backend->create_tensor(element::f32, Shape{60});
+    // Fill in input tensors
+    vector<float> d_input(2 * 3 * 4 * 5, 1);
+    copy_data(data, d_input);
+    vector<float> dt_input(2 * 3 * 4 * 5, 1);
+    copy_data(delta, dt_input);
+    vector<float> m_input(2, 1);
+    copy_data(mean, m_input);
+    vector<float> v_input(2, 1);
+    copy_data(variance, v_input);
+    vector<float> s_input(60, 1);
+    copy_data(scale, s_input);
+    // Create tensors for output
+    auto d_data = backend->create_tensor(element::f32, Shape{2, 3, 4, 5});
+    auto d_scale = backend->create_tensor(element::f32, Shape{60});
+    auto d_bias = backend->create_tensor(element::f32, Shape{60});
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({d_data, d_scale, d_bias}, {data, delta, mean, variance, scale});
+
+    vector<float> expected_data(120, 0);
+    vector<float> expected_scale(60, 0);
+    vector<float> expected_bias(60, 2);
+
+    EXPECT_TRUE(test::all_close_f(expected_data, read_vector<float>(d_data), 1e-6f, 1e-6f));
+    EXPECT_TRUE(test::all_close_f(expected_scale, read_vector<float>(d_scale), 1e-6f, 1e-6f));
+    EXPECT_TRUE(test::all_close_f(expected_bias, read_vector<float>(d_bias), 1e-6f, 1e-6f));
+}
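
Why those expected values: every input tensor is filled with ones, so the normalized activations (data - mean) / sqrt(variance) are all zero, which zeroes the data and scale gradients, while the bias gradient simply sums delta over the two normalization groups (the stats inputs have Shape{2}). A standalone sanity check of that arithmetic (plain C++, independent of nGraph; the gradient formulas are the standard layer-norm backprop expressions, not code from this repository):

// Standalone sanity check: with data == mean == 1 and variance == 1, the
// normalized activations are 0, so only the bias gradient is nonzero.
#include <cassert>
#include <cmath>

int main()
{
    const float data = 1.f, mean = 1.f, variance = 1.f, delta = 1.f;
    const int batch = 2; // stats have Shape{2}: one group per outermost index

    // x_hat = (x - mean) / sqrt(variance) == 0
    const float x_hat = (data - mean) / std::sqrt(variance);

    // d_bias_c  = sum over groups of delta          -> 2 (matches expected_bias)
    // d_scale_c = sum over groups of delta * x_hat  -> 0 (matches expected_scale)
    float d_bias = 0.f, d_scale = 0.f;
    for (int n = 0; n < batch; ++n)
    {
        d_bias += delta;
        d_scale += delta * x_hat;
    }

    // d_data = scale / stddev * (delta - mean(delta) - x_hat * mean(delta * x_hat))
    //        = 1 * (1 - 1 - 0) = 0                  -> matches expected_data
    const float d_data = 1.f / std::sqrt(variance) * (delta - delta - x_hat * (delta * x_hat));

    assert(d_bias == 2.f && d_scale == 0.f && d_data == 0.f);
    return 0;
}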
