diff --git a/CMakeLists.txt b/CMakeLists.txt index 892b3c8c881..2c41139855e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -164,7 +164,7 @@ option(NGRAPH_UNIT_TEST_ENABLE "Control the building of unit tests" TRUE) option(NGRAPH_DOC_BUILD_ENABLE "Control the building of documentation" FALSE) option(NGRAPH_TOOLS_ENABLE "Control the building of tool" TRUE) option(NGRAPH_CPU_ENABLE "Control the building of the CPU backend" TRUE) -option(NGRAPH_USE_LEGACY_MKLDNN "Use legacy MKLDNN" TRUE) +option(NGRAPH_USE_LEGACY_MKLDNN "Use legacy MKLDNN" FALSE) option(NGRAPH_MLIR_ENABLE "Control the building of MLIR backend" FALSE) option(NGRAPH_INTERPRETER_ENABLE "Control the building of the INTERPRETER backend" TRUE) option(NGRAPH_NOP_ENABLE "Control the building of the NOP backend" TRUE) @@ -621,6 +621,7 @@ endif() add_subdirectory(src) add_subdirectory(test) +add_subdirectory(doc/examples) if (NGRAPH_DOC_BUILD_ENABLE) add_subdirectory(doc) diff --git a/README.md b/README.md index e23efb9a862..a18a42fb9ee 100644 --- a/README.md +++ b/README.md @@ -45,8 +45,8 @@ framework and deploying to a variety of hardware targets. We strongly believe in providing freedom, performance, and ease-of-use to AI developers. The diagram below shows deep learning frameworks and hardware targets -supported by nGraph. NNP-L and NNP-I in the diagram refer to Intel's next generation -deep learning accelerators: Intel® Nervana™ Neural Network Processor for Learning and +supported by nGraph. NNP-T and NNP-I in the diagram refer to Intel's next generation +deep learning accelerators: Intel® Nervana™ Neural Network Processor for Training and Inference respectively. Future plans for supporting addtional deep learning frameworks and backends are outlined in the [ecosystem] section. 
diff --git a/cmake/external_mkldnn_v1.cmake b/cmake/external_mkldnn_v1.cmake index fb77250c8cc..03d1a6bb48b 100644 --- a/cmake/external_mkldnn_v1.cmake +++ b/cmake/external_mkldnn_v1.cmake @@ -18,10 +18,12 @@ include(ExternalProject) # Includes blas 3.8.0 in mkldnn set(NGRAPH_MKLDNN_SHORT_VERSION 1) -set(NGRAPH_MKLDNN_FULL_VERSION 1.0.0.0) -set(NGRAPH_MKLDNN_VERSION "v1.0") -set(NGRAPH_MKLDNN_SUB_VERSION "2019.0.5.20190502") -set(NGRAPH_MKLDNN_GIT_TAG "553c23f") +set(NGRAPH_MKLDNN_FULL_VERSION 1.0.4.0) +set(NGRAPH_MKLDNN_MKLML_ASSET_VERSION "v0.21") +set(NGRAPH_MKLDNN_VERSION "v1.0.4") +set(NGRAPH_MKLDNN_MKLML_VERSION "2019.0.5.20190502") +set(NGRAPH_MKLDNN_MKLML_WIN32_VERSION "2020.0.20190813") +set(NGRAPH_MKLDNN_GIT_TAG "v1.0.4") #------------------------------------------------------------------------------ # Fetch and install MKL-DNN @@ -88,8 +90,9 @@ endif() # This section sets up MKL as an external project to be used later by MKLDNN -set(MKLURLROOT "https://github.com/intel/mkl-dnn/releases/download/v0.19-rc/") -set(MKLVERSION ${NGRAPH_MKLDNN_SUB_VERSION}) +set(MKLURLROOT "https://github.com/intel/mkl-dnn/releases/download/${NGRAPH_MKLDNN_MKLML_ASSET_VERSION}/") +set(MKLVERSION ${NGRAPH_MKLDNN_MKLML_VERSION}) +set(MKLWIN32VERSION ${NGRAPH_MKLDNN_MKLML_WIN32_VERSION}) if (LINUX) set(MKLPACKAGE "mklml_lnx_${MKLVERSION}.tgz") set(MKL_SHA1_HASH 6ab490f0b358124338d04ee9383c3cbc536969d8) @@ -97,8 +100,8 @@ elseif (APPLE) set(MKLPACKAGE "mklml_mac_${MKLVERSION}.tgz") set(MKL_SHA1_HASH a1c42af04f990b0e515a1c31946424b2e68fccc9) elseif (WIN32) - set(MKLPACKAGE "mklml_win_${MKLVERSION}.zip") - set(MKL_SHA1_HASH 9d6ff4d5a486689338158093e96c43ee442b65f0) + set(MKLPACKAGE "mklml_win_${MKLWIN32VERSION}.zip") + set(MKL_SHA1_HASH cc117093e658d50a8e4e3d1cf192c300b6bac0fc) endif() set(MKL_LIBS ${MKLML_LIB} ${OMP_LIB}) set(MKLURL ${MKLURLROOT}${MKLPACKAGE}) diff --git a/cmake/mkldnn_v1.patch b/cmake/mkldnn_v1.patch index fce164fdcfb..38bcbac92cb 100644 --- 
a/cmake/mkldnn_v1.patch +++ b/cmake/mkldnn_v1.patch @@ -63,18 +63,18 @@ index 99970659..ef88a0a7 100644 # Compilation happens with OpenMP to enable `#pragma omp simd` # but during linkage OpenMP dependency should be avoided diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt -index 60bb0c94..cc3fc9d6 100644 +index f99ec31ce..b3c1d9bb8 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -73,8 +73,10 @@ endif() add_library(${LIB_NAME} ${MKLDNN_LIBRARY_TYPE} ${HEADERS} ${${LIB_NAME}_SUB_OBJS}) --set_property(TARGET ${LIB_NAME} PROPERTY VERSION "${PROJECT_VERSION}.0") --set_property(TARGET ${LIB_NAME} PROPERTY SOVERSION "0") +-set_property(TARGET ${LIB_NAME} PROPERTY VERSION "${MKLDNN_VERSION_MAJOR}.${MKLDNN_VERSION_MINOR}") +-set_property(TARGET ${LIB_NAME} PROPERTY SOVERSION "${MKLDNN_VERSION_MAJOR}") +if(MKLDNN_LIB_VERSIONING_ENABLE) -+ set_property(TARGET ${LIB_NAME} PROPERTY VERSION "${PROJECT_VERSION}.0") -+ set_property(TARGET ${LIB_NAME} PROPERTY SOVERSION "0") ++ set_property(TARGET ${LIB_NAME} PROPERTY VERSION "${MKLDNN_VERSION_MAJOR}.${MKLDNN_VERSION_MINOR}") ++ set_property(TARGET ${LIB_NAME} PROPERTY SOVERSION "${MKLDNN_VERSION_MAJOR}") +endif() set_property(TARGET ${LIB_NAME} PROPERTY PUBLIC_HEADER ${HEADERS}) diff --git a/doc/examples/CMakeLists.txt b/doc/examples/CMakeLists.txt index be3f1717cec..3dcffd1ef7e 100644 --- a/doc/examples/CMakeLists.txt +++ b/doc/examples/CMakeLists.txt @@ -17,6 +17,7 @@ if (NGRAPH_CPU_ENABLE) add_subdirectory(abc) add_subdirectory(abc_operator) + add_subdirectory(dynamic_tensor) add_subdirectory(mnist_mlp) add_subdirectory(update) endif() diff --git a/doc/examples/abc/abc.cpp b/doc/examples/abc/abc.cpp index d22226b938c..0a4b58a2953 100644 --- a/doc/examples/abc/abc.cpp +++ b/doc/examples/abc/abc.cpp @@ -50,9 +50,9 @@ int main() float v_b[2][3] = {{7, 8, 9}, {10, 11, 12}}; float v_c[2][3] = {{1, 0, -1}, {-1, 1, 2}}; - t_a->write(&v_a, 0, sizeof(v_a)); - t_b->write(&v_b, 0, sizeof(v_b)); - t_c->write(&v_c, 0, 
sizeof(v_c)); + t_a->write(&v_a, sizeof(v_a)); + t_b->write(&v_b, sizeof(v_b)); + t_c->write(&v_c, sizeof(v_c)); // Invoke the function auto exec = backend->compile(f); @@ -60,7 +60,7 @@ int main() // Get the result float r[2][3]; - t_result->read(&r, 0, sizeof(r)); + t_result->read(&r, sizeof(r)); std::cout << "[" << std::endl; for (size_t i = 0; i < s[0]; ++i) diff --git a/doc/examples/abc_operator/abc_operator.cpp b/doc/examples/abc_operator/abc_operator.cpp index d3c932a8c4c..f88a262484f 100644 --- a/doc/examples/abc_operator/abc_operator.cpp +++ b/doc/examples/abc_operator/abc_operator.cpp @@ -49,9 +49,9 @@ int main() float v_b[2][3] = {{7, 8, 9}, {10, 11, 12}}; float v_c[2][3] = {{1, 0, -1}, {-1, 1, 2}}; - t_a->write(&v_a, 0, sizeof(v_a)); - t_b->write(&v_b, 0, sizeof(v_b)); - t_c->write(&v_c, 0, sizeof(v_c)); + t_a->write(&v_a, sizeof(v_a)); + t_b->write(&v_b, sizeof(v_b)); + t_c->write(&v_c, sizeof(v_c)); // Invoke the function auto exec = backend->compile(f); @@ -59,7 +59,7 @@ int main() // Get the result float r[2][3]; - t_result->read(&r, 0, sizeof(r)); + t_result->read(&r, sizeof(r)); std::cout << "[" << std::endl; for (size_t i = 0; i < s[0]; ++i) diff --git a/doc/examples/dynamic_tensor/CMakeLists.txt b/doc/examples/dynamic_tensor/CMakeLists.txt new file mode 100644 index 00000000000..1cd762a376f --- /dev/null +++ b/doc/examples/dynamic_tensor/CMakeLists.txt @@ -0,0 +1,19 @@ +# ****************************************************************************** +# Copyright 2017-2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ****************************************************************************** + +add_executable(partial_shape partial_shape.cpp) +add_dependencies(partial_shape ngraph cpu_backend) +target_link_libraries(partial_shape ngraph cpu_backend) diff --git a/doc/examples/dynamic_tensor/partial_shape.cpp b/doc/examples/dynamic_tensor/partial_shape.cpp index f7bb4acdc75..75293106dbe 100644 --- a/doc/examples/dynamic_tensor/partial_shape.cpp +++ b/doc/examples/dynamic_tensor/partial_shape.cpp @@ -15,11 +15,19 @@ //***************************************************************************** #include +#include +#include #include +using namespace std; using namespace ngraph; +void execute(shared_ptr be, + shared_ptr ex, + shared_ptr t_out, + uint32_t n); + int main() { // Create and compile a graph where the provided info of shape of x is @@ -27,42 +35,46 @@ int main() auto x_shape_info = PartialShape{2, Dimension::dynamic()}; auto x = make_shared(element::i32, x_shape_info); auto a = x + x; - auto f = make_shared({a}, {x}); - auto be = runtime::backend::create(); + auto f = make_shared(OutputVector{a}, ParameterVector{x}); + auto be = runtime::Backend::create("CPU", true); auto ex = be->compile(f); // Create a dynamic tensor of shape (2,?) auto t_out = be->create_dynamic_tensor(element::i32, x_shape_info); + execute(be, ex, t_out, 3); + execute(be, ex, t_out, 11); + execute(be, ex, t_out, 20); - // Call the graph to write a value with shape (2,3) to t_out - auto t_in = be->create_tensor(element::i32, Shape{2, 3}); - t_in->write(); - ex->call({t_out}, {t_in}) - - // Call the graph again, to write a value with a different shape to - // t_out. - t_in = be->create_tensor(element::i32, Shape{2, 20}); - t_in->write(); - ex->call({t_out}, {t_in}) - - // Get the result. 
At this point t_out->get_shape() would return - // Shape{2,20}, - // but t_out->get_partial_shape() would return "(2,?)" + return 0; +} - float r[2][3]; - t_result->read(&r, 0, sizeof(r)); +void execute(shared_ptr be, + shared_ptr ex, + shared_ptr t_out, + uint32_t n) +{ + // Initialize input of shape (2, n) + auto t_in = be->create_tensor(element::i32, Shape{2, n}); + { + vector t_val(2 * n); + iota(t_val.begin(), t_val.end(), 0); + t_in->write(&t_val[0], t_val.size() * sizeof(t_val[0])); + } + // Get the result + ex->call({t_out}, {t_in}); - std::cout << "[" << std::endl; + auto s = t_out->get_shape(); + vector r(s[0] * s[1]); + t_out->read(&r[0], r.size() * sizeof(r[0])); + cout << "[" << endl; for (size_t i = 0; i < s[0]; ++i) { - std::cout << " ["; + cout << " ["; for (size_t j = 0; j < s[1]; ++j) { - std::cout << r[i][j] << ' '; + cout << r[i * s[1] + j] << ' '; } - std::cout << ']' << std::endl; + cout << ']' << endl; } - std::cout << ']' << std::endl; - - return 0; + cout << ']' << endl; } diff --git a/doc/examples/mnist_mlp/CMakeLists.txt b/doc/examples/mnist_mlp/CMakeLists.txt index 90450974c44..ce387892349 100644 --- a/doc/examples/mnist_mlp/CMakeLists.txt +++ b/doc/examples/mnist_mlp/CMakeLists.txt @@ -17,9 +17,8 @@ add_executable(mnist_mlp mnist_loader.cpp mnist_mlp.cpp) add_dependencies(mnist_mlp ngraph cpu_backend) target_link_libraries(mnist_mlp ngraph cpu_backend) -if (NGRAPH_DISTRIBUTED_ENABLE) - add_executable(dist_mnist_mlp mnist_loader.cpp dist_mnist_mlp.cpp) - target_compile_definitions(dist_mnist_mlp PRIVATE NGRAPH_DISTRIBUTED_ENABLE) - target_include_directories(dist_mnist_mlp SYSTEM PRIVATE libmlsl) - target_link_libraries(dist_mnist_mlp ngraph cpu_backend libmlsl) -endif() + +add_executable(dist_mnist_mlp mnist_loader.cpp dist_mnist_mlp.cpp) +target_compile_definitions(dist_mnist_mlp PRIVATE NGRAPH_DISTRIBUTED_ENABLE) +add_dependencies(dist_mnist_mlp ngraph cpu_backend) +target_link_libraries(dist_mnist_mlp ngraph cpu_backend) diff --git 
a/doc/examples/mnist_mlp/dist_mnist_mlp.cpp b/doc/examples/mnist_mlp/dist_mnist_mlp.cpp index 1771efde1af..56924023448 100644 --- a/doc/examples/mnist_mlp/dist_mnist_mlp.cpp +++ b/doc/examples/mnist_mlp/dist_mnist_mlp.cpp @@ -90,10 +90,8 @@ float test_accuracy(MNistDataLoader& loader, { loader.load(); t_X->write(loader.get_image_floats(), - 0, loader.get_image_batch_size() * sizeof(float)); t_Y->write(loader.get_label_floats(), - 0, loader.get_label_batch_size() * sizeof(float)); exec->call({t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1}); size_t acc = accuracy_count(t_softmax, t_Y); @@ -106,8 +104,6 @@ float test_accuracy(MNistDataLoader& loader, int main(int argc, char* argv[]) { - ngraph::Distributed dist; - size_t epochs = 5; size_t batch_size = 128; size_t output_size = 10; @@ -177,10 +173,10 @@ int main(int argc, char* argv[]) // Updates ngraph::autodiff::Adjoints adjoints(OutputVector{loss}, OutputVector{delta}); - auto grad_W0 = adjoints.backprop_node(W0); - auto grad_b0 = adjoints.backprop_node(b0); - auto grad_W1 = adjoints.backprop_node(W1); - auto grad_b1 = adjoints.backprop_node(b1); + auto grad_W0 = adjoints.backprop_output(W0); + auto grad_b0 = adjoints.backprop_output(b0); + auto grad_W1 = adjoints.backprop_output(W1); + auto grad_b1 = adjoints.backprop_output(b1); auto avg_grad_W0 = std::make_shared(grad_W0); auto avg_grad_b0 = std::make_shared(grad_b0); @@ -254,10 +250,8 @@ int main(int argc, char* argv[]) { train_loader.load(); t_X->write(train_loader.get_image_floats(), - 0, train_loader.get_image_batch_size() * sizeof(float)); t_Y->write(train_loader.get_label_floats(), - 0, train_loader.get_label_batch_size() * sizeof(float)); train_exec->call( {t_loss, diff --git a/doc/examples/mnist_mlp/mnist_mlp.cpp b/doc/examples/mnist_mlp/mnist_mlp.cpp index 3b6b4684c9e..110d2bf7f4c 100644 --- a/doc/examples/mnist_mlp/mnist_mlp.cpp +++ b/doc/examples/mnist_mlp/mnist_mlp.cpp @@ -89,10 +89,8 @@ float test_accuracy(MNistDataLoader& loader, { loader.load(); 
t_X->write(loader.get_image_floats(), - 0, loader.get_image_batch_size() * sizeof(float)); t_Y->write(loader.get_label_floats(), - 0, loader.get_label_batch_size() * sizeof(float)); exec->call({t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1}); size_t acc = accuracy_count(t_softmax, t_Y); @@ -174,10 +172,10 @@ int main(int argc, const char* argv[]) // Updates ngraph::autodiff::Adjoints adjoints(OutputVector{loss}, OutputVector{delta}); - auto W0_next = W0 + adjoints.backprop_node(W0); - auto b0_next = b0 + adjoints.backprop_node(b0); - auto W1_next = W1 + adjoints.backprop_node(W1); - auto b1_next = b1 + adjoints.backprop_node(b1); + auto W0_next = W0 + adjoints.backprop_output(W0); + auto b0_next = b0 + adjoints.backprop_output(b0); + auto W1_next = W1 + adjoints.backprop_output(W1); + auto b1_next = b1 + adjoints.backprop_output(b1); // Get the backend auto backend = runtime::Backend::create("CPU"); @@ -232,7 +230,7 @@ int main(int argc, const char* argv[]) clone_function(Function(OutputVector{softmax}, ParameterVector{X, W0, b0, W1, b1}), inference_node_map); - auto inference_exe = backend->compile(inference_function); + auto inference_exec = backend->compile(inference_function); set_scalar(t_learning_rate, .03f); @@ -241,10 +239,8 @@ int main(int argc, const char* argv[]) { train_loader.load(); t_X->write(train_loader.get_image_floats(), - 0, train_loader.get_image_batch_size() * sizeof(float)); t_Y->write(train_loader.get_label_floats(), - 0, train_loader.get_label_batch_size() * sizeof(float)); train_exec->call( {t_loss, @@ -264,7 +260,7 @@ int main(int argc, const char* argv[]) { last_epoch = train_loader.get_epoch(); std::cout << "Test accuracy: " << test_accuracy(test_loader, - exec, + inference_exec, t_X, t_Y, t_softmax, diff --git a/doc/examples/mnist_mlp/tensor_utils.hpp b/doc/examples/mnist_mlp/tensor_utils.hpp index eed723edfad..c9ffa9b5798 100644 --- a/doc/examples/mnist_mlp/tensor_utils.hpp +++ b/doc/examples/mnist_mlp/tensor_utils.hpp @@ -49,7 +49,7 @@ 
void randomize(std::function rand, { temp.push_back(rand()); } - t->write(&temp[0], 0, element_count * sizeof(T)); + t->write(&temp[0], element_count * sizeof(T)); } // Get a scalar value from a tensor, optionally at an element offset @@ -58,7 +58,7 @@ T get_scalar(const std::shared_ptr& t, size_t element_offset = 0) { T result; - t->read(&result, element_offset * sizeof(T), sizeof(T)); + t->read(&result + (element_offset * sizeof(T)), sizeof(T)); return result; } @@ -68,7 +68,7 @@ void set_scalar(const std::shared_ptr& t, T value, size_t element_offset = 0) { - t->write(&value, element_offset * sizeof(T), sizeof(T)); + t->write(&value + (element_offset * sizeof(T)), sizeof(T)); } // Show a shape diff --git a/doc/sphinx/ngraph_theme/layout_old.html b/doc/sphinx/ngraph_theme/layout_old.html deleted file mode 100644 index deb8df2a1a7..00000000000 --- a/doc/sphinx/ngraph_theme/layout_old.html +++ /dev/null @@ -1,205 +0,0 @@ -{# - basic/layout.html - ~~~~~~~~~~~~~~~~~ - - Master layout template for Sphinx themes. - - :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. -#} -{%- block doctype -%} - -{%- endblock %} -{%- set reldelim1 = reldelim1 is not defined and ' »' or reldelim1 %} -{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %} -{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and - (sidebars != []) %} -{%- set url_root = pathto('', 1) %} -{# XXX necessary? #} -{%- if url_root == '#' %}{% set url_root = '' %}{% endif %} -{%- if not embedded and docstitle %} - {%- set titlesuffix = " — "|safe + docstitle|e %} -{%- else %} - {%- set titlesuffix = "" %} -{%- endif %} - -{%- macro relbar() %} - -{%- endmacro %} - -{%- macro sidebar() %} - {%- if render_sidebar %} -
-
- {%- block sidebarlogo %} - {%- if logo %} - - {%- endif %} - {%- endblock %} - {%- if sidebars != None %} - {#- new style sidebar: explicitly include/exclude templates #} - {%- for sidebartemplate in sidebars %} - {%- include sidebartemplate %} - {%- endfor %} - {%- else %} - {#- old style sidebars: using blocks -- should be deprecated #} - {%- block sidebartoc %} - {%- include "localtoc.html" %} - {%- endblock %} - {%- block sidebarrel %} - {%- include "relations.html" %} - {%- endblock %} - {%- block sidebarsourcelink %} - {%- include "sourcelink.html" %} - {%- endblock %} - {%- if customsidebar %} - {%- include customsidebar %} - {%- endif %} - {%- block sidebarsearch %} - {%- include "searchbox.html" %} - {%- endblock %} - {%- endif %} -
-
- {%- endif %} -{%- endmacro %} - -{%- macro script() %} - - {%- for scriptfile in script_files %} - - {%- endfor %} -{%- endmacro %} - -{%- macro css() %} - - - {%- for cssfile in css_files %} - - {%- endfor %} -{%- endmacro %} - - - - - {{ metatags }} - {%- block htmltitle %} - {{ title|striptags|e }}{{ titlesuffix }} - {%- endblock %} - {{ css() }} - {%- if not embedded %} - {{ script() }} - {%- if use_opensearch %} - - {%- endif %} - {%- if favicon %} - - {%- endif %} - {%- endif %} -{%- block linktags %} - {%- if hasdoc('about') %} - - {%- endif %} - {%- if hasdoc('genindex') %} - - {%- endif %} - {%- if hasdoc('search') %} - - {%- endif %} - {%- if hasdoc('copyright') %} - - {%- endif %} - - {%- if parents %} - - {%- endif %} - {%- if next %} - - {%- endif %} - {%- if prev %} - - {%- endif %} -{%- endblock %} -{%- block extrahead %} {% endblock %} - - -{%- block header %}{% endblock %} - -{%- block relbar1 %}{{ relbar() }}{% endblock %} - -{%- block content %} - {%- block sidebar1 %} {# possible location for sidebar #} {% endblock %} - -
- {%- block document %} -
- {%- if render_sidebar %} -
- {%- endif %} -
- {% block body %} {% endblock %} -
- {%- if render_sidebar %} -
- {%- endif %} -
- {%- endblock %} - - {%- block sidebar2 %}{{ sidebar() }}{% endblock %} -
-
-{%- endblock %} - -{%- block relbar2 %}{{ relbar() }}{% endblock %} - -{%- block footer %} - -

asdf asdf asdf asdf 22

-{%- endblock %} - - - diff --git a/doc/sphinx/ngraph_theme/ngversions.html b/doc/sphinx/ngraph_theme/ngversions.html index cdc30be0d4b..0b34662bf6d 100644 --- a/doc/sphinx/ngraph_theme/ngversions.html +++ b/doc/sphinx/ngraph_theme/ngversions.html @@ -9,6 +9,8 @@
{{ _('Recent Versions') }}
    +
  • 0.27.1
  • +
  • 0.27.0
  • 0.26.0
  • 0.25.1
  • 0.25.0
  • diff --git a/doc/sphinx/source/backends/backend-api/index.rst b/doc/sphinx/source/backends/backend-api/index.rst index 832f1fef229..9ca5ebb5d65 100644 --- a/doc/sphinx/source/backends/backend-api/index.rst +++ b/doc/sphinx/source/backends/backend-api/index.rst @@ -11,10 +11,3 @@ appropriately to import symbols referenced from outside the library, and to export them from within the library. See any of the ``${backend}_backend_visibility`` header files for an example; see also :ref:`what_is_backend` - -.. - - -.. doxygenclass:: ngraph::runtime::Backend - :project: ngraph - :members: diff --git a/doc/sphinx/source/backends/cpp-api.rst b/doc/sphinx/source/backends/cpp-api.rst index 1ad4beee09e..e863e45c210 100644 --- a/doc/sphinx/source/backends/cpp-api.rst +++ b/doc/sphinx/source/backends/cpp-api.rst @@ -9,7 +9,6 @@ Backend APIs backend-api/index dynamicbackend-api/index plaidml-ng-api/index - executable-api/index As of version ``0.15``, there is a new backend API to work with functions that diff --git a/doc/sphinx/source/backends/executable-api/index.rst b/doc/sphinx/source/backends/executable-api/index.rst deleted file mode 100644 index d4404c46e73..00000000000 --- a/doc/sphinx/source/backends/executable-api/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. backends/executable-api/index.rst: - - -Executable -========== - -The ``compile`` function on an ``Executable`` has more direct methods to -actions such as ``validate``, ``call``, ``get_performance_data``, and so on. - -.. doxygenclass:: ngraph::runtime::Executable - :project: ngraph - :members: - diff --git a/doc/sphinx/source/backends/index.rst b/doc/sphinx/source/backends/index.rst index d4472f9c857..a548fabb5f7 100644 --- a/doc/sphinx/source/backends/index.rst +++ b/doc/sphinx/source/backends/index.rst @@ -1,5 +1,7 @@ .. backends/index.rst +.. 
_backend_support: + ##################### Working with Backends ##################### diff --git a/doc/sphinx/source/conf.py b/doc/sphinx/source/conf.py index 633e2ec6124..c687a07e8f9 100644 --- a/doc/sphinx/source/conf.py +++ b/doc/sphinx/source/conf.py @@ -77,7 +77,7 @@ # The Documentation full version, including alpha/beta/rc tags. Some features # available in the latest code will not necessarily be documented first -release = '0.27.0' +release = '0.27.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/sphinx/source/core/constructing-graphs/derive-for-training.rst b/doc/sphinx/source/core/constructing-graphs/derive-for-training.rst index 4df1966618b..60d53353d9e 100644 --- a/doc/sphinx/source/core/constructing-graphs/derive-for-training.rst +++ b/doc/sphinx/source/core/constructing-graphs/derive-for-training.rst @@ -82,20 +82,20 @@ weights and bias: .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp :language: cpp - :lines: 127-135 + :lines: 123-134 Repeat the process for the next layer, .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp :language: cpp - :lines: 138-146 + :lines: 137-144 and normalize everything with a ``softmax``. .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp :language: cpp - :lines: 148-150 + :lines: 146-147 .. _loss: @@ -109,7 +109,7 @@ underflow. .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp :language: cpp - :lines: 154-166 + :lines: 150-164 .. _backprop: @@ -125,7 +125,7 @@ allows the calculations for the adjustments to be further optimized. .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp :language: cpp - :lines: 169-172 + :lines: 167-170 For any node ``N``, if the update for ``loss`` is ``delta``, the @@ -138,7 +138,7 @@ update computation for ``N`` will be given by the node .. 
literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp :language: cpp - :lines: 177-181 + :lines: 177-179 The different update nodes will share intermediate computations. So to @@ -147,7 +147,7 @@ get the updated values for the weights as computed with the specified .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp :language: cpp - :lines: 182-215 + :lines: 184-212 .. _update: @@ -167,5 +167,5 @@ compile clones of the nodes. .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp :language: cpp - :lines: 216-226 + :lines: 217-224 diff --git a/doc/sphinx/source/core/constructing-graphs/distribute-train.rst b/doc/sphinx/source/core/constructing-graphs/distribute-train.rst index 4f6f0fd57ac..00b8bdc5893 100644 --- a/doc/sphinx/source/core/constructing-graphs/distribute-train.rst +++ b/doc/sphinx/source/core/constructing-graphs/distribute-train.rst @@ -41,7 +41,7 @@ the new code is highlighted below: .. literalinclude:: ../../../../examples/mnist_mlp/dist_mnist_mlp.cpp :language: cpp - :lines: 178-194 + :lines: 174-189 :emphasize-lines: 8-11 See the `full code`_ in the ``examples`` folder ``/doc/examples/mnist_mlp/dist_mnist_mlp.cpp``. diff --git a/doc/sphinx/source/core/constructing-graphs/execute.rst b/doc/sphinx/source/core/constructing-graphs/execute.rst index 491c215218b..5e5cb053158 100644 --- a/doc/sphinx/source/core/constructing-graphs/execute.rst +++ b/doc/sphinx/source/core/constructing-graphs/execute.rst @@ -270,16 +270,15 @@ programmatically or manually) in order to successfully retreive shape data. * :ref:`create_dyn_tensor` * :ref:`call_graph_vw_` -* :ref:`call_graph_vwnew` +* :ref:`dyn_ten_result` * :ref:`kpsh` -Create and compile a graph for ``f(x) = x + x`` where the provided info -of shape ``x`` is ``(2,?)``: +Create and compile a graph where the provided info of shape ``x`` is ``(2,?)``: .. literalinclude:: ../../../../examples/dynamic_tensor/partial_shape.cpp :language: cpp - :lines: 27-32 + :lines: 35-40 .. 
_create_dyn_tensor: @@ -291,7 +290,7 @@ Create a dynamic tensor of shape ``(2,?)`` .. literalinclude:: ../../../../examples/dynamic_tensor/partial_shape.cpp :language: cpp - :lines: 35 + :lines: 43-46 At this point, ``t_out->get_shape()`` would throw an exception, while ``t_out->get_partial_shape()`` would return ``"(2,?)"``. @@ -299,29 +298,25 @@ At this point, ``t_out->get_shape()`` would throw an exception, while .. _call_graph_vw_: -Write shape ------------ - -Call the graph to write a value with shape (2,3) to t_out +Initialize input of shape +------------------------- .. literalinclude:: ../../../../examples/dynamic_tensor/partial_shape.cpp :language: cpp - :lines: 38-40 + :lines: 57-62 At this point, ``t_out->get_shape()`` would return ``Shape{2,3}``, while ``t_out->get_partial_shape()`` would return ``"(2,?)"``. -.. _call_graph_vwnew: - -Write new shape ---------------- +.. _dyn_ten_result: -Call the graph again, to write a value with a different shape to ``t_out``. +Get the result +-------------- .. literalinclude:: ../../../../examples/dynamic_tensor/partial_shape.cpp :language: cpp - :lines: 44-45 + :lines: 64-80 At this point, ``t_out->get_shape()`` would return ``Shape{2,20}``, while ``t_out->get_partial_shape()`` would return ``"(2,?)"``. diff --git a/doc/sphinx/source/core/overview.rst b/doc/sphinx/source/core/overview.rst index 5c04219f803..46fdef27730 100644 --- a/doc/sphinx/source/core/overview.rst +++ b/doc/sphinx/source/core/overview.rst @@ -1,5 +1,6 @@ .. core/overview.rst: +.. _ngraph_core: Basic Concepts ============== diff --git a/doc/sphinx/source/core/passes/passes.rst b/doc/sphinx/source/core/passes/passes.rst index d9b591d3cad..c54a150bc25 100644 --- a/doc/sphinx/source/core/passes/passes.rst +++ b/doc/sphinx/source/core/passes/passes.rst @@ -95,7 +95,7 @@ group before finally concatenating the result back together. .. _figure-mobilenet-gc: -.. figure:: ../../graphics/mobilenet-group-conv.png +.. 
figure:: ../../graphics/mobilenet-group-conv.svg :width: 700px :alt: MobileNet example @@ -109,4 +109,4 @@ several ways: * Reduces sheer node count, * Provides mappability to MKL-DNN, which has an accelerated group convolution implementation, and -* Eliminates unnecessary temporary nodes. \ No newline at end of file +* Eliminates unnecessary temporary nodes. diff --git a/doc/sphinx/source/frameworks/overview.rst b/doc/sphinx/source/frameworks/overview.rst index da3312e3b7b..1d098982e47 100644 --- a/doc/sphinx/source/frameworks/overview.rst +++ b/doc/sphinx/source/frameworks/overview.rst @@ -1,5 +1,7 @@ .. frameworks/overview.rst +.. _framework_support: + Basic concepts ============== diff --git a/doc/sphinx/source/graphics/mobilenet-group-conv.svg b/doc/sphinx/source/graphics/mobilenet-group-conv.svg new file mode 100644 index 00000000000..64566ffb782 --- /dev/null +++ b/doc/sphinx/source/graphics/mobilenet-group-conv.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/doc/sphinx/source/index.rst b/doc/sphinx/source/index.rst index 1e297fc1dbf..6c26fef3f8c 100644 --- a/doc/sphinx/source/index.rst +++ b/doc/sphinx/source/index.rst @@ -59,7 +59,6 @@ nGraph Compiler Stack Documentation core/fusion/index.rst nGraph Core Ops provenance/index.rst - Graph Execution API core/quantization.rst dynamic/index.rst diff --git a/doc/sphinx/source/inspection/index.rst b/doc/sphinx/source/inspection/index.rst index b82e5b6a714..330ddd8b5fb 100644 --- a/doc/sphinx/source/inspection/index.rst +++ b/doc/sphinx/source/inspection/index.rst @@ -1,5 +1,7 @@ .. inspection/index: +.. _inspection: + Visualization Tools ################### diff --git a/doc/sphinx/source/ops/abs.rst b/doc/sphinx/source/ops/abs.rst index d1e969faf9a..0de1bbf9840 100644 --- a/doc/sphinx/source/ops/abs.rst +++ b/doc/sphinx/source/ops/abs.rst @@ -54,6 +54,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Abs +.. 
doxygenclass:: ngraph::op::v0::Abs :project: ngraph :members: diff --git a/doc/sphinx/source/ops/acos.rst b/doc/sphinx/source/ops/acos.rst index 4582c482c28..893ac25442f 100644 --- a/doc/sphinx/source/ops/acos.rst +++ b/doc/sphinx/source/ops/acos.rst @@ -53,6 +53,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Acos +.. doxygenclass:: ngraph::op::v0::Acos :project: ngraph :members: diff --git a/doc/sphinx/source/ops/all.rst b/doc/sphinx/source/ops/all.rst index fa362979bc0..dc2e9652f74 100644 --- a/doc/sphinx/source/ops/all.rst +++ b/doc/sphinx/source/ops/all.rst @@ -42,6 +42,6 @@ Outputs C++ Interface ============= -.. doxygenclass:: ngraph::op::All +.. doxygenclass:: ngraph::op::v0::All :project: ngraph :members: diff --git a/doc/sphinx/source/ops/allreduce.rst b/doc/sphinx/source/ops/allreduce.rst index 54908eb81e4..c00680c7091 100644 --- a/doc/sphinx/source/ops/allreduce.rst +++ b/doc/sphinx/source/ops/allreduce.rst @@ -41,6 +41,6 @@ Outputs C++ Interface ============= -.. doxygenclass:: ngraph::op::AllReduce +.. doxygenclass:: ngraph::op::v0::AllReduce :project: ngraph :members: diff --git a/doc/sphinx/source/ops/asin.rst b/doc/sphinx/source/ops/asin.rst index b776a57a0b5..e86617c07cc 100644 --- a/doc/sphinx/source/ops/asin.rst +++ b/doc/sphinx/source/ops/asin.rst @@ -52,6 +52,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Asin +.. doxygenclass:: ngraph::op::v0::Asin :project: ngraph :members: diff --git a/doc/sphinx/source/ops/atan.rst b/doc/sphinx/source/ops/atan.rst index eaaeb454ba6..68c632d90f5 100644 --- a/doc/sphinx/source/ops/atan.rst +++ b/doc/sphinx/source/ops/atan.rst @@ -54,6 +54,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Atan +.. 
doxygenclass:: ngraph::op::v0::Atan :project: ngraph :members: diff --git a/doc/sphinx/source/ops/batch_norm_inference.rst b/doc/sphinx/source/ops/batch_norm_inference.rst index 9017ac19c20..e5cb4c2ea0b 100644 --- a/doc/sphinx/source/ops/batch_norm_inference.rst +++ b/doc/sphinx/source/ops/batch_norm_inference.rst @@ -73,7 +73,7 @@ and by two scaling attributes: :math:`\gamma` and :math:`\beta`. C++ Interface ============== -.. doxygenclass:: ngraph::op::BatchNormInference +.. doxygenclass:: ngraph::op::v0::BatchNormInference :project: ngraph :members: diff --git a/doc/sphinx/source/ops/batch_norm_training.rst b/doc/sphinx/source/ops/batch_norm_training.rst index 272178e18df..222ffb006e2 100644 --- a/doc/sphinx/source/ops/batch_norm_training.rst +++ b/doc/sphinx/source/ops/batch_norm_training.rst @@ -91,7 +91,7 @@ Backprop C++ Interface ============== -.. doxygenclass:: ngraph::op::BatchNormTraining +.. doxygenclass:: ngraph::op::v0::BatchNormTraining :project: ngraph :members: diff --git a/doc/sphinx/source/ops/batch_norm_training_backprop.rst b/doc/sphinx/source/ops/batch_norm_training_backprop.rst index fdd98518a9d..8043dd2cb6f 100644 --- a/doc/sphinx/source/ops/batch_norm_training_backprop.rst +++ b/doc/sphinx/source/ops/batch_norm_training_backprop.rst @@ -101,7 +101,7 @@ We work backwards C++ Interface ============== -.. doxygenclass:: ngraph::op::BatchNormTrainingBackprop +.. doxygenclass:: ngraph::op::v0::BatchNormTrainingBackprop :project: ngraph :members: diff --git a/doc/sphinx/source/ops/broadcast_distributed.rst b/doc/sphinx/source/ops/broadcast_distributed.rst index 19e80260875..d4958270eb7 100644 --- a/doc/sphinx/source/ops/broadcast_distributed.rst +++ b/doc/sphinx/source/ops/broadcast_distributed.rst @@ -41,6 +41,6 @@ Outputs (in place) C++ Interface ============= -.. doxygenclass:: ngraph::op::BroadcastDistributed +.. 
doxygenclass:: ngraph::op::v0::BroadcastDistributed :project: ngraph :members: diff --git a/doc/sphinx/source/ops/ceiling.rst b/doc/sphinx/source/ops/ceiling.rst index 262b2950fea..4e9239045eb 100644 --- a/doc/sphinx/source/ops/ceiling.rst +++ b/doc/sphinx/source/ops/ceiling.rst @@ -56,6 +56,6 @@ to define a backprop. C++ Interface ============= -.. doxygenclass:: ngraph::op::Ceiling +.. doxygenclass:: ngraph::op::v0::Ceiling :project: ngraph :members: diff --git a/doc/sphinx/source/ops/concat.rst b/doc/sphinx/source/ops/concat.rst index 48a9fc84648..d5f9ee09ca7 100644 --- a/doc/sphinx/source/ops/concat.rst +++ b/doc/sphinx/source/ops/concat.rst @@ -74,6 +74,6 @@ We slice the backprop value into the backprops associated with the inputs. C++ Interface ============= -.. doxygenclass:: ngraph::op::Concat +.. doxygenclass:: ngraph::op::v0::Concat :project: ngraph :members: diff --git a/doc/sphinx/source/ops/convert.rst b/doc/sphinx/source/ops/convert.rst index f234ed21046..af529833f25 100644 --- a/doc/sphinx/source/ops/convert.rst +++ b/doc/sphinx/source/ops/convert.rst @@ -55,6 +55,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Convert +.. doxygenclass:: ngraph::op::v0::Convert :project: ngraph :members: diff --git a/doc/sphinx/source/ops/cos.rst b/doc/sphinx/source/ops/cos.rst index 61e74d3d016..9c57ec2be98 100644 --- a/doc/sphinx/source/ops/cos.rst +++ b/doc/sphinx/source/ops/cos.rst @@ -54,6 +54,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Cos +.. doxygenclass:: ngraph::op::v0::Cos :project: ngraph :members: diff --git a/doc/sphinx/source/ops/cosh.rst b/doc/sphinx/source/ops/cosh.rst index 61177d1a61a..4e576066c83 100644 --- a/doc/sphinx/source/ops/cosh.rst +++ b/doc/sphinx/source/ops/cosh.rst @@ -54,6 +54,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Cosh +.. 
doxygenclass:: ngraph::op::v0::Cosh :project: ngraph :members: diff --git a/doc/sphinx/source/ops/dot.rst b/doc/sphinx/source/ops/dot.rst index 668bd069022..6de4b9de2e3 100644 --- a/doc/sphinx/source/ops/dot.rst +++ b/doc/sphinx/source/ops/dot.rst @@ -82,6 +82,6 @@ To be documented. C++ Interface ============= -.. doxygenclass:: ngraph::op::Dot +.. doxygenclass:: ngraph::op::v0::Dot :project: ngraph :members: diff --git a/doc/sphinx/source/ops/exp.rst b/doc/sphinx/source/ops/exp.rst index 3467da9e8ae..817ad15fa3a 100644 --- a/doc/sphinx/source/ops/exp.rst +++ b/doc/sphinx/source/ops/exp.rst @@ -54,6 +54,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Exp +.. doxygenclass:: ngraph::op::v0::Exp :project: ngraph :members: diff --git a/doc/sphinx/source/ops/floor.rst b/doc/sphinx/source/ops/floor.rst index 3024cee87d8..102a6079682 100644 --- a/doc/sphinx/source/ops/floor.rst +++ b/doc/sphinx/source/ops/floor.rst @@ -55,6 +55,6 @@ to define a backprop. C++ Interface ============= -.. doxygenclass:: ngraph::op::Floor +.. doxygenclass:: ngraph::op::v0::Floor :project: ngraph :members: diff --git a/doc/sphinx/source/ops/get_output_element.rst b/doc/sphinx/source/ops/get_output_element.rst index 2a0d857c766..b899f88e5a9 100644 --- a/doc/sphinx/source/ops/get_output_element.rst +++ b/doc/sphinx/source/ops/get_output_element.rst @@ -44,6 +44,6 @@ Outputs C++ Interface ============= -.. doxygenclass:: ngraph::op::GetOutputElement +.. doxygenclass:: ngraph::op::v0::GetOutputElement :project: ngraph :members: diff --git a/doc/sphinx/source/ops/log.rst b/doc/sphinx/source/ops/log.rst index 6f8fbc79e7c..134586e3e96 100644 --- a/doc/sphinx/source/ops/log.rst +++ b/doc/sphinx/source/ops/log.rst @@ -55,6 +55,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Log +.. 
doxygenclass:: ngraph::op::v0::Log :project: ngraph :members: diff --git a/doc/sphinx/source/ops/negative.rst b/doc/sphinx/source/ops/negative.rst index b9ddf993052..d518cf1d32e 100644 --- a/doc/sphinx/source/ops/negative.rst +++ b/doc/sphinx/source/ops/negative.rst @@ -54,6 +54,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Negative +.. doxygenclass:: ngraph::op::v0::Negative :project: ngraph :members: diff --git a/doc/sphinx/source/ops/quantize.rst b/doc/sphinx/source/ops/quantize.rst index c9658ad5756..80fb1cd4343 100644 --- a/doc/sphinx/source/ops/quantize.rst +++ b/doc/sphinx/source/ops/quantize.rst @@ -102,6 +102,6 @@ Mathematical Definition C++ Interface ============= -.. doxygenclass:: ngraph::op::Quantize +.. doxygenclass:: ngraph::op::v0::Quantize :project: ngraph :members: diff --git a/doc/sphinx/source/ops/relu.rst b/doc/sphinx/source/ops/relu.rst index e1caf46bba6..023a9c63387 100644 --- a/doc/sphinx/source/ops/relu.rst +++ b/doc/sphinx/source/ops/relu.rst @@ -40,6 +40,6 @@ Mathematical Definition C++ Interface ============= -.. doxygenclass:: ngraph::op::Relu +.. doxygenclass:: ngraph::op::v0::Relu :project: ngraph :members: diff --git a/doc/sphinx/source/ops/shape_of.rst b/doc/sphinx/source/ops/shape_of.rst index 07dcc7028e8..2ee490eee9d 100644 --- a/doc/sphinx/source/ops/shape_of.rst +++ b/doc/sphinx/source/ops/shape_of.rst @@ -46,6 +46,6 @@ Mathematical Definition C++ Interface ============= -.. doxygenclass:: ngraph::op::ShapeOf +.. doxygenclass:: ngraph::op::v0::ShapeOf :project: ngraph :members: diff --git a/doc/sphinx/source/ops/sigmoid.rst b/doc/sphinx/source/ops/sigmoid.rst index 77a9cba2b26..840a512eed9 100644 --- a/doc/sphinx/source/ops/sigmoid.rst +++ b/doc/sphinx/source/ops/sigmoid.rst @@ -47,6 +47,6 @@ Mathematical Definition C++ Interface ============= -.. doxygenclass:: ngraph::op::Sigmoid +.. 
doxygenclass:: ngraph::op::v0::Sigmoid :project: ngraph :members: diff --git a/doc/sphinx/source/ops/sign.rst b/doc/sphinx/source/ops/sign.rst index 67a3ffce337..636df4122b0 100644 --- a/doc/sphinx/source/ops/sign.rst +++ b/doc/sphinx/source/ops/sign.rst @@ -45,6 +45,6 @@ Mathematical Definition C++ Interface ============= -.. doxygenclass:: ngraph::op::Sign +.. doxygenclass:: ngraph::op::v0::Sign :project: ngraph :members: diff --git a/doc/sphinx/source/ops/sin.rst b/doc/sphinx/source/ops/sin.rst index d94b9ccaeab..0bfd4313a23 100644 --- a/doc/sphinx/source/ops/sin.rst +++ b/doc/sphinx/source/ops/sin.rst @@ -52,6 +52,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Sin +.. doxygenclass:: ngraph::op::v0::Sin :project: ngraph :members: diff --git a/doc/sphinx/source/ops/sinh.rst b/doc/sphinx/source/ops/sinh.rst index 2fc6f613a69..ee8a1677e28 100644 --- a/doc/sphinx/source/ops/sinh.rst +++ b/doc/sphinx/source/ops/sinh.rst @@ -52,6 +52,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Sinh +.. doxygenclass:: ngraph::op::v0::Sinh :project: ngraph :members: diff --git a/doc/sphinx/source/ops/sqrt.rst b/doc/sphinx/source/ops/sqrt.rst index e70b6a055d9..08a411dc94e 100644 --- a/doc/sphinx/source/ops/sqrt.rst +++ b/doc/sphinx/source/ops/sqrt.rst @@ -52,6 +52,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Sqrt +.. doxygenclass:: ngraph::op::v0::Sqrt :project: ngraph :members: diff --git a/doc/sphinx/source/ops/subtract.rst b/doc/sphinx/source/ops/subtract.rst index 96817fb93d6..e4738f9d452 100644 --- a/doc/sphinx/source/ops/subtract.rst +++ b/doc/sphinx/source/ops/subtract.rst @@ -58,6 +58,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Subtract +.. 
doxygenclass:: ngraph::op::v0::Subtract :project: ngraph :members: diff --git a/doc/sphinx/source/ops/tan.rst b/doc/sphinx/source/ops/tan.rst index 485ea94c44d..e004d3a1f64 100644 --- a/doc/sphinx/source/ops/tan.rst +++ b/doc/sphinx/source/ops/tan.rst @@ -52,6 +52,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Tan +.. doxygenclass:: ngraph::op::v0::Tan :project: ngraph :members: diff --git a/doc/sphinx/source/ops/tanh.rst b/doc/sphinx/source/ops/tanh.rst index 17c913ab07a..af6d8ae7510 100644 --- a/doc/sphinx/source/ops/tanh.rst +++ b/doc/sphinx/source/ops/tanh.rst @@ -53,6 +53,6 @@ Backprop C++ Interface ============= -.. doxygenclass:: ngraph::op::Tanh +.. doxygenclass:: ngraph::op::v0::Tanh :project: ngraph :members: diff --git a/doc/sphinx/source/ops/transpose.rst b/doc/sphinx/source/ops/transpose.rst index 0930f23dc4e..665768db929 100644 --- a/doc/sphinx/source/ops/transpose.rst +++ b/doc/sphinx/source/ops/transpose.rst @@ -69,6 +69,6 @@ Not yet implemented. C++ Interface ============= -.. doxygenclass:: ngraph::op::Transpose +.. doxygenclass:: ngraph::op::v0::Transpose :project: ngraph :members: diff --git a/doc/sphinx/source/sitemap.rst b/doc/sphinx/source/sitemap.rst new file mode 100644 index 00000000000..20fd34e5649 --- /dev/null +++ b/doc/sphinx/source/sitemap.rst @@ -0,0 +1,83 @@ +:orphan: + +Sitemap +####### + +* :ref:`Getting Started ` + + .. toctree:: + :maxdepth: 1 + + introduction + tutorials/index.rst + + +* :ref:`Framework Support ` + + .. toctree:: + :maxdepth: 1 + + frameworks/overview.rst + frameworks/tensorflow_connect.rst + frameworks/onnx_integ.rst + frameworks/paddle_integ.rst + frameworks/other/index.rst + + +* :ref:`nGraph Core ` + + .. toctree:: + :maxdepth: 1 + + core/overview.rst + buildlb.rst + core/constructing-graphs/index.rst + core/passes/passes.rst + core/fusion/index.rst + nGraph Core Ops + provenance/index.rst + core/quantization.rst + dynamic/index.rst + + +* :ref:`Backend Support ` + + .. 
toctree:: + :maxdepth: 1 + + Basic Concepts + backends/plaidml-ng-api/index.rst + Integrating Other Backends + + +* :ref:`Training ` + + .. toctree:: + :maxdepth: 1 + + training/index.rst + training/qat.rst + +* :ref:`Validated Workloads ` + + .. toctree:: + :maxdepth: 1 + + frameworks/validated/list.rst + + +* :ref:`Debugging Graphs ` + + .. toctree:: + :maxdepth: 1 + + inspection/index.rst + + +* :ref:`Contribution ` + + .. toctree:: + :maxdepth: 1 + + project/contribution-guide.rst + glossary.rst diff --git a/doc/sphinx/source/training/index.rst b/doc/sphinx/source/training/index.rst index c35b7a14d03..44d5e3b2782 100644 --- a/doc/sphinx/source/training/index.rst +++ b/doc/sphinx/source/training/index.rst @@ -1,5 +1,6 @@ .. training/index.rst: +.. _training: Distributed Training #################### diff --git a/doc/sphinx/source/training/overview.rst b/doc/sphinx/source/training/overview.rst index fbf844ceda1..14f3cd136e8 100644 --- a/doc/sphinx/source/training/overview.rst +++ b/doc/sphinx/source/training/overview.rst @@ -1,5 +1,6 @@ .. training/overview.rst: +.. 
_overview: Basic Concepts ============== diff --git a/src/ngraph/autodiff/adjoints.cpp b/src/ngraph/autodiff/adjoints.cpp index 1105d31010e..cba3894aecd 100644 --- a/src/ngraph/autodiff/adjoints.cpp +++ b/src/ngraph/autodiff/adjoints.cpp @@ -64,9 +64,6 @@ autodiff::Adjoints::Adjoints(const OutputVector& ys, const OutputVector& cs) // Number of nodes that use the node's value std::unordered_map, size_t> parent_counts; - // Nodes that have been processed - std::unordered_set> visited_nodes; - // Nodes we should check std::list> nodes_to_check; for (auto& y : ys) @@ -77,40 +74,40 @@ autodiff::Adjoints::Adjoints(const OutputVector& ys, const OutputVector& cs) { auto node = nodes_to_check.front(); nodes_to_check.pop_front(); - if (visited_nodes.count(node) != 0) - { - continue; - } - for (auto input : node->inputs()) + if (m_adjoint_map.find(node.get()) == m_adjoint_map.end()) { - auto arg = input.get_source_output().get_node_shared_ptr(); - auto count_it = parent_counts.find(arg); - if (count_it == parent_counts.end()) + m_adjoint_map[node.get()] = OutputVector(node->get_output_size()); + for (auto value : node->input_values()) { - parent_counts[arg] = 1; - nodes_to_check.push_front(arg); - } - else - { - parent_counts[arg]++; + auto arg = value.get_node_shared_ptr(); + auto count_it = parent_counts.find(arg); + if (count_it == parent_counts.end()) + { + parent_counts[arg] = 1; + nodes_to_check.push_front(arg); + } + else + { + parent_counts[arg]++; + } } } - visited_nodes.insert(node); } // Second pass visits the nodes so that all users of a node's value are visited // before a node is visited. 
for (size_t i = 0; i < ys.size(); i++) { - Node* n = ys.at(i).get_node(); - OutputVector t{cs.at(i)}; - std::pair pair = std::make_pair(n, t); - m_adjoint_map.insert(std::make_pair(ys.at(i).get_node(), OutputVector{cs.at(i)})); + add_delta(ys.at(i), cs.at(i)); } for (auto& y : ys) { - nodes_to_check.push_back(y.get_node_shared_ptr()); + auto node = y.get_node_shared_ptr(); + if (find(nodes_to_check.begin(), nodes_to_check.end(), node) == nodes_to_check.end()) + { + nodes_to_check.push_back(y.get_node_shared_ptr()); + } } while (nodes_to_check.size() > 0) @@ -118,9 +115,9 @@ autodiff::Adjoints::Adjoints(const OutputVector& ys, const OutputVector& cs) auto node = nodes_to_check.front(); nodes_to_check.pop_front(); // Look for nodes that will be available when this node is done - for (auto input : node->inputs()) + for (auto value : node->input_values()) { - auto input_source_node = input.get_source_output().get_node_shared_ptr(); + auto input_source_node = value.get_node_shared_ptr(); auto count_it = parent_counts.find(input_source_node); count_it->second--; if (0 == count_it->second) @@ -128,43 +125,57 @@ autodiff::Adjoints::Adjoints(const OutputVector& ys, const OutputVector& cs) nodes_to_check.push_front(input_source_node); } } - OutputVector deltas = get(node); - NodeVector delta_nodes; - for (auto delta : deltas) + OutputVector deltas = m_adjoint_map[node.get()]; + for (size_t i = 0; i < node->get_output_size(); ++i) { - delta_nodes.push_back(get_output_element(delta)); + auto& delta = deltas[i]; + if (delta == Output()) + { + delta = make_broadcast_zero(node->output(i)); + } } - node->generate_adjoints(*this, delta_nodes); + node->generate_adjoints(*this, deltas); } } -const OutputVector& autodiff::Adjoints::get(const Output& x) +Output autodiff::Adjoints::backprop_output(const Output& x) { - auto adjoint_it = m_adjoint_map.find(x.get_node()); + auto node = x.get_node(); + auto adjoint_it = m_adjoint_map.find(node); + Output result; + OutputVector deltas; if 
(m_adjoint_map.end() == adjoint_it) { - adjoint_it = - m_adjoint_map.insert({x.get_node(), make_zeros(x.get_node_shared_ptr())}).first; + deltas = OutputVector(node->get_output_size()); + m_adjoint_map[node] = deltas; + } + else + { + deltas = adjoint_it->second; + } + if (deltas.at(x.get_index()) == Output()) + { + deltas.at(x.get_index()) = make_broadcast_zero(x); } - return adjoint_it->second; + return deltas.at(x.get_index()); } -void autodiff::Adjoints::add_delta(const Output& x, - const Output& delta, - size_t output_index) +void autodiff::Adjoints::add_delta(const Output& x, const Output& delta) { auto adjoint_it = m_adjoint_map.find(x.get_node()); - if (m_adjoint_map.end() == adjoint_it) + if (adjoint_it == m_adjoint_map.end()) { - auto zeros = make_zeros(x.get_node_shared_ptr()); - zeros.at(output_index) = delta; - m_adjoint_map.insert({x.get_node(), zeros}); + m_adjoint_map[x.get_node()] = OutputVector(x.get_node()->get_output_size()); + adjoint_it = m_adjoint_map.find(x.get_node()); + } + auto& deltas = adjoint_it->second[x.get_index()]; + if (deltas == Output()) + { + deltas = delta; } else { - auto& deltas = adjoint_it->second; - deltas.at(output_index) = std::make_shared(deltas.at(output_index), delta); - adjoint_it->second = deltas; + deltas = std::make_shared(deltas, delta); } } @@ -183,33 +194,21 @@ void autodiff::Adjoints::add_delta_to_slice(const Output& x, } auto adjoint_it = m_adjoint_map.find(x.get_node()); - if (m_adjoint_map.end() == adjoint_it) + auto& deltas = adjoint_it->second[x.get_index()]; + if (deltas == Output()) { auto zero = make_broadcast_zero(x); - OutputVector zeros{ - std::make_shared(zero, delta, lower_bounds, upper_bounds, strides)}; - m_adjoint_map.insert({x.get_node(), zeros}); + deltas = + std::make_shared(zero, delta, lower_bounds, upper_bounds, strides); } else { - auto& deltas = adjoint_it->second; - deltas.at(0) = std::make_shared( - deltas.at(0), + deltas = std::make_shared( + deltas, std::make_shared( - 
std::make_shared(deltas.at(0), lower_bounds, upper_bounds, strides), - delta), + std::make_shared(deltas, lower_bounds, upper_bounds, strides), delta), lower_bounds, upper_bounds, strides); } } - -std::shared_ptr autodiff::Adjoints::backprop_node(const Output& x) -{ - return get_output_element(backprop_output(x)); -} - -Output autodiff::Adjoints::backprop_output(const Output& x) -{ - return get(x).at(x.get_index()); -} diff --git a/src/ngraph/autodiff/adjoints.hpp b/src/ngraph/autodiff/adjoints.hpp index 15d4faec020..a9cb4386e77 100644 --- a/src/ngraph/autodiff/adjoints.hpp +++ b/src/ngraph/autodiff/adjoints.hpp @@ -50,18 +50,11 @@ namespace ngraph Adjoints& operator=(const Adjoints& adjoints) = default; Adjoints() = default; - /// \brief (dy/dx)(c) - /// - /// \param x The node whose adjoint is desired. - const OutputVector& get(const Output& x); - /// \brief Add a backprop contribution to x's adjoint /// /// \param x The adjoint node /// \param delta A backprop contribution - void add_delta(const Output& x, - const Output& delta, - size_t output_index = 0); + void add_delta(const Output& x, const Output& delta); /// \brief Add a backprop contribution to a slice of x's adjoint /// @@ -76,7 +69,9 @@ namespace ngraph const Coordinate& upper_bounds, const Strides& strides); - std::shared_ptr backprop_node(const Output& x); + /// \brief (dy/dx)(c) + /// + /// \param x The output whose adjoint is desired. 
Output backprop_output(const Output& x); protected: diff --git a/src/ngraph/frontend/fluid/CMakeLists.txt b/src/ngraph/frontend/fluid/CMakeLists.txt index 772a562ff1d..8c1cc9aa2db 100644 --- a/src/ngraph/frontend/fluid/CMakeLists.txt +++ b/src/ngraph/frontend/fluid/CMakeLists.txt @@ -16,6 +16,8 @@ # Add files here target_sources (ngraph PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/operators/matmul.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/operators/matmul.cpp ${CMAKE_CURRENT_SOURCE_DIR}/operators/pool.cpp ${CMAKE_CURRENT_SOURCE_DIR}/operators/pool.hpp ${CMAKE_CURRENT_SOURCE_DIR}/operators/reduce_sum.cpp diff --git a/src/ngraph/frontend/fluid/operators/matmul.cpp b/src/ngraph/frontend/fluid/operators/matmul.cpp new file mode 100644 index 00000000000..7c1d4c6a9bf --- /dev/null +++ b/src/ngraph/frontend/fluid/operators/matmul.cpp @@ -0,0 +1,123 @@ +//***************************************************************************** +// Copyright 2017-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** +#include "ngraph/frontend/fluid/operators/matmul.hpp" +#include +#include +#include "ngraph/builder/matmul_factory.hpp" +#include "ngraph/builder/reshape.hpp" +#include "ngraph/op/reshape.hpp" + +using namespace std; +using namespace ngraph; + +constexpr NodeTypeInfo fluid::MatMul::type_info; +fluid::MatMul::MatMul(const Output& A, + const Output& B, + const bool& transpose_a, + const bool& transpose_b) + : FusedOp(OutputVector{A, B}) + , m_transpose_a{transpose_a} + , m_transpose_b{transpose_b} +{ + constructor_validate_and_infer_types(); +} + +template +void DecomposeLogic(Input& input, bool transpose, bool reverse = false) +{ + auto rank = input.get_shape().size(); + if (rank < 2) + { + if (rank) + { + if (reverse) + { + input = + make_shared(input, AxisVector{0}, Shape{input.get_shape()[0], 1}); + } + else + { + input = + make_shared(input, AxisVector{0}, Shape{1, input.get_shape()[0]}); + } + } + else + { + input = make_shared(input, AxisVector{}, Shape{1, 1}); + } + rank = 2; + } + if (transpose) + { + vector axes_order(rank); + iota(axes_order.begin(), axes_order.end(), 0); + swap(axes_order[rank - 1], axes_order[rank - 2]); + input = builder::reorder_axes(input, axes_order); + } +} + +inline NodeVector remove_1(std::shared_ptr input_node) +{ + auto input_shape = input_node->get_shape(); + AxisVector axis(input_shape.size()); + iota(axis.begin(), axis.end(), 0); + Shape shape(input_shape.begin(), input_shape.end()); + auto b_remove = std::remove(shape.begin(), shape.end(), 1); + shape.erase(b_remove, shape.end()); + Output node(input_node); + auto reshape = make_shared(node, axis, shape); + NodeVector final_vector{reshape}; + return final_vector; +} + +void fluid::MatMul::pre_validate_and_infer_types() +{ + element::Type input_element_type = get_input_element_type(0); + NODE_VALIDATION_CHECK(this, + input_element_type.is_dynamic() || input_element_type.is_real(), + "Argument 
element type must be f16, bf16, f32, f64 or dynamic (got ", + input_element_type, + ")."); + if (is_dynamic()) + { + set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); + } +} + +NodeVector fluid::MatMul::decompose_op() const +{ + auto A = input_value(0); + auto B = input_value(1); + DecomposeLogic(A, m_transpose_a); + DecomposeLogic(B, m_transpose_b, true); + builder::MatmulFactory factory({A, B}); + auto node_vector_matmul = factory.make_matmul_op(); + auto first_item_node_vector = node_vector_matmul[0]; + auto b = first_item_node_vector->get_shape().begin(); + auto e = first_item_node_vector->get_shape().end(); + auto it = std::find(b, e, 1); + if (it != e) + { + node_vector_matmul = remove_1(first_item_node_vector); + } + return node_vector_matmul; +} + +shared_ptr fluid::MatMul::copy_with_new_args(const NodeVector& new_args) const +{ + check_new_args_count(this, new_args); + return make_shared(new_args.at(0), new_args.at(1), m_transpose_a, m_transpose_b); +} diff --git a/src/ngraph/frontend/fluid/operators/matmul.hpp b/src/ngraph/frontend/fluid/operators/matmul.hpp new file mode 100644 index 00000000000..0725a7bbe54 --- /dev/null +++ b/src/ngraph/frontend/fluid/operators/matmul.hpp @@ -0,0 +1,58 @@ +//***************************************************************************** +// Copyright 2017-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#pragma once + +#include "ngraph/node.hpp" +#include "ngraph/op/op.hpp" +#include "ngraph/op/util/fused_op.hpp" + +namespace ngraph +{ + namespace fluid + { + /// \brief Operator performing Matrix Multiplication. + class NGRAPH_API MatMul : public ngraph::op::util::FusedOp + { + public: + static constexpr NodeTypeInfo type_info{"MatMul", 0}; + const NodeTypeInfo& get_type_info() const override { return type_info; } + MatMul() = default; + /// \brief Constructs a MatMul operation. + /// + /// \param A Matrix A + /// \param B Matrix B + /// \param transpose_a If matrix A should be transposed. + /// \param transpose_b If matrix B should be transposed. + MatMul(const Output& A, + const Output& B, + const bool& transpose_a = 0, + const bool& transpose_b = 0); + + virtual NodeVector decompose_op() const override; + void pre_validate_and_infer_types() override; + + virtual std::shared_ptr + copy_with_new_args(const NodeVector& new_args) const override; + + bool get_transpose_a() const { return m_transpose_a; } + bool get_transpose_b() const { return m_transpose_b; } + private: + bool m_transpose_a; + bool m_transpose_b; + }; + } // namespace fluid +} // namespace ngraph diff --git a/src/ngraph/frontend/onnx_import/CMakeLists.txt b/src/ngraph/frontend/onnx_import/CMakeLists.txt index 8e92eb01414..d7522687ecd 100644 --- a/src/ngraph/frontend/onnx_import/CMakeLists.txt +++ b/src/ngraph/frontend/onnx_import/CMakeLists.txt @@ -37,12 +37,14 @@ add_library(onnx_import STATIC core/operator_set.hpp core/tensor.hpp core/value_info.hpp + default_opset.hpp exceptions.cpp exceptions.hpp op/acos.hpp op/acosh.cpp op/acosh.hpp op/add.hpp + op/add.cpp op/and.hpp op/argmax.cpp op/argmax.hpp @@ -96,6 +98,8 @@ add_library(onnx_import STATIC op/flatten.hpp op/floor.hpp op/gather.hpp + op/gather_nd.hpp + op/gather_nd.cpp op/gemm.cpp op/gemm.hpp op/global_average_pool.cpp diff --git 
a/src/ngraph/frontend/onnx_import/default_opset.hpp b/src/ngraph/frontend/onnx_import/default_opset.hpp new file mode 100644 index 00000000000..481c9cc4a2b --- /dev/null +++ b/src/ngraph/frontend/onnx_import/default_opset.hpp @@ -0,0 +1,9 @@ +#include "ngraph/opsets/opset1.hpp" + +namespace ngraph +{ + namespace onnx_import + { + namespace default_opset = ngraph::opset1; + } +} diff --git a/src/ngraph/frontend/onnx_import/op/abs.hpp b/src/ngraph/frontend/onnx_import/op/abs.hpp index bd4014d8cd7..9f064d97136 100644 --- a/src/ngraph/frontend/onnx_import/op/abs.hpp +++ b/src/ngraph/frontend/onnx_import/op/abs.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/abs.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector abs(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/acos.hpp b/src/ngraph/frontend/onnx_import/op/acos.hpp index 68bbd3fd207..6fbe57626b9 100644 --- a/src/ngraph/frontend/onnx_import/op/acos.hpp +++ b/src/ngraph/frontend/onnx_import/op/acos.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/acos.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector acos(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/acosh.cpp b/src/ngraph/frontend/onnx_import/op/acosh.cpp index e79b867703c..f01eb600d6f 100644 --- a/src/ngraph/frontend/onnx_import/op/acosh.cpp +++ b/src/ngraph/frontend/onnx_import/op/acosh.cpp @@ -17,11 +17,9 @@ #include #include "acosh.hpp" +#include "default_opset.hpp" #include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" 
-#include "ngraph/op/log.hpp" #include "ngraph/op/multiply.hpp" -#include "ngraph/op/sqrt.hpp" #include "ngraph/op/subtract.hpp" #include "ngraph/shape.hpp" @@ -42,15 +40,15 @@ namespace ngraph // arccosh(x) = ln(x + sqrt(x^2 - 1)) // - std::shared_ptr one_node{ngraph::op::Constant::create( + std::shared_ptr one_node{default_opset::Constant::create( data->get_element_type(), data->get_shape(), std::vector(ngraph::shape_size(data->get_shape()), 1.f))}; std::shared_ptr sqrt_node{ - std::make_shared(data * data - one_node)}; + std::make_shared(data * data - one_node)}; - return {std::make_shared(data + sqrt_node)}; + return {std::make_shared(data + sqrt_node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/add.cpp b/src/ngraph/frontend/onnx_import/op/add.cpp new file mode 100644 index 00000000000..1b5425e8cc5 --- /dev/null +++ b/src/ngraph/frontend/onnx_import/op/add.cpp @@ -0,0 +1,59 @@ +//***************************************************************************** +// Copyright 2017-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include "add.hpp" +#include "default_opset.hpp" +#include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" + +namespace ngraph +{ + namespace onnx_import + { + namespace op + { + namespace set_1 + { + NodeVector add(const Node& node) + { + auto left_rank = node.get_ng_inputs().at(0)->get_shape().size(); + auto right_rank = node.get_ng_inputs().at(1)->get_shape().size(); + auto axis = + node.get_attribute_value("axis", left_rank - right_rank); + NodeVector ng_inputs{ngraph::op::legacy_style_broadcast_for_binary_operation( + node.get_ng_inputs().at(0), node.get_ng_inputs().at(1), axis)}; + + return { + std::make_shared(ng_inputs.at(0), ng_inputs.at(1))}; + } + + } // namespace set_1 + + namespace set_7 + { + NodeVector add(const Node& node) + { + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; + } + + } // namespace set_7 + + } // namespace op + + } // namespace onnx_import + +} // namespace ngraph diff --git a/src/ngraph/frontend/onnx_import/op/add.hpp b/src/ngraph/frontend/onnx_import/op/add.hpp index b26793419e4..aa4b3952595 100644 --- a/src/ngraph/frontend/onnx_import/op/add.hpp +++ b/src/ngraph/frontend/onnx_import/op/add.hpp @@ -20,8 +20,6 @@ #include "core/node.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/util/broadcasting.hpp" namespace ngraph { @@ -31,27 +29,13 @@ namespace ngraph { namespace set_1 { - inline NodeVector add(const Node& node) - { - auto left_rank = node.get_ng_inputs().at(0)->get_shape().size(); - auto right_rank = node.get_ng_inputs().at(1)->get_shape().size(); - auto axis = - node.get_attribute_value("axis", left_rank - right_rank); - NodeVector ng_inputs{ngraph::op::legacy_style_broadcast_for_binary_operation( - node.get_ng_inputs().at(0), node.get_ng_inputs().at(1), axis)}; - - return {std::make_shared(ng_inputs.at(0), ng_inputs.at(1))}; - } + NodeVector 
add(const Node& node); } // namespace set_1 namespace set_7 { - inline NodeVector add(const Node& node) - { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; - } + NodeVector add(const Node& node); } // namespace set_7 diff --git a/src/ngraph/frontend/onnx_import/op/and.hpp b/src/ngraph/frontend/onnx_import/op/and.hpp index 9f8574c3417..14a9529b168 100644 --- a/src/ngraph/frontend/onnx_import/op/and.hpp +++ b/src/ngraph/frontend/onnx_import/op/and.hpp @@ -19,6 +19,7 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/and.hpp" @@ -32,7 +33,7 @@ namespace ngraph { inline NodeVector logical_and(const Node& node) { - return {std::make_shared( + return {std::make_shared( node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; } diff --git a/src/ngraph/frontend/onnx_import/op/argmax.cpp b/src/ngraph/frontend/onnx_import/op/argmax.cpp index ee5bd154227..09b919719df 100644 --- a/src/ngraph/frontend/onnx_import/op/argmax.cpp +++ b/src/ngraph/frontend/onnx_import/op/argmax.cpp @@ -17,7 +17,7 @@ #include "argmax.hpp" #include "core/node.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/argmax.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/reduction.hpp" namespace ngraph @@ -30,7 +30,7 @@ namespace ngraph { NodeVector argmax(const Node& node) { - return {reduction::make_ng_index_reduction_op(node)}; + return {reduction::make_ng_index_reduction_op(node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/argmin.cpp b/src/ngraph/frontend/onnx_import/op/argmin.cpp index 33d6dc288a1..651c717bf73 100644 --- a/src/ngraph/frontend/onnx_import/op/argmin.cpp +++ b/src/ngraph/frontend/onnx_import/op/argmin.cpp @@ -17,7 +17,7 @@ #include "argmin.hpp" #include "core/node.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/argmin.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/reduction.hpp" namespace ngraph @@ -30,7 +30,7 @@ namespace ngraph { 
NodeVector argmin(const Node& node) { - return {reduction::make_ng_index_reduction_op(node)}; + return {reduction::make_ng_index_reduction_op(node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/asin.hpp b/src/ngraph/frontend/onnx_import/op/asin.hpp index 4bc01b4ad20..38222862c8e 100644 --- a/src/ngraph/frontend/onnx_import/op/asin.hpp +++ b/src/ngraph/frontend/onnx_import/op/asin.hpp @@ -19,6 +19,7 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/asin.hpp" @@ -32,7 +33,7 @@ namespace ngraph { inline NodeVector asin(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/asinh.cpp b/src/ngraph/frontend/onnx_import/op/asinh.cpp index 80f967e3c9e..9eed3112e85 100644 --- a/src/ngraph/frontend/onnx_import/op/asinh.cpp +++ b/src/ngraph/frontend/onnx_import/op/asinh.cpp @@ -17,11 +17,9 @@ #include #include "asinh.hpp" +#include "default_opset.hpp" #include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/log.hpp" #include "ngraph/op/multiply.hpp" -#include "ngraph/op/sqrt.hpp" #include "ngraph/shape.hpp" namespace ngraph @@ -41,15 +39,15 @@ namespace ngraph // asinh(x) = ln(x + sqrt(x^2 + 1)) // - std::shared_ptr one_node{ngraph::op::Constant::create( + std::shared_ptr one_node{default_opset::Constant::create( data->get_element_type(), data->get_shape(), std::vector(ngraph::shape_size(data->get_shape()), 1.f))}; std::shared_ptr sqrt_node{ - std::make_shared(data * data + one_node)}; + std::make_shared(data * data + one_node)}; - return {std::make_shared(data + sqrt_node)}; + return {std::make_shared(data + sqrt_node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/atan.hpp b/src/ngraph/frontend/onnx_import/op/atan.hpp index 5396e601c49..d1af78bc3fd 100644 --- 
a/src/ngraph/frontend/onnx_import/op/atan.hpp +++ b/src/ngraph/frontend/onnx_import/op/atan.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/atan.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector atan(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/atanh.cpp b/src/ngraph/frontend/onnx_import/op/atanh.cpp index e46316af02c..137df9cddc9 100644 --- a/src/ngraph/frontend/onnx_import/op/atanh.cpp +++ b/src/ngraph/frontend/onnx_import/op/atanh.cpp @@ -17,9 +17,8 @@ #include #include "atanh.hpp" +#include "default_opset.hpp" #include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/log.hpp" #include "ngraph/op/multiply.hpp" #include "ngraph/op/subtract.hpp" #include "ngraph/shape.hpp" @@ -42,18 +41,18 @@ namespace ngraph // = 0.5 * (ln(1 + x) - ln(1 - x)) // - std::shared_ptr one_node{ngraph::op::Constant::create( + std::shared_ptr one_node{default_opset::Constant::create( data->get_element_type(), data->get_shape(), std::vector(ngraph::shape_size(data->get_shape()), 1.f))}; - std::shared_ptr half_node{ngraph::op::Constant::create( + std::shared_ptr half_node{default_opset::Constant::create( data->get_element_type(), data->get_shape(), std::vector(ngraph::shape_size(data->get_shape()), 0.5f))}; - return {half_node * (std::make_shared(one_node + data) - - std::make_shared(one_node - data))}; + return {half_node * (std::make_shared(one_node + data) - + std::make_shared(one_node - data))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/batch_norm.cpp b/src/ngraph/frontend/onnx_import/op/batch_norm.cpp index 277c5e2983a..86fcf9e9bb6 100644 --- a/src/ngraph/frontend/onnx_import/op/batch_norm.cpp +++ b/src/ngraph/frontend/onnx_import/op/batch_norm.cpp @@ -19,8 
+19,9 @@ #include "batch_norm.hpp" #include "core/null_node.hpp" +#include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/op/batch_norm.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -56,7 +57,7 @@ namespace ngraph { mean = inputs.at(3); var = inputs.at(4); - return {std::make_shared( + return {std::make_shared( x, scale, bias, mean, var, epsilon), after_bn_mean, after_bn_var, @@ -64,12 +65,12 @@ namespace ngraph saved_var}; } - return { - std::make_shared(x, scale, bias, epsilon), - after_bn_mean, - after_bn_var, - saved_mean, - saved_var}; + return {std::make_shared( + x, scale, bias, epsilon), + after_bn_mean, + after_bn_var, + saved_mean, + saved_var}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/cast.cpp b/src/ngraph/frontend/onnx_import/op/cast.cpp index 760168830fc..43f6ee375c1 100644 --- a/src/ngraph/frontend/onnx_import/op/cast.cpp +++ b/src/ngraph/frontend/onnx_import/op/cast.cpp @@ -16,7 +16,7 @@ #include #include "cast.hpp" -#include "ngraph/op/convert.hpp" +#include "default_opset.hpp" #include "ngraph/type/element_type.hpp" #include "utils/common.hpp" @@ -34,7 +34,7 @@ namespace ngraph int64_t target_type = node.get_attribute_value("to"); element::Type elem_type = common::get_ngraph_element_type(target_type); - return {std::make_shared(data, elem_type)}; + return {std::make_shared(data, elem_type)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/ceil.hpp b/src/ngraph/frontend/onnx_import/op/ceil.hpp index cb04bf5c123..5ff6935f652 100644 --- a/src/ngraph/frontend/onnx_import/op/ceil.hpp +++ b/src/ngraph/frontend/onnx_import/op/ceil.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/ceiling.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector ceil(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return 
{std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/clip.cpp b/src/ngraph/frontend/onnx_import/op/clip.cpp index 2681a22d37d..2c072d7642a 100644 --- a/src/ngraph/frontend/onnx_import/op/clip.cpp +++ b/src/ngraph/frontend/onnx_import/op/clip.cpp @@ -18,12 +18,9 @@ #include #include "clip.hpp" +#include "default_opset.hpp" #include "ngraph/builder/make_constant.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/clamp.hpp" -#include "ngraph/op/maximum.hpp" -#include "ngraph/op/minimum.hpp" -#include "ngraph/op/reshape.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -43,7 +40,7 @@ namespace ngraph const double min_value = node.get_attribute_value( "min", std::numeric_limits::lowest()); - return {std::make_shared(data, min_value, max_value)}; + return {std::make_shared(data, min_value, max_value)}; } } // namespace set_1 @@ -83,12 +80,12 @@ namespace ngraph data_type, data_shape, std::numeric_limits::max()); } - auto max_of_min_and_data = std::make_shared( + auto max_of_min_and_data = std::make_shared( min, data, ngraph::op::AutoBroadcastSpec(ngraph::op::AutoBroadcastType::NUMPY)); - return {std::make_shared( + return {std::make_shared( max, max_of_min_and_data, ngraph::op::AutoBroadcastSpec(ngraph::op::AutoBroadcastType::NUMPY))}; diff --git a/src/ngraph/frontend/onnx_import/op/concat.cpp b/src/ngraph/frontend/onnx_import/op/concat.cpp index ae7c678c925..00c9ab0c193 100644 --- a/src/ngraph/frontend/onnx_import/op/concat.cpp +++ b/src/ngraph/frontend/onnx_import/op/concat.cpp @@ -17,6 +17,7 @@ #include #include "concat.hpp" +#include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/op/concat.hpp" #include "utils/common.hpp" @@ -36,7 +37,7 @@ namespace ngraph size_t valid_axis = common::validate_axis(node, axis, inputs.at(0)->get_shape().size()); - return {std::make_shared(inputs, valid_axis)}; + return {std::make_shared(inputs, valid_axis)}; } } // namespace 
set_1 diff --git a/src/ngraph/frontend/onnx_import/op/constant.cpp b/src/ngraph/frontend/onnx_import/op/constant.cpp index 093a7746863..5159c4b482d 100644 --- a/src/ngraph/frontend/onnx_import/op/constant.cpp +++ b/src/ngraph/frontend/onnx_import/op/constant.cpp @@ -16,6 +16,7 @@ #include "constant.hpp" #include "core/tensor.hpp" +#include "default_opset.hpp" #include "ngraph/op/constant.hpp" namespace ngraph @@ -29,98 +30,99 @@ namespace ngraph namespace { template - inline std::shared_ptr + inline std::shared_ptr __make_ng_constant(const element::Type& type, const Tensor& tensor) { - return std::make_shared( + return std::make_shared( type, tensor.get_shape(), tensor.get_data()); } template - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { throw error::tensor::unsupported_data_type{tensor}; } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::f16, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::f32, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::f64, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::i8, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::i16, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::i32, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::i64, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr 
make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::u8, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::u16, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::u32, tensor); } template <> - inline std::shared_ptr + inline std::shared_ptr make_ng_constant(const Tensor& tensor) { return __make_ng_constant(element::u64, tensor); } - inline std::shared_ptr make_constant(const Tensor& tensor) + inline std::shared_ptr + make_constant(const Tensor& tensor) { #define MAKE_NG_CONSTANT(data_type_) \ case data_type_: return make_ng_constant(tensor) diff --git a/src/ngraph/frontend/onnx_import/op/conv.cpp b/src/ngraph/frontend/onnx_import/op/conv.cpp index fec8e3a63d5..8f24d1d1abf 100644 --- a/src/ngraph/frontend/onnx_import/op/conv.cpp +++ b/src/ngraph/frontend/onnx_import/op/conv.cpp @@ -19,17 +19,14 @@ #include #include "conv.hpp" +#include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/builder/reshape.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/concat.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convolution.hpp" #include "ngraph/op/fused/group_conv.hpp" #include "ngraph/op/slice.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/convpool.hpp" namespace ngraph @@ -61,7 +58,7 @@ namespace ngraph auto reshaped_filters = ngraph::builder::reshape(filters, filters_shape); - return std::make_shared( + return std::make_shared( data, reshaped_filters, strides, @@ -72,13 +69,13 @@ namespace ngraph } else { - return std::make_shared(data, - filters, - strides, - padding_below, - padding_above, - dilations, - auto_pad); + return std::make_shared(data, + filters, + strides, + padding_below, + 
padding_above, + dilations, + auto_pad); } } @@ -134,11 +131,11 @@ namespace ngraph auto bias = inputs.at(2); const Shape& new_shape = conv_node->get_shape(); - auto broadcasted_bias = std::make_shared( + auto broadcasted_bias = std::make_shared( bias, new_shape, ngraph::op::calculate_broadcast_axes(new_shape, bias->get_shape(), 1)); - return {std::make_shared(conv_node, broadcasted_bias)}; + return {std::make_shared(conv_node, broadcasted_bias)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/conv_integer.cpp b/src/ngraph/frontend/onnx_import/op/conv_integer.cpp index 08aca6ed2ef..6a9777393ec 100644 --- a/src/ngraph/frontend/onnx_import/op/conv_integer.cpp +++ b/src/ngraph/frontend/onnx_import/op/conv_integer.cpp @@ -15,11 +15,11 @@ //***************************************************************************** #include "conv_integer.hpp" +#include "exceptions.hpp" #include "ngraph/builder/make_constant.hpp" -#include "ngraph/frontend/onnx_import/exceptions.hpp" -#include "ngraph/frontend/onnx_import/utils/convpool.hpp" -#include "ngraph/op/quantized_convolution.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "ngraph/opsets/opset0.hpp" +#include "utils/convpool.hpp" using namespace ngraph::builder; @@ -66,7 +66,7 @@ namespace ngraph if (num_inputs == 2) { - return {std::make_shared( + return {std::make_shared( input, filters, window_movement_strides, @@ -92,7 +92,7 @@ namespace ngraph filters_zero_point = inputs.at(3); } - return {std::make_shared( + return {std::make_shared( input, filters, window_movement_strides, diff --git a/src/ngraph/frontend/onnx_import/op/conv_transpose.cpp b/src/ngraph/frontend/onnx_import/op/conv_transpose.cpp index e52bfade8f2..52acc393fef 100644 --- a/src/ngraph/frontend/onnx_import/op/conv_transpose.cpp +++ b/src/ngraph/frontend/onnx_import/op/conv_transpose.cpp @@ -27,6 +27,7 @@ #include "ngraph/op/fused/group_conv_transpose.hpp" #include "ngraph/op/util/attr_types.hpp" #include 
"ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" #include "ngraph/shape.hpp" #include "utils/convpool.hpp" @@ -84,7 +85,7 @@ namespace ngraph std::shared_ptr conv_node; if (!output_shape.empty()) { - conv_node = std::make_shared( + conv_node = std::make_shared( data, filters, strides, @@ -95,7 +96,7 @@ namespace ngraph } else { - conv_node = std::make_shared( + conv_node = std::make_shared( data, filters, strides, @@ -115,7 +116,7 @@ namespace ngraph auto bias = inputs.at(2); - return {std::make_shared( + return {std::make_shared( conv_node, ngraph::op::make_broadcast_node(bias, conv_node->get_shape(), 1))}; } diff --git a/src/ngraph/frontend/onnx_import/op/cum_sum.cpp b/src/ngraph/frontend/onnx_import/op/cum_sum.cpp index 6aaff508bc3..7bb6e2b313a 100644 --- a/src/ngraph/frontend/onnx_import/op/cum_sum.cpp +++ b/src/ngraph/frontend/onnx_import/op/cum_sum.cpp @@ -17,8 +17,8 @@ #include #include "cum_sum.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/cum_sum.hpp" +#include "default_opset.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -42,10 +42,11 @@ namespace ngraph } else { - axis = ngraph::op::Constant::create(element::i64, Shape{}, {0}); // default + axis = + default_opset::Constant::create(element::i64, Shape{}, {0}); // default } return NodeVector{ - std::make_shared(data, axis, exclusive, reverse)}; + std::make_shared(data, axis, exclusive, reverse)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/depth_to_space.cpp b/src/ngraph/frontend/onnx_import/op/depth_to_space.cpp index 3da4d59dc42..db3685780de 100644 --- a/src/ngraph/frontend/onnx_import/op/depth_to_space.cpp +++ b/src/ngraph/frontend/onnx_import/op/depth_to_space.cpp @@ -15,7 +15,7 @@ //***************************************************************************** #include "depth_to_space.hpp" -#include "ngraph/op/fused/depth_to_space.hpp" +#include "default_opset.hpp" namespace ngraph { @@ -30,11 +30,12 @@ namespace ngraph 
auto data = node.get_ng_inputs().at(0); const auto mode = node.get_attribute_value("mode", "DCR"); const auto ngraph_mode = - (mode == "DCR") ? ngraph::op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST - : ngraph::op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST; + (mode == "DCR") + ? default_opset::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST + : default_opset::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST; const auto block_size = node.get_attribute_value("blocksize"); - return NodeVector{ - std::make_shared(data, ngraph_mode, block_size)}; + return NodeVector{std::make_shared( + data, ngraph_mode, block_size)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/dequantize_linear.cpp b/src/ngraph/frontend/onnx_import/op/dequantize_linear.cpp index b219ecf18c0..5ad61fdd6fb 100644 --- a/src/ngraph/frontend/onnx_import/op/dequantize_linear.cpp +++ b/src/ngraph/frontend/onnx_import/op/dequantize_linear.cpp @@ -17,11 +17,13 @@ #include #include +#include "default_opset.hpp" #include "dequantize_linear.hpp" #include "ngraph/axis_set.hpp" #include "ngraph/builder/make_constant.hpp" #include "ngraph/op/convert.hpp" #include "ngraph/op/dequantize.hpp" +#include "ngraph/opsets/opset0.hpp" #include "ngraph/shape.hpp" namespace ngraph @@ -73,11 +75,11 @@ namespace ngraph if (x->get_element_type() != zero_point->get_element_type()) { - zero_point = std::make_shared(zero_point, - x->get_element_type()); + zero_point = std::make_shared( + zero_point, x->get_element_type()); } - return {std::make_shared( + return {std::make_shared( x, x_scale, zero_point, x_scale->get_element_type(), axes)}; } diff --git a/src/ngraph/frontend/onnx_import/op/div.hpp b/src/ngraph/frontend/onnx_import/op/div.hpp index a50ca56d534..261a3ebb2f7 100644 --- a/src/ngraph/frontend/onnx_import/op/div.hpp +++ b/src/ngraph/frontend/onnx_import/op/div.hpp @@ -19,9 +19,10 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include 
"ngraph/op/divide.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -40,7 +41,8 @@ namespace ngraph NodeVector ng_inputs{ngraph::op::legacy_style_broadcast_for_binary_operation( node.get_ng_inputs().at(0), node.get_ng_inputs().at(1), axis)}; - return {std::make_shared(ng_inputs.at(0), ng_inputs.at(1))}; + return { + std::make_shared(ng_inputs.at(0), ng_inputs.at(1))}; } } // namespace set_1 @@ -49,8 +51,8 @@ namespace ngraph { inline NodeVector div(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/elu.cpp b/src/ngraph/frontend/onnx_import/op/elu.cpp index 61f4ebd4c44..195694dd315 100644 --- a/src/ngraph/frontend/onnx_import/op/elu.cpp +++ b/src/ngraph/frontend/onnx_import/op/elu.cpp @@ -17,9 +17,8 @@ #include #include +#include "default_opset.hpp" #include "elu.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/elu.hpp" namespace ngraph { @@ -34,7 +33,7 @@ namespace ngraph auto data = node.get_ng_inputs().at(0); double alpha = node.get_attribute_value("alpha", 1); - return NodeVector{std::make_shared(data, alpha)}; + return NodeVector{std::make_shared(data, alpha)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/equal.hpp b/src/ngraph/frontend/onnx_import/op/equal.hpp index ae7af50db76..255f25e4508 100644 --- a/src/ngraph/frontend/onnx_import/op/equal.hpp +++ b/src/ngraph/frontend/onnx_import/op/equal.hpp @@ -19,9 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/equal.hpp" -#include "ngraph/op/util/broadcasting.hpp" namespace ngraph { @@ -33,8 +32,8 @@ namespace ngraph { inline NodeVector equal(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), - 
node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/erf.hpp b/src/ngraph/frontend/onnx_import/op/erf.hpp index 8c1c2e18951..2684c573d62 100644 --- a/src/ngraph/frontend/onnx_import/op/erf.hpp +++ b/src/ngraph/frontend/onnx_import/op/erf.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/erf.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector erf(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/exp.hpp b/src/ngraph/frontend/onnx_import/op/exp.hpp index 64b27cc90b6..cdd0cdd1c09 100644 --- a/src/ngraph/frontend/onnx_import/op/exp.hpp +++ b/src/ngraph/frontend/onnx_import/op/exp.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/exp.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector exp(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/expand.cpp b/src/ngraph/frontend/onnx_import/op/expand.cpp index 17886fff0bb..888fc5d5d51 100644 --- a/src/ngraph/frontend/onnx_import/op/expand.cpp +++ b/src/ngraph/frontend/onnx_import/op/expand.cpp @@ -18,10 +18,10 @@ #include #include +#include "default_opset.hpp" #include "expand.hpp" #include "ngraph/descriptor/output.hpp" #include "ngraph/op/broadcast.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/op/experimental/dyn_broadcast.hpp" #include "ngraph/op/experimental/dyn_reshape.hpp" #include "ngraph/op/experimental/range.hpp" @@ -46,7 +46,8 @@ namespace ngraph "Ngraph 
does not support dynamic braodcasting for Expand op."); std::vector shape_vector = - ngraph::as_type_ptr(shape)->get_vector(); + ngraph::as_type_ptr(shape) + ->get_vector(); const ngraph::Shape shape_shape{shape_vector}; return {ngraph::op::numpy_style_broadcast(data, shape_shape)}; diff --git a/src/ngraph/frontend/onnx_import/op/floor.hpp b/src/ngraph/frontend/onnx_import/op/floor.hpp index 91c22126b0e..ee506281956 100644 --- a/src/ngraph/frontend/onnx_import/op/floor.hpp +++ b/src/ngraph/frontend/onnx_import/op/floor.hpp @@ -19,6 +19,7 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/floor.hpp" @@ -32,7 +33,7 @@ namespace ngraph { inline NodeVector floor(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/gather.hpp b/src/ngraph/frontend/onnx_import/op/gather.hpp index bd07e23014e..491e37d10ff 100644 --- a/src/ngraph/frontend/onnx_import/op/gather.hpp +++ b/src/ngraph/frontend/onnx_import/op/gather.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/gather.hpp" #include "utils/common.hpp" namespace ngraph @@ -39,7 +39,10 @@ namespace ngraph auto axis = node.get_attribute_value("axis", 0); auto valid_axis = common::validate_axis(node, axis, data->get_shape().size()); - return {std::make_shared(data, indices, valid_axis)}; + return {std::make_shared( + data, + indices, + default_opset::Constant::create(element::i64, Shape{}, {valid_axis}))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/gather_nd.cpp b/src/ngraph/frontend/onnx_import/op/gather_nd.cpp new file mode 100644 index 00000000000..14de7da01e4 --- /dev/null +++ b/src/ngraph/frontend/onnx_import/op/gather_nd.cpp @@ -0,0 +1,43 @@ 
+//***************************************************************************** +// Copyright 2017-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include "ngraph/opsets/opset0.hpp" +#include "utils/common.hpp" + +namespace ngraph +{ + namespace onnx_import + { + namespace op + { + namespace set_1 + { + NodeVector gather_nd(const Node& node) + { + NodeVector ng_inputs{node.get_ng_inputs()}; + auto data = ng_inputs.at(0); + auto indices = ng_inputs.at(1); + + return {std::make_shared(data, indices)}; + } + + } // namespace set_1 + + } // namespace op + + } // namespace onnx_import + +} // namespace ngraph diff --git a/src/ngraph/frontend/onnx_import/op/gather_nd.hpp b/src/ngraph/frontend/onnx_import/op/gather_nd.hpp new file mode 100644 index 00000000000..8ea8730a137 --- /dev/null +++ b/src/ngraph/frontend/onnx_import/op/gather_nd.hpp @@ -0,0 +1,38 @@ +//***************************************************************************** +// Copyright 2017-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include "core/node.hpp" +#include "ngraph/node.hpp" + +namespace ngraph +{ + namespace onnx_import + { + namespace op + { + namespace set_1 + { + NodeVector gather_nd(const Node& node); + + } // namespace set_1 + + } // namespace op + + } // namespace onnx_import + +} // namespace ngraph diff --git a/src/ngraph/frontend/onnx_import/op/gemm.cpp b/src/ngraph/frontend/onnx_import/op/gemm.cpp index 4a543a1d0e3..31aab45509a 100644 --- a/src/ngraph/frontend/onnx_import/op/gemm.cpp +++ b/src/ngraph/frontend/onnx_import/op/gemm.cpp @@ -16,6 +16,7 @@ #include +#include "default_opset.hpp" #include "gemm.hpp" #include "ngraph/builder/reshape.hpp" #include "ngraph/op/add.hpp" @@ -44,16 +45,16 @@ namespace ngraph } else { - input_c = ngraph::op::Constant::create( + input_c = default_opset::Constant::create( input_b->get_element_type(), ngraph::Shape{}, {0}); } const auto alpha = node.get_attribute_value("alpha", 1); const auto beta = node.get_attribute_value("beta", 1); - const auto alpha_node = ngraph::op::Constant::create( + const auto alpha_node = default_opset::Constant::create( element::Type_t::f32, Shape{}, std::vector{alpha}); - const auto beta_node = ngraph::op::Constant::create( + const auto beta_node = default_opset::Constant::create( element::Type_t::f32, Shape{}, std::vector{beta}); const bool trans_a = node.get_attribute_value("transA", 0); @@ -75,12 +76,12 @@ namespace ngraph auto matmul_node = std::make_shared(input_a, input_b); auto 
alpha_times_product = - std::make_shared(alpha_node, matmul_node); + std::make_shared(alpha_node, matmul_node); auto beta_times_input_c = - std::make_shared(beta_node, input_c); + std::make_shared(beta_node, input_c); - return NodeVector{std::make_shared(alpha_times_product, - beta_times_input_c)}; + return NodeVector{std::make_shared(alpha_times_product, + beta_times_input_c)}; } } // namespace set_1 @@ -100,31 +101,31 @@ namespace ngraph } else { - input_c = ngraph::op::Constant::create( + input_c = default_opset::Constant::create( input_b->get_element_type(), ngraph::Shape{}, {0}); } const auto alpha = node.get_attribute_value("alpha", 1); const auto beta = node.get_attribute_value("beta", 1); - const auto alpha_node = ngraph::op::Constant::create( + const auto alpha_node = default_opset::Constant::create( element::Type_t::f32, Shape{}, std::vector{alpha}); - const auto beta_node = ngraph::op::Constant::create( + const auto beta_node = default_opset::Constant::create( element::Type_t::f32, Shape{}, std::vector{beta}); const bool trans_a = node.get_attribute_value("transA", 0); const bool trans_b = node.get_attribute_value("transB", 0); auto matmul_node = - std::make_shared(input_a, input_b, trans_a, trans_b); + std::make_shared(input_a, input_b, trans_a, trans_b); auto alpha_times_product = - std::make_shared(alpha_node, matmul_node); + std::make_shared(alpha_node, matmul_node); auto beta_times_input_c = - std::make_shared(beta_node, input_c); + std::make_shared(beta_node, input_c); - return NodeVector{std::make_shared(alpha_times_product, - beta_times_input_c)}; + return NodeVector{std::make_shared(alpha_times_product, + beta_times_input_c)}; } } // namespace set_6 diff --git a/src/ngraph/frontend/onnx_import/op/greater.hpp b/src/ngraph/frontend/onnx_import/op/greater.hpp index 11c5f9248f4..5a98544e19d 100644 --- a/src/ngraph/frontend/onnx_import/op/greater.hpp +++ b/src/ngraph/frontend/onnx_import/op/greater.hpp @@ -19,6 +19,7 @@ #include #include 
"core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/greater.hpp" #include "ngraph/op/util/broadcasting.hpp" @@ -33,8 +34,8 @@ namespace ngraph { inline NodeVector greater(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/hard_sigmoid.cpp b/src/ngraph/frontend/onnx_import/op/hard_sigmoid.cpp index 5650103d7ca..17d6b096272 100644 --- a/src/ngraph/frontend/onnx_import/op/hard_sigmoid.cpp +++ b/src/ngraph/frontend/onnx_import/op/hard_sigmoid.cpp @@ -16,11 +16,8 @@ #include +#include "default_opset.hpp" #include "hard_sigmoid.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/hard_sigmoid.hpp" - -using namespace ngraph::op; namespace ngraph { @@ -34,17 +31,17 @@ namespace ngraph { const auto data = node.get_ng_inputs().at(0); - const auto alpha = Constant::create( + const auto alpha = default_opset::Constant::create( data->get_element_type(), Shape{}, std::vector{node.get_attribute_value("alpha", 0.2)}); - const auto beta = Constant::create( + const auto beta = default_opset::Constant::create( data->get_element_type(), Shape{}, std::vector{node.get_attribute_value("beta", 0.5)}); - return {std::make_shared(data, alpha, beta)}; + return {std::make_shared(data, alpha, beta)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/hardmax.cpp b/src/ngraph/frontend/onnx_import/op/hardmax.cpp index e83f594042b..96f76523b34 100644 --- a/src/ngraph/frontend/onnx_import/op/hardmax.cpp +++ b/src/ngraph/frontend/onnx_import/op/hardmax.cpp @@ -18,8 +18,7 @@ #include "exceptions.hpp" #include "ngraph/builder/reshape.hpp" #include "ngraph/frontend/onnx_import/utils/common.hpp" -#include "ngraph/op/argmax.hpp" -#include "ngraph/op/embedding_lookup.hpp" +#include "ngraph/opsets/opset0.hpp" namespace 
ngraph { @@ -42,7 +41,7 @@ namespace ngraph const auto& coerced_shape = coerced_tensor->get_shape(); const std::shared_ptr argmax_2d = - std::make_shared(coerced_tensor, 1, element::i64); + std::make_shared(coerced_tensor, 1, element::i64); std::shared_ptr eye_matrix = common::square_identity(coerced_shape.at(1), input->get_element_type()); @@ -50,7 +49,7 @@ namespace ngraph // the results are elements of the eye_matrix indexed by argmax_2d values // in other words: eye_matrix[argmax_2d] auto results = - std::make_shared(argmax_2d, eye_matrix); + std::make_shared(argmax_2d, eye_matrix); return {ngraph::builder::reshape(results, input_shape)}; } diff --git a/src/ngraph/frontend/onnx_import/op/instance_norm.cpp b/src/ngraph/frontend/onnx_import/op/instance_norm.cpp index e0afe45f214..ad06f558ac3 100644 --- a/src/ngraph/frontend/onnx_import/op/instance_norm.cpp +++ b/src/ngraph/frontend/onnx_import/op/instance_norm.cpp @@ -17,18 +17,18 @@ #include #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "instance_norm.hpp" #include "ngraph/axis_set.hpp" #include "ngraph/builder/reduce_ops.hpp" #include "ngraph/op/add.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/op/divide.hpp" #include "ngraph/op/multiply.hpp" #include "ngraph/op/sqrt.hpp" #include "ngraph/op/subtract.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/common.hpp" namespace ngraph @@ -63,9 +63,9 @@ namespace ngraph common::get_monotonic_range(data->get_shape().size(), 2)}; const std::shared_ptr eps_node = - std::make_shared(data->get_element_type(), - data->get_shape(), - std::vector{epsilon}); + std::make_shared(data->get_element_type(), + data->get_shape(), + std::vector{epsilon}); scale = ngraph::op::legacy_style_broadcast_for_binary_operation(data, scale, 1) .at(1); @@ -73,14 +73,14 @@ namespace ngraph .at(1); std::shared_ptr mean = builder::mean(data, reduction_axes); - mean = 
std::make_shared( + mean = std::make_shared( mean, data->get_shape(), reduction_axes); std::shared_ptr variance = builder::variance(data, reduction_axes); - variance = std::make_shared( + variance = std::make_shared( variance, data->get_shape(), reduction_axes); - const auto sqrt = std::make_shared(variance + eps_node); + const auto sqrt = std::make_shared(variance + eps_node); return {scale * (data - mean) / sqrt + bias}; } diff --git a/src/ngraph/frontend/onnx_import/op/leaky_relu.cpp b/src/ngraph/frontend/onnx_import/op/leaky_relu.cpp index 119d6493d96..6fdb56111c6 100644 --- a/src/ngraph/frontend/onnx_import/op/leaky_relu.cpp +++ b/src/ngraph/frontend/onnx_import/op/leaky_relu.cpp @@ -16,11 +16,12 @@ #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "leaky_relu.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/op/maximum.hpp" #include "ngraph/op/multiply.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -39,10 +40,10 @@ namespace ngraph << " alpha value should be in range (0,1)"; std::shared_ptr alpha_node = - std::make_shared(data->get_element_type(), - data->get_shape(), - std::vector{alpha}); - return {std::make_shared(data * alpha_node, data)}; + std::make_shared(data->get_element_type(), + data->get_shape(), + std::vector{alpha}); + return {std::make_shared(data * alpha_node, data)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/less.hpp b/src/ngraph/frontend/onnx_import/op/less.hpp index c8ea0987891..d61ab8442ad 100644 --- a/src/ngraph/frontend/onnx_import/op/less.hpp +++ b/src/ngraph/frontend/onnx_import/op/less.hpp @@ -19,6 +19,7 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/less.hpp" #include "ngraph/op/util/broadcasting.hpp" @@ -33,8 +34,8 @@ namespace ngraph { inline NodeVector less(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; + return 
{std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/log_softmax.cpp b/src/ngraph/frontend/onnx_import/op/log_softmax.cpp index 81bce51ff24..7ca7f26998b 100644 --- a/src/ngraph/frontend/onnx_import/op/log_softmax.cpp +++ b/src/ngraph/frontend/onnx_import/op/log_softmax.cpp @@ -17,7 +17,7 @@ #include #include "log_softmax.hpp" -#include "ngraph/op/fused/log_softmax.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -34,7 +34,7 @@ namespace ngraph auto data_shape = data->get_shape(); int axis = node.get_attribute_value("axis", 1); - return {std::make_shared(data, axis)}; + return {std::make_shared(data, axis)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/lp_norm.cpp b/src/ngraph/frontend/onnx_import/op/lp_norm.cpp index abb3367e8e8..d1131471101 100644 --- a/src/ngraph/frontend/onnx_import/op/lp_norm.cpp +++ b/src/ngraph/frontend/onnx_import/op/lp_norm.cpp @@ -22,8 +22,8 @@ #include "lp_norm.hpp" #include "ngraph/axis_set.hpp" #include "ngraph/builder/norm.hpp" -#include "ngraph/op/broadcast.hpp" #include "ngraph/op/divide.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/common.hpp" namespace ngraph @@ -49,7 +49,7 @@ namespace ngraph const AxisSet reduction_axes{valid_axis}; std::shared_ptr norm = ngraph::builder::lp_norm( data, reduction_axes, static_cast(p_norm)); - norm = std::make_shared( + norm = std::make_shared( norm, data->get_shape(), reduction_axes); return {data / norm}; diff --git a/src/ngraph/frontend/onnx_import/op/lp_pool.cpp b/src/ngraph/frontend/onnx_import/op/lp_pool.cpp index da61d70f719..a953d611512 100644 --- a/src/ngraph/frontend/onnx_import/op/lp_pool.cpp +++ b/src/ngraph/frontend/onnx_import/op/lp_pool.cpp @@ -18,6 +18,7 @@ #include #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "lp_pool.hpp" #include "ngraph/axis_set.hpp" @@ -25,6 +26,7 @@ #include "ngraph/builder/split.hpp" 
#include "ngraph/op/concat.hpp" #include "ngraph/op/reshape.hpp" +#include "ngraph/opsets/opset0.hpp" #include "ngraph/util.hpp" #include "utils/common.hpp" @@ -61,13 +63,13 @@ namespace ngraph // output shape is all ones except N channel Shape output_shape(orig_shape.size(), 1); output_shape.at(0) = orig_shape.at(0); - slice = std::make_shared( + slice = std::make_shared( slice, ngraph::get_default_order(slice->get_shape().size()), output_shape); } - return {std::make_shared(slices, channel_axis)}; + return {std::make_shared(slices, channel_axis)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/lrn.cpp b/src/ngraph/frontend/onnx_import/op/lrn.cpp index 06d4bc19d0f..d966f77b196 100644 --- a/src/ngraph/frontend/onnx_import/op/lrn.cpp +++ b/src/ngraph/frontend/onnx_import/op/lrn.cpp @@ -16,8 +16,8 @@ #include +#include "default_opset.hpp" #include "lrn.hpp" -#include "ngraph/op/lrn.hpp" namespace ngraph { @@ -35,7 +35,7 @@ namespace ngraph double bias = node.get_attribute_value("bias", 1); size_t size = node.get_attribute_value("size"); - return {std::make_shared(data, alpha, beta, bias, size)}; + return {std::make_shared(data, alpha, beta, bias, size)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/lstm.cpp b/src/ngraph/frontend/onnx_import/op/lstm.cpp index 61547c97825..c308a062404 100644 --- a/src/ngraph/frontend/onnx_import/op/lstm.cpp +++ b/src/ngraph/frontend/onnx_import/op/lstm.cpp @@ -21,6 +21,7 @@ #include #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "lstm.hpp" #include "ngraph/builder/split.hpp" @@ -29,6 +30,7 @@ #include "ngraph/op/constant.hpp" #include "ngraph/op/fused/lstm_sequence.hpp" #include "ngraph/op/get_output_element.hpp" +#include "ngraph/opsets/opset0.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" @@ -94,7 +96,7 @@ namespace ngraph } else { - m_map[LSTMInput::LSTM_INPUT_B] = ngraph::op::Constant::create( + m_map[LSTMInput::LSTM_INPUT_B] = 
default_opset::Constant::create( element::f32, Shape{num_directions, gates_count * hidden_size}, std::vector(num_directions * gates_count * hidden_size, @@ -107,11 +109,13 @@ namespace ngraph } else { - m_map[LSTMInput::LSTM_INPUT_SEQ_LENGTHS] = ngraph::op::Constant::create( - element::i32, - Shape{batch_size}, - std::vector( - batch_size, m_map[LSTMInput::LSTM_INPUT_X]->get_shape().at(0))); + m_map[LSTMInput::LSTM_INPUT_SEQ_LENGTHS] = + default_opset::Constant::create( + element::i32, + Shape{batch_size}, + std::vector( + batch_size, + m_map[LSTMInput::LSTM_INPUT_X]->get_shape().at(0))); } // The initial value of the hidden. // Shape [num_directions, batch_size, hidden_size] @@ -121,7 +125,7 @@ namespace ngraph } else { - m_map[LSTMInput::LSTM_INPUT_INIT_H] = ngraph::op::Constant::create( + m_map[LSTMInput::LSTM_INPUT_INIT_H] = default_opset::Constant::create( element::f32, Shape{num_directions, batch_size, hidden_size}, std::vector(num_directions * batch_size * hidden_size, 0.f)); @@ -134,7 +138,7 @@ namespace ngraph } else { - m_map[LSTMInput::LSTM_INPUT_INIT_C] = ngraph::op::Constant::create( + m_map[LSTMInput::LSTM_INPUT_INIT_C] = default_opset::Constant::create( element::f32, Shape{num_directions, batch_size, hidden_size}, std::vector(num_directions * batch_size * hidden_size, 0.f)); @@ -146,7 +150,7 @@ namespace ngraph } else { - m_map[LSTMInput::LSTM_INPUT_P] = ngraph::op::Constant::create( + m_map[LSTMInput::LSTM_INPUT_P] = default_opset::Constant::create( element::f32, Shape{num_directions, peepholes_count * hidden_size}, std::vector(num_directions * peepholes_count * hidden_size, @@ -188,15 +192,15 @@ namespace ngraph " is invalid"); if (direction == "forward") { - m_direction = ngraph::op::LSTMSequence::direction::FORWARD; + m_direction = default_opset::LSTMSequence::direction::FORWARD; } else if (direction == "reverse") { - m_direction = ngraph::op::LSTMSequence::direction::REVERSE; + m_direction = default_opset::LSTMSequence::direction::REVERSE; } 
else // (direction == "bidirectional") { - m_direction = ngraph::op::LSTMSequence::direction::BIDIRECTIONAL; + m_direction = default_opset::LSTMSequence::direction::BIDIRECTIONAL; } } @@ -218,7 +222,7 @@ namespace ngraph LSTMNgInputMap input_map{node}; LSTMAttributes attributes{node}; - auto lstmSequence = std::make_shared( + auto lstmSequence = std::make_shared( input_map.at(LSTMInput::LSTM_INPUT_X), input_map.at(LSTMInput::LSTM_INPUT_INIT_H), input_map.at(LSTMInput::LSTM_INPUT_INIT_C), @@ -235,9 +239,9 @@ namespace ngraph attributes.m_activations, attributes.m_clip_threshold, attributes.m_input_forget); - return {std::make_shared(lstmSequence, 0), - std::make_shared(lstmSequence, 1), - std::make_shared(lstmSequence, 2)}; + return {std::make_shared(lstmSequence, 0), + std::make_shared(lstmSequence, 1), + std::make_shared(lstmSequence, 2)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/matmul.hpp b/src/ngraph/frontend/onnx_import/op/matmul.hpp index 9e8963cfdf0..46774c0550c 100644 --- a/src/ngraph/frontend/onnx_import/op/matmul.hpp +++ b/src/ngraph/frontend/onnx_import/op/matmul.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/fused/matmul.hpp" namespace ngraph { @@ -32,8 +32,8 @@ namespace ngraph { NodeVector matmul(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/max.hpp b/src/ngraph/frontend/onnx_import/op/max.hpp index 8402bc51d98..5ca8ca15756 100644 --- a/src/ngraph/frontend/onnx_import/op/max.hpp +++ b/src/ngraph/frontend/onnx_import/op/max.hpp @@ -17,8 +17,9 @@ #pragma once #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/maximum.hpp" +#include "ngraph/opsets/opset0.hpp" #include 
"utils/variadic.hpp" namespace ngraph @@ -31,7 +32,7 @@ namespace ngraph { inline NodeVector max(const Node& node) { - return variadic::make_ng_variadic_op(node); + return variadic::make_ng_variadic_op(node); } } // namespace set_1 @@ -40,7 +41,7 @@ namespace ngraph { inline NodeVector max(const Node& node) { - return variadic::make_ng_variadic_op(node); + return variadic::make_ng_variadic_op(node); } } // namespace set_8 diff --git a/src/ngraph/frontend/onnx_import/op/mean.cpp b/src/ngraph/frontend/onnx_import/op/mean.cpp index 295ddf3e111..37f8e2779eb 100644 --- a/src/ngraph/frontend/onnx_import/op/mean.cpp +++ b/src/ngraph/frontend/onnx_import/op/mean.cpp @@ -15,9 +15,11 @@ //***************************************************************************** #include "mean.hpp" +#include "default_opset.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/divide.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/variadic.hpp" namespace ngraph @@ -30,12 +32,12 @@ namespace ngraph { NodeVector mean(const Node& node) { - auto sum = variadic::make_ng_variadic_op(node).front(); + auto sum = variadic::make_ng_variadic_op(node).front(); auto shape = sum->get_shape(); // Create a Constant representing the number of inputs with the same shape as // sum - auto count = ngraph::op::Constant::create( + auto count = default_opset::Constant::create( sum->get_element_type(), shape, std::vector(shape_size(shape), node.get_ng_inputs().size())); @@ -49,12 +51,12 @@ namespace ngraph { NodeVector mean(const Node& node) { - auto sum = variadic::make_ng_variadic_op(node).front(); + auto sum = variadic::make_ng_variadic_op(node).front(); auto shape = sum->get_shape(); // Create a Constant representing the number of inputs with the same shape as // sum - auto count = ngraph::op::Constant::create( + auto count = default_opset::Constant::create( sum->get_element_type(), shape, std::vector(shape_size(shape), node.get_ng_inputs().size())); diff --git 
a/src/ngraph/frontend/onnx_import/op/mean_variance_normalization.cpp b/src/ngraph/frontend/onnx_import/op/mean_variance_normalization.cpp index 2b9d0604dbc..d86426d0bcc 100644 --- a/src/ngraph/frontend/onnx_import/op/mean_variance_normalization.cpp +++ b/src/ngraph/frontend/onnx_import/op/mean_variance_normalization.cpp @@ -19,6 +19,7 @@ #include "mean_variance_normalization.hpp" #include "ngraph/axis_set.hpp" #include "ngraph/op/fused/mvn.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/common.hpp" namespace ngraph @@ -37,7 +38,7 @@ namespace ngraph bool normalize_variance = node.get_attribute_value("normalize_variance", 1); - return {std::make_shared( + return {std::make_shared( data, across_channels, normalize_variance)}; } @@ -52,7 +53,7 @@ namespace ngraph std::vector valid_axes = common::validate_axes(node, axes, data->get_shape().size()); - return {std::make_shared(data, AxisSet(valid_axes))}; + return {std::make_shared(data, AxisSet(valid_axes))}; } } // namespace set_9 diff --git a/src/ngraph/frontend/onnx_import/op/min.hpp b/src/ngraph/frontend/onnx_import/op/min.hpp index d3a05d3592a..bfdfb8d61fb 100644 --- a/src/ngraph/frontend/onnx_import/op/min.hpp +++ b/src/ngraph/frontend/onnx_import/op/min.hpp @@ -17,8 +17,9 @@ #pragma once #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/minimum.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/variadic.hpp" namespace ngraph @@ -31,7 +32,7 @@ namespace ngraph { inline NodeVector min(const Node& node) { - return variadic::make_ng_variadic_op(node); + return variadic::make_ng_variadic_op(node); } } // namespace set_1 @@ -40,7 +41,7 @@ namespace ngraph { inline NodeVector min(const Node& node) { - return variadic::make_ng_variadic_op(node); + return variadic::make_ng_variadic_op(node); } } // namespace set_8 diff --git a/src/ngraph/frontend/onnx_import/op/mod.cpp b/src/ngraph/frontend/onnx_import/op/mod.cpp index bc9e2c23286..15cf2cec2f5 100644 
--- a/src/ngraph/frontend/onnx_import/op/mod.cpp +++ b/src/ngraph/frontend/onnx_import/op/mod.cpp @@ -16,6 +16,7 @@ #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "mod.hpp" #include "ngraph/op/abs.hpp" @@ -39,7 +40,7 @@ namespace ngraph ASSERT_IS_SUPPORTED(node, fmod == 1) << "Only 'fmod=1' mode is supported for mod operator."; - return {std::make_shared(dividend, divisor)}; + return {std::make_shared(dividend, divisor)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/mul.hpp b/src/ngraph/frontend/onnx_import/op/mul.hpp index a93ec441519..e80c2f4766a 100644 --- a/src/ngraph/frontend/onnx_import/op/mul.hpp +++ b/src/ngraph/frontend/onnx_import/op/mul.hpp @@ -19,10 +19,12 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/broadcast.hpp" #include "ngraph/op/multiply.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -41,8 +43,8 @@ namespace ngraph NodeVector ng_inputs{ngraph::op::legacy_style_broadcast_for_binary_operation( node.get_ng_inputs().at(0), node.get_ng_inputs().at(1), axis)}; - return { - std::make_shared(ng_inputs.at(0), ng_inputs.at(1))}; + return {std::make_shared(ng_inputs.at(0), + ng_inputs.at(1))}; } } // namespace set_1 @@ -51,8 +53,8 @@ namespace ngraph { inline NodeVector mul(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_7 diff --git a/src/ngraph/frontend/onnx_import/op/non_max_suppression.cpp b/src/ngraph/frontend/onnx_import/op/non_max_suppression.cpp index 2ac3ab87cf6..c95a123b87c 100644 --- a/src/ngraph/frontend/onnx_import/op/non_max_suppression.cpp +++ b/src/ngraph/frontend/onnx_import/op/non_max_suppression.cpp @@ -16,8 +16,8 @@ #include -#include "ngraph/frontend/onnx_import/exceptions.hpp" -#include 
"ngraph/op/constant.hpp" +#include "default_opset.hpp" +#include "exceptions.hpp" #include "ngraph/op/non_max_suppression.hpp" #include "ngraph/op/util/attr_types.hpp" #include "non_max_suppression.hpp" @@ -47,7 +47,7 @@ namespace ngraph else { max_output_boxes_per_class = - ngraph::op::Constant::create(element::i64, Shape{}, {0}); + default_opset::Constant::create(element::i64, Shape{}, {0}); } std::shared_ptr iou_threshold; @@ -57,7 +57,8 @@ namespace ngraph } else { - iou_threshold = ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + iou_threshold = + default_opset::Constant::create(element::f32, Shape{}, {.0f}); } std::shared_ptr score_threshold; @@ -68,7 +69,7 @@ namespace ngraph else { score_threshold = - ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + default_opset::Constant::create(element::f32, Shape{}, {.0f}); } const auto center_point_box = @@ -79,10 +80,10 @@ namespace ngraph const auto box_encoding = center_point_box == 0 - ? ngraph::op::v1::NonMaxSuppression::BoxEncodingType::CORNER - : ngraph::op::v1::NonMaxSuppression::BoxEncodingType::CENTER; + ? 
default_opset::NonMaxSuppression::BoxEncodingType::CORNER + : default_opset::NonMaxSuppression::BoxEncodingType::CENTER; - return {std::make_shared( + return {std::make_shared( boxes, scores, max_output_boxes_per_class, diff --git a/src/ngraph/frontend/onnx_import/op/not.hpp b/src/ngraph/frontend/onnx_import/op/not.hpp index ae3c99dac97..a908947bee5 100644 --- a/src/ngraph/frontend/onnx_import/op/not.hpp +++ b/src/ngraph/frontend/onnx_import/op/not.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/not.hpp" namespace ngraph { @@ -33,7 +33,7 @@ namespace ngraph inline NodeVector logical_not(const Node& node) { return { - std::make_shared(node.get_ng_inputs().at(0))}; + std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/onehot.cpp b/src/ngraph/frontend/onnx_import/op/onehot.cpp index c0c81a1c0a0..625f6dc9966 100644 --- a/src/ngraph/frontend/onnx_import/op/onehot.cpp +++ b/src/ngraph/frontend/onnx_import/op/onehot.cpp @@ -17,9 +17,8 @@ #include #include -#include "ngraph/op/convert.hpp" -#include "ngraph/op/one_hot.hpp" -#include "ngraph/op/slice.hpp" +#include "default_opset.hpp" +#include "ngraph/opsets/opset0.hpp" #include "onehot.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" @@ -36,18 +35,20 @@ namespace ngraph { NodeVector inputs{node.get_ng_inputs()}; auto indices = - std::make_shared(inputs.at(0), element::i64); + std::make_shared(inputs.at(0), element::i64); auto depth = reshape::interpret_as_scalar(inputs.at(1)); auto values = inputs.at(2); - std::shared_ptr off_value = reshape::interpret_as_scalar( - std::make_shared(values, Coordinate{0}, Coordinate{1})); - std::shared_ptr on_value = reshape::interpret_as_scalar( - std::make_shared(values, Coordinate{1}, Coordinate{2})); + std::shared_ptr off_value = + reshape::interpret_as_scalar(std::make_shared( + values, Coordinate{0}, Coordinate{1})); + 
std::shared_ptr on_value = + reshape::interpret_as_scalar(std::make_shared( + values, Coordinate{1}, Coordinate{2})); auto axis = node.get_attribute_value("axis", -1); - return {std::make_shared( + return {std::make_shared( indices, depth, on_value, off_value, axis)}; } diff --git a/src/ngraph/frontend/onnx_import/op/or.hpp b/src/ngraph/frontend/onnx_import/op/or.hpp index 111bceef3ac..f036590c5f9 100644 --- a/src/ngraph/frontend/onnx_import/op/or.hpp +++ b/src/ngraph/frontend/onnx_import/op/or.hpp @@ -19,9 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/or.hpp" -#include "ngraph/op/util/broadcasting.hpp" namespace ngraph { @@ -33,8 +32,8 @@ namespace ngraph { inline NodeVector logical_or(const Node& node) { - return {std::make_shared( - node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/pad.cpp b/src/ngraph/frontend/onnx_import/op/pad.cpp index c2b594ddbc2..dd2e41732c5 100644 --- a/src/ngraph/frontend/onnx_import/op/pad.cpp +++ b/src/ngraph/frontend/onnx_import/op/pad.cpp @@ -16,6 +16,7 @@ #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/constant.hpp" @@ -60,13 +61,13 @@ namespace ngraph ngraph::CoordinateDiff padding_below = paddings.first; ngraph::CoordinateDiff padding_above = paddings.second; - return {std::make_shared( + return {std::make_shared( data, - std::make_shared( + std::make_shared( element::i64, ngraph::Shape{padding_below.size()}, padding_below), - std::make_shared( + std::make_shared( element::i64, ngraph::Shape{padding_above.size()}, padding_above), - std::make_shared( + std::make_shared( data->get_element_type(), ngraph::Shape{}, std::vector{value}), pad_mode)}; } diff --git a/src/ngraph/frontend/onnx_import/op/pow.hpp 
b/src/ngraph/frontend/onnx_import/op/pow.hpp index 6a4bcd2f94e..238663dcf8b 100644 --- a/src/ngraph/frontend/onnx_import/op/pow.hpp +++ b/src/ngraph/frontend/onnx_import/op/pow.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/power.hpp" #include "ngraph/op/util/broadcasting.hpp" namespace ngraph @@ -33,8 +33,8 @@ namespace ngraph { inline NodeVector pow(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/prelu.cpp b/src/ngraph/frontend/onnx_import/op/prelu.cpp index be0c3538f9a..107a2d9638f 100644 --- a/src/ngraph/frontend/onnx_import/op/prelu.cpp +++ b/src/ngraph/frontend/onnx_import/op/prelu.cpp @@ -16,7 +16,7 @@ #include -#include "ngraph/op/fused/prelu.hpp" +#include "default_opset.hpp" #include "prelu.hpp" namespace ngraph @@ -32,7 +32,7 @@ namespace ngraph NodeVector ng_inputs{node.get_ng_inputs()}; const auto& data = ng_inputs.at(0); const auto& slope = ng_inputs.at(1); - return {std::make_shared(data, slope)}; + return {std::make_shared(data, slope)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/quant_conv.cpp b/src/ngraph/frontend/onnx_import/op/quant_conv.cpp index 67a6f531a00..4349cd181fa 100644 --- a/src/ngraph/frontend/onnx_import/op/quant_conv.cpp +++ b/src/ngraph/frontend/onnx_import/op/quant_conv.cpp @@ -18,14 +18,13 @@ #include #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/builder/quantization/quantized_linear_convolution.hpp" #include "ngraph/coordinate_diff.hpp" #include "ngraph/frontend/onnx_import/utils/convpool.hpp" -#include "ngraph/op/concat.hpp" -#include "ngraph/op/quantized_convolution.hpp" -#include "ngraph/op/slice.hpp" #include "ngraph/op/util/attr_types.hpp" +#include 
"ngraph/opsets/opset0.hpp" #include "ngraph/strides.hpp" #include "quant_conv.hpp" @@ -102,12 +101,12 @@ namespace ngraph // slice data data_lower_bounds[1] = group * data_group_size; data_upper_bounds[1] = (group + 1) * data_group_size; - auto sliced_data = std::make_shared( + auto sliced_data = std::make_shared( data, data_lower_bounds, data_upper_bounds); // slice filters filters_lower_bounds[0] = group * filters_group_size; filters_upper_bounds[0] = (group + 1) * filters_group_size; - auto sliced_filters = std::make_shared( + auto sliced_filters = std::make_shared( filters, filters_lower_bounds, filters_upper_bounds); if (bias) @@ -119,7 +118,7 @@ namespace ngraph else { convolution_nodes.push_back( - std::make_shared( + std::make_shared( sliced_data, sliced_filters, strides, @@ -140,8 +139,8 @@ namespace ngraph } } std::size_t concatenation_axis = 1; - return std::make_shared(convolution_nodes, - concatenation_axis); + return std::make_shared(convolution_nodes, + concatenation_axis); } else { @@ -162,7 +161,7 @@ namespace ngraph } else { - return std::make_shared( + return std::make_shared( data, filters, strides, diff --git a/src/ngraph/frontend/onnx_import/op/quantize_linear.cpp b/src/ngraph/frontend/onnx_import/op/quantize_linear.cpp index f4cf8536868..1ce93fa5875 100644 --- a/src/ngraph/frontend/onnx_import/op/quantize_linear.cpp +++ b/src/ngraph/frontend/onnx_import/op/quantize_linear.cpp @@ -18,7 +18,7 @@ #include #include "ngraph/axis_set.hpp" -#include "ngraph/op/quantize.hpp" +#include "ngraph/opsets/opset0.hpp" #include "ngraph/shape.hpp" #include "quantize_linear.hpp" @@ -61,13 +61,13 @@ namespace ngraph Shape y_scale_shape = y_scale->get_shape(); Shape y_zero_point_shape = y_zero_point->get_shape(); - return {std::make_shared( + return {std::make_shared( x, y_scale, y_zero_point, y_zero_point->get_element_type(), axes, - ngraph::op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN)}; + ngraph::opset0::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN)}; 
} } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/reciprocal.cpp b/src/ngraph/frontend/onnx_import/op/reciprocal.cpp index a6ac26623d9..2e53f2e150e 100644 --- a/src/ngraph/frontend/onnx_import/op/reciprocal.cpp +++ b/src/ngraph/frontend/onnx_import/op/reciprocal.cpp @@ -17,8 +17,8 @@ #include #include -#include "ngraph/op/fused/reciprocal.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" #include "ngraph/shape.hpp" #include "reciprocal.hpp" @@ -34,7 +34,7 @@ namespace ngraph { auto data = node.get_ng_inputs().at(0); - return {std::make_shared(data)}; + return {std::make_shared(data)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/reduce.cpp b/src/ngraph/frontend/onnx_import/op/reduce.cpp index 95629eebb3d..3bc9e79e3c3 100644 --- a/src/ngraph/frontend/onnx_import/op/reduce.cpp +++ b/src/ngraph/frontend/onnx_import/op/reduce.cpp @@ -18,8 +18,7 @@ #include // std::begin, std::end #include // std::accumulate -#include "ngraph/op/constant.hpp" -#include "ngraph/op/divide.hpp" +#include "default_opset.hpp" #include "ngraph/shape.hpp" #include "reduce.hpp" @@ -46,17 +45,17 @@ namespace ngraph auto sum_node = std::shared_ptr{reduction::make_ng_reduction_op( node, node.get_ng_inputs().at(0), - std::make_shared&, const ngraph::AxisSet&>)}; - auto const_node = ngraph::op::Constant::create( + auto const_node = default_opset::Constant::create( sum_node->get_element_type(), sum_node->get_shape(), std::vector(shape_size(sum_node->get_shape()), elem_count_product)); - return {std::make_shared(sum_node, const_node)}; + return {std::make_shared(sum_node, const_node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/reduce.hpp b/src/ngraph/frontend/onnx_import/op/reduce.hpp index fb885074357..6f809be5edb 100644 --- a/src/ngraph/frontend/onnx_import/op/reduce.hpp +++ b/src/ngraph/frontend/onnx_import/op/reduce.hpp @@ -20,6 +20,7 @@ #include #include "core/node.hpp" +#include 
"default_opset.hpp" #include "ngraph/builder/norm.hpp" #include "ngraph/node.hpp" #include "ngraph/op/abs.hpp" @@ -32,6 +33,7 @@ #include "ngraph/op/reduce_sum.hpp" #include "ngraph/op/sum.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/reduction.hpp" namespace ngraph @@ -59,10 +61,10 @@ namespace ngraph std::shared_ptr sum_node{reduction::make_ng_reduction_op( node, node.get_ng_inputs().at(0), - std::make_shared&, const ngraph::AxisSet&>)}; - return {std::make_shared(sum_node)}; + return {std::make_shared(sum_node)}; } /// \brief Compute the log sum exponent of the input tensor's elements along @@ -79,14 +81,15 @@ namespace ngraph /// inline NodeVector reduce_log_sum_exp(const Node& node) { - auto exp_node = std::make_shared(node.get_ng_inputs().at(0)); + auto exp_node = + std::make_shared(node.get_ng_inputs().at(0)); std::shared_ptr sum_node{reduction::make_ng_reduction_op( node, exp_node, - std::make_shared&, const ngraph::AxisSet&>)}; - return {std::make_shared(sum_node)}; + return {std::make_shared(sum_node)}; } /// \brief Compute the L1 norm of the input tensor's element along the provided @@ -152,7 +155,7 @@ namespace ngraph return {reduction::make_ng_reduction_op( node, node.get_ng_inputs().at(0), - std::make_shared&, const ngraph::AxisSet&>)}; } @@ -188,7 +191,7 @@ namespace ngraph return {reduction::make_ng_reduction_op( node, node.get_ng_inputs().at(0), - std::make_shared&, const ngraph::AxisSet&>)}; } @@ -210,7 +213,7 @@ namespace ngraph return {reduction::make_ng_reduction_op( node, node.get_ng_inputs().at(0), - std::make_shared&, const std::shared_ptr&, bool>)}; @@ -233,7 +236,7 @@ namespace ngraph return {reduction::make_ng_reduction_op( node, node.get_ng_inputs().at(0), - std::make_shared&, const std::shared_ptr&, bool>)}; @@ -258,7 +261,7 @@ namespace ngraph return {reduction::make_ng_reduction_op( node, square_node, - std::make_shared&, const ngraph::AxisSet&>)}; } diff --git 
a/src/ngraph/frontend/onnx_import/op/relu.hpp b/src/ngraph/frontend/onnx_import/op/relu.hpp index 6c63fe3b9b2..0ed617e0d42 100644 --- a/src/ngraph/frontend/onnx_import/op/relu.hpp +++ b/src/ngraph/frontend/onnx_import/op/relu.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/relu.hpp" namespace ngraph { @@ -33,7 +33,7 @@ namespace ngraph inline NodeVector relu(const Node& node) { NodeVector ng_inputs{node.get_ng_inputs()}; - return {std::make_shared(ng_inputs.at(0))}; + return {std::make_shared(ng_inputs.at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/reshape.cpp b/src/ngraph/frontend/onnx_import/op/reshape.cpp index 3de85970e59..e6de99c23bb 100644 --- a/src/ngraph/frontend/onnx_import/op/reshape.cpp +++ b/src/ngraph/frontend/onnx_import/op/reshape.cpp @@ -18,10 +18,9 @@ #include #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/axis_vector.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/reshape.hpp" #include "ngraph/shape.hpp" #include "reshape.hpp" #include "utils/reshape.hpp" @@ -37,32 +36,28 @@ namespace ngraph NodeVector reshape(const Node& node) { NodeVector ng_inputs{node.get_ng_inputs()}; - auto data = ng_inputs.at(0); - auto data_shape = data->get_shape(); + const auto data = ng_inputs.at(0); - auto output_shape = - node.get_attribute_value>("shape", {}); + std::shared_ptr pattern; - // If no shape argument (opset >= 5) and there is second input. - if (output_shape.empty() && ng_inputs.size() == 2) + // Since opset 5 the target shape is provided as input + if (ng_inputs.size() == 2) { - // Currently only support Constant node. 
- ASSERT_IS_SUPPORTED(node, ng_inputs.at(1)->description() == "Constant") - << "doesn't support shape input of other type than Constant."; + NGRAPH_CHECK(ng_inputs.at(1)->is_constant(), + "The target shape input has to be a Constant."); - output_shape = ngraph::as_type_ptr(ng_inputs.at(1)) - ->get_vector(); + pattern = ng_inputs.at(1); } - // Do nothing if there is no shape argument nor second node input. - else if (output_shape.empty()) + else { - return {data}; + const auto output_shape = + node.get_attribute_value>("shape", {}); + + pattern = default_opset::Constant::create( + element::i64, Shape{output_shape.size()}, output_shape); } - output_shape = - reshape::infer_dimensions(node.get_name(), data_shape, output_shape); - return {std::make_shared( - data, ngraph::get_default_order(data_shape.size()), Shape{output_shape})}; + return {std::make_shared(data, pattern, true)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/reverse_sequence.cpp b/src/ngraph/frontend/onnx_import/op/reverse_sequence.cpp index 1e3dce1b7aa..04c502d066e 100644 --- a/src/ngraph/frontend/onnx_import/op/reverse_sequence.cpp +++ b/src/ngraph/frontend/onnx_import/op/reverse_sequence.cpp @@ -17,9 +17,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/reverse_sequence.hpp" #include "ngraph/type/element_type.hpp" #include "reverse_sequence.hpp" #include "utils/common.hpp" @@ -38,7 +37,7 @@ namespace ngraph const auto sequence_lengths = node.get_ng_inputs().at(1); // nGraph supports only int32 type of sequence_lengths - const auto sequence_lengths_i32 = std::make_shared( + const auto sequence_lengths_i32 = std::make_shared( node.get_ng_inputs().at(1), element::i32); const auto batch_axis = node.get_attribute_value("batch_axis", 1); @@ -60,7 +59,7 @@ namespace ngraph "'batch_axis' and 'time_axis' attributes of the ReverseSequence " "operator can't point to the same 
dimension"); - return {std::make_shared( + return {std::make_shared( data, sequence_lengths_i32, valid_batch_axis, valid_time_axis)}; } diff --git a/src/ngraph/frontend/onnx_import/op/selu.cpp b/src/ngraph/frontend/onnx_import/op/selu.cpp index 96e3d0e9235..b1b79ed3654 100644 --- a/src/ngraph/frontend/onnx_import/op/selu.cpp +++ b/src/ngraph/frontend/onnx_import/op/selu.cpp @@ -17,12 +17,11 @@ #include #include +#include "default_opset.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/fused/selu.hpp" #include "selu.hpp" -using namespace ngraph::op; - namespace ngraph { namespace onnx_import @@ -39,13 +38,13 @@ namespace ngraph auto gamma = node.get_attribute_value("gamma", 1.05070102214813232421875); - auto alpha_node = std::make_shared( + auto alpha_node = std::make_shared( data->get_element_type(), data->get_shape(), std::vector{alpha}); - auto gamma_node = std::make_shared( + auto gamma_node = std::make_shared( data->get_element_type(), data->get_shape(), std::vector{gamma}); - return {std::make_shared(data, alpha_node, gamma_node)}; + return {std::make_shared(data, alpha_node, gamma_node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/shape.cpp b/src/ngraph/frontend/onnx_import/op/shape.cpp index 1e43b6aa6b2..2d94b9a1842 100644 --- a/src/ngraph/frontend/onnx_import/op/shape.cpp +++ b/src/ngraph/frontend/onnx_import/op/shape.cpp @@ -16,8 +16,8 @@ #include +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "shape.hpp" @@ -35,7 +35,7 @@ namespace ngraph auto data = node.get_ng_inputs().at(0); auto data_shape = data->get_shape(); - return {std::make_shared( + return {std::make_shared( ngraph::element::i64, Shape{data_shape.size()}, data_shape)}; } diff --git a/src/ngraph/frontend/onnx_import/op/shrink.cpp b/src/ngraph/frontend/onnx_import/op/shrink.cpp index 71d57099bff..2619fd6c389 100644 --- 
a/src/ngraph/frontend/onnx_import/op/shrink.cpp +++ b/src/ngraph/frontend/onnx_import/op/shrink.cpp @@ -16,14 +16,12 @@ #include +#include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/greater.hpp" -#include "ngraph/op/less.hpp" #include "ngraph/op/multiply.hpp" #include "ngraph/op/subtract.hpp" +#include "ngraph/opsets/opset0.hpp" #include "shrink.hpp" namespace ngraph @@ -43,40 +41,40 @@ namespace ngraph ASSERT_VALID_ARGUMENT(node, !(lambd < 0.0f)) << " The provided 'lambd' value:" << lambd << " must not be negative."; - std::shared_ptr negative_lambd; + std::shared_ptr negative_lambd; const auto input_element_type = input->get_element_type(); if (input_element_type.is_signed()) { - negative_lambd = ngraph::op::Constant::create( + negative_lambd = default_opset::Constant::create( input_element_type, input->get_shape(), {-lambd}); } else { // Passing -lambd to unsigned type constant will cause an overflow. // For unsigned types the lowest possible value is 0. 
- negative_lambd = ngraph::op::Constant::create( + negative_lambd = default_opset::Constant::create( input_element_type, input->get_shape(), {0}); } - const auto positive_lambd = ngraph::op::Constant::create( + const auto positive_lambd = default_opset::Constant::create( input_element_type, input->get_shape(), {lambd}); - const auto bias_tensor = ngraph::op::Constant::create( + const auto bias_tensor = default_opset::Constant::create( input_element_type, input->get_shape(), {bias}); // Create a mask indicating locations of values that need to be adjusted // by adding and subtracting bias // All other values indicated by 'false' in the masks need to be zeroed out std::shared_ptr values_below_neg_lambd = - std::make_shared(input, negative_lambd); + std::make_shared(input, negative_lambd); std::shared_ptr values_above_pos_lambd = - std::make_shared(input, positive_lambd); + std::make_shared(input, positive_lambd); // Convert from bool to the input type to be able to multiply adjusted inputs // by the created masks - values_below_neg_lambd = std::make_shared( + values_below_neg_lambd = std::make_shared( values_below_neg_lambd, input_element_type); - values_above_pos_lambd = std::make_shared( + values_above_pos_lambd = std::make_shared( values_above_pos_lambd, input_element_type); std::shared_ptr input_minus_bias = input - bias_tensor; diff --git a/src/ngraph/frontend/onnx_import/op/sigmoid.hpp b/src/ngraph/frontend/onnx_import/op/sigmoid.hpp index 3d819b28bd3..a8cf33f1b28 100644 --- a/src/ngraph/frontend/onnx_import/op/sigmoid.hpp +++ b/src/ngraph/frontend/onnx_import/op/sigmoid.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/sigmoid.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector sigmoid(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git 
a/src/ngraph/frontend/onnx_import/op/sign.hpp b/src/ngraph/frontend/onnx_import/op/sign.hpp index 210b20bd0ee..03786c2fc09 100644 --- a/src/ngraph/frontend/onnx_import/op/sign.hpp +++ b/src/ngraph/frontend/onnx_import/op/sign.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/sign.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector sign(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/sin.hpp b/src/ngraph/frontend/onnx_import/op/sin.hpp index 37714474cdc..071be751deb 100644 --- a/src/ngraph/frontend/onnx_import/op/sin.hpp +++ b/src/ngraph/frontend/onnx_import/op/sin.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/sin.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector sin(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/sinh.hpp b/src/ngraph/frontend/onnx_import/op/sinh.hpp index 530d079667d..591196ccc5e 100644 --- a/src/ngraph/frontend/onnx_import/op/sinh.hpp +++ b/src/ngraph/frontend/onnx_import/op/sinh.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/sinh.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector sinh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/size.cpp b/src/ngraph/frontend/onnx_import/op/size.cpp index 2ea86477fe8..653122fa205 100644 --- 
a/src/ngraph/frontend/onnx_import/op/size.cpp +++ b/src/ngraph/frontend/onnx_import/op/size.cpp @@ -18,7 +18,7 @@ #include #include -#include "ngraph/op/constant.hpp" +#include "default_opset.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "size.hpp" @@ -37,7 +37,7 @@ namespace ngraph std::int64_t tensor_elements_count{ static_cast(shape_size(data->get_shape()))}; - return {std::make_shared( + return {std::make_shared( ngraph::element::i64, Shape{}, std::vector{tensor_elements_count})}; diff --git a/src/ngraph/frontend/onnx_import/op/slice.cpp b/src/ngraph/frontend/onnx_import/op/slice.cpp index 9e26ab3497d..e5ee19b46f7 100644 --- a/src/ngraph/frontend/onnx_import/op/slice.cpp +++ b/src/ngraph/frontend/onnx_import/op/slice.cpp @@ -19,7 +19,7 @@ #include #include "ngraph/node.hpp" -#include "ngraph/op/slice.hpp" +#include "ngraph/opsets/opset0.hpp" #include "slice.hpp" #include "utils/common.hpp" @@ -72,7 +72,8 @@ namespace ngraph } } - return {std::make_shared(data, lower_bounds, upper_bounds)}; + return { + std::make_shared(data, lower_bounds, upper_bounds)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/softmax.cpp b/src/ngraph/frontend/onnx_import/op/softmax.cpp index dac2c3a042f..a5d052dbdc2 100644 --- a/src/ngraph/frontend/onnx_import/op/softmax.cpp +++ b/src/ngraph/frontend/onnx_import/op/softmax.cpp @@ -16,7 +16,7 @@ #include -#include "ngraph/op/softmax.hpp" +#include "default_opset.hpp" #include "softmax.hpp" #include "utils/common.hpp" @@ -37,7 +37,7 @@ namespace ngraph int axis = node.get_attribute_value("axis", 1); std::size_t valid_axis = common::validate_axis(node, axis, data_shape.size()); - return {std::make_shared(data, valid_axis)}; + return {std::make_shared(data, valid_axis)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/softplus.cpp b/src/ngraph/frontend/onnx_import/op/softplus.cpp index f9b4f0b6f7e..1cd1426df12 100644 --- 
a/src/ngraph/frontend/onnx_import/op/softplus.cpp +++ b/src/ngraph/frontend/onnx_import/op/softplus.cpp @@ -16,14 +16,12 @@ #include +#include "default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/exp.hpp" #include "ngraph/op/greater.hpp" -#include "ngraph/op/log.hpp" -#include "ngraph/op/negative.hpp" #include "ngraph/op/select.hpp" +#include "ngraph/opsets/opset0.hpp" #include "softplus.hpp" namespace ngraph @@ -39,30 +37,31 @@ namespace ngraph auto data = node.get_ng_inputs().at(0); std::shared_ptr zero_node = - std::make_shared( + std::make_shared( data->get_element_type(), data->get_shape(), std::vector{0.f}); - std::shared_ptr one_node = std::make_shared( - data->get_element_type(), data->get_shape(), std::vector{1.f}); + std::shared_ptr one_node = + std::make_shared( + data->get_element_type(), data->get_shape(), std::vector{1.f}); std::shared_ptr positive_val_node = - data + std::make_shared( - std::make_shared( - std::make_shared(data)) + + data + std::make_shared( + std::make_shared( + std::make_shared(data)) + one_node); std::shared_ptr negative_val_node = - std::make_shared(std::make_shared(data) + - one_node); + std::make_shared( + std::make_shared(data) + one_node); std::shared_ptr condition_node = - std::make_shared(data, zero_node); + std::make_shared(data, zero_node); // // This equation represents: // x + log(exp(-x) + 1) - for x > 0; to manage exponent overflow, // log(exp(x) + 1) - elsewhere. 
// - return {std::make_shared( + return {std::make_shared( condition_node, positive_val_node, negative_val_node)}; } diff --git a/src/ngraph/frontend/onnx_import/op/softsign.cpp b/src/ngraph/frontend/onnx_import/op/softsign.cpp index 866a964e3c7..9e5ff1edf1a 100644 --- a/src/ngraph/frontend/onnx_import/op/softsign.cpp +++ b/src/ngraph/frontend/onnx_import/op/softsign.cpp @@ -17,9 +17,9 @@ #include #include +#include "default_opset.hpp" #include "ngraph/op/abs.hpp" #include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" #include "ngraph/op/divide.hpp" #include "ngraph/op/util/broadcasting.hpp" #include "ngraph/shape.hpp" @@ -37,11 +37,12 @@ namespace ngraph { auto data = node.get_ng_inputs().at(0); - std::shared_ptr one_node = std::make_shared( - data->get_element_type(), Shape{}, std::vector{1}); + std::shared_ptr one_node = + std::make_shared( + data->get_element_type(), Shape{}, std::vector{1}); one_node = ngraph::op::make_broadcast_node(one_node, data->get_shape()); - return {data / (std::make_shared(data) + one_node)}; + return {data / (std::make_shared(data) + one_node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/space_to_depth.cpp b/src/ngraph/frontend/onnx_import/op/space_to_depth.cpp index 71bf487c4b5..4a2fb8bf807 100644 --- a/src/ngraph/frontend/onnx_import/op/space_to_depth.cpp +++ b/src/ngraph/frontend/onnx_import/op/space_to_depth.cpp @@ -14,8 +14,8 @@ // limitations under the License. 
//***************************************************************************** -#include "ngraph/op/fused/space_to_depth.hpp" #include "space_to_depth.hpp" +#include "default_opset.hpp" namespace ngraph { @@ -29,9 +29,9 @@ namespace ngraph { auto data = node.get_ng_inputs().at(0); std::size_t block_size = node.get_attribute_value("blocksize"); - const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; + const auto mode = default_opset::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; return NodeVector{ - std::make_shared(data, mode, block_size)}; + std::make_shared(data, mode, block_size)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/split.cpp b/src/ngraph/frontend/onnx_import/op/split.cpp index 8be65d608fc..66386581156 100644 --- a/src/ngraph/frontend/onnx_import/op/split.cpp +++ b/src/ngraph/frontend/onnx_import/op/split.cpp @@ -17,9 +17,7 @@ #include #include -#include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/split.hpp" -#include "ngraph/op/variadic_split.hpp" +#include "default_opset.hpp" #include "split.hpp" #include "utils/common.hpp" @@ -36,7 +34,7 @@ namespace ngraph const auto input = node.get_ng_inputs().at(0); const auto axis = node.get_attribute_value("axis", 0); const auto axis_node = - ngraph::op::Constant::create(element::i64, Shape{}, {axis}); + default_opset::Constant::create(element::i64, Shape{}, {axis}); std::shared_ptr split; if (node.has_attribute("split")) @@ -44,16 +42,16 @@ namespace ngraph const auto splits = node.get_attribute_value>("split"); - const auto split_lengths = ngraph::op::Constant::create( + const auto split_lengths = default_opset::Constant::create( element::u64, Shape{splits.size()}, splits); - split = std::make_shared( + split = std::make_shared( input, axis_node, split_lengths); } else { const auto outputs_number = node.get_output_names().size(); - split = std::make_shared( + split = std::make_shared( input, axis_node, outputs_number); } return 
common::get_outputs(split); diff --git a/src/ngraph/frontend/onnx_import/op/sqrt.hpp b/src/ngraph/frontend/onnx_import/op/sqrt.hpp index 47d178a1bbe..7581c1b4c8b 100644 --- a/src/ngraph/frontend/onnx_import/op/sqrt.hpp +++ b/src/ngraph/frontend/onnx_import/op/sqrt.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/sqrt.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector sqrt(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/squeeze.cpp b/src/ngraph/frontend/onnx_import/op/squeeze.cpp index c9f23fe2de6..44f79ae9e30 100644 --- a/src/ngraph/frontend/onnx_import/op/squeeze.cpp +++ b/src/ngraph/frontend/onnx_import/op/squeeze.cpp @@ -37,9 +37,9 @@ namespace ngraph node.get_attribute_value>("axes", {}); std::vector valid_axes = common::validate_axes(node, axes, data->get_shape().size()); - auto axes_node = std::make_shared( + auto axes_node = std::make_shared( element::u64, Shape{valid_axes.size()}, valid_axes); - return {std::make_shared(data, axes_node)}; + return {std::make_shared(data, axes_node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/sub.hpp b/src/ngraph/frontend/onnx_import/op/sub.hpp index 9087587b7fd..e41ad50842c 100644 --- a/src/ngraph/frontend/onnx_import/op/sub.hpp +++ b/src/ngraph/frontend/onnx_import/op/sub.hpp @@ -17,9 +17,10 @@ #pragma once #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/subtract.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -38,8 +39,8 @@ namespace ngraph NodeVector ng_inputs{ngraph::op::legacy_style_broadcast_for_binary_operation( node.get_ng_inputs().at(0), node.get_ng_inputs().at(1), axis)}; - return { - 
std::make_shared(ng_inputs.at(0), ng_inputs.at(1))}; + return {std::make_shared(ng_inputs.at(0), + ng_inputs.at(1))}; } } // namespace set_1 @@ -48,8 +49,8 @@ namespace ngraph { inline NodeVector sub(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0), - node.get_ng_inputs().at(1))}; + return {std::make_shared(node.get_ng_inputs().at(0), + node.get_ng_inputs().at(1))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/sum.hpp b/src/ngraph/frontend/onnx_import/op/sum.hpp index 8037e4ea4b3..88bf91d798b 100644 --- a/src/ngraph/frontend/onnx_import/op/sum.hpp +++ b/src/ngraph/frontend/onnx_import/op/sum.hpp @@ -17,8 +17,9 @@ #pragma once #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/add.hpp" +#include "ngraph/opsets/opset0.hpp" #include "utils/variadic.hpp" namespace ngraph @@ -31,7 +32,7 @@ namespace ngraph { inline NodeVector sum(const Node& node) { - return variadic::make_ng_variadic_op(node); + return variadic::make_ng_variadic_op(node); } } // namespace set_1 @@ -40,7 +41,7 @@ namespace ngraph { inline NodeVector sum(const Node& node) { - return variadic::make_ng_variadic_op(node); + return variadic::make_ng_variadic_op(node); } } // namespace set_8 diff --git a/src/ngraph/frontend/onnx_import/op/tan.hpp b/src/ngraph/frontend/onnx_import/op/tan.hpp index 91dc51c41bc..eeb7338644a 100644 --- a/src/ngraph/frontend/onnx_import/op/tan.hpp +++ b/src/ngraph/frontend/onnx_import/op/tan.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/tan.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector tan(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/tanh.hpp b/src/ngraph/frontend/onnx_import/op/tanh.hpp index 
d9c0d2f9f89..d2edff1687e 100644 --- a/src/ngraph/frontend/onnx_import/op/tanh.hpp +++ b/src/ngraph/frontend/onnx_import/op/tanh.hpp @@ -19,8 +19,8 @@ #include #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/tanh.hpp" namespace ngraph { @@ -32,7 +32,7 @@ namespace ngraph { inline NodeVector tanh(const Node& node) { - return {std::make_shared(node.get_ng_inputs().at(0))}; + return {std::make_shared(node.get_ng_inputs().at(0))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/thresholded_relu.cpp b/src/ngraph/frontend/onnx_import/op/thresholded_relu.cpp index e5a7730f5b9..0b22e153127 100644 --- a/src/ngraph/frontend/onnx_import/op/thresholded_relu.cpp +++ b/src/ngraph/frontend/onnx_import/op/thresholded_relu.cpp @@ -17,11 +17,10 @@ #include #include -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/greater.hpp" +#include "default_opset.hpp" #include "ngraph/op/multiply.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" #include "thresholded_relu.hpp" namespace ngraph @@ -38,17 +37,17 @@ namespace ngraph double alpha = node.get_attribute_value("alpha", 1.0); std::shared_ptr alpha_node = - std::make_shared(data->get_element_type(), - data->get_shape(), - std::vector{alpha}); + std::make_shared(data->get_element_type(), - data->get_shape(), - std::vector{alpha}); + std::make_shared(data->get_element_type(), + data->get_shape(), + std::vector{alpha}); - auto data_map = std::make_shared( - std::make_shared(data, alpha_node), + auto data_map = std::make_shared( + std::make_shared(data, alpha_node), data->get_element_type()); return {data * data_map}; } - } // namespace set_1 + } // namespace set_1 } // namespace op diff --git a/src/ngraph/frontend/onnx_import/op/topk.cpp b/src/ngraph/frontend/onnx_import/op/topk.cpp index b72a610e588..0042b352f08 100644 --- a/src/ngraph/frontend/onnx_import/op/topk.cpp +++ b/src/ngraph/frontend/onnx_import/op/topk.cpp @@ -17,10 +17,11 @@ #include #include +#include
"default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/op/get_output_element.hpp" #include "ngraph/op/topk.hpp" +#include "ngraph/opsets/opset0.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "topk.hpp" @@ -54,9 +55,9 @@ namespace ngraph::NodeVector get_outputs(const std::shared_ptr& node) { std::shared_ptr values = - std::make_shared(node, 0); + std::make_shared(node, 0); std::shared_ptr indices = - std::make_shared(node, 1); + std::make_shared(node, 1); return {values, indices}; } @@ -74,15 +75,15 @@ namespace ngraph { auto data = node.get_ng_inputs().at(0); std::int64_t k{node.get_attribute_value("k")}; - auto k_node = ngraph::op::Constant::create(element::i64, Shape{}, {k}); + auto k_node = default_opset::Constant::create(element::i64, Shape{}, {k}); auto axis = get_axis(node); - std::shared_ptr top_k = std::make_shared( + std::shared_ptr top_k = std::make_shared( data, k_node, axis, - ngraph::op::v1::TopK::Mode::MAX, - ngraph::op::v1::TopK::SortType::SORT_VALUES, + default_opset::TopK::Mode::MAX, + default_opset::TopK::SortType::SORT_VALUES, element::i64); return get_outputs(top_k); @@ -97,12 +98,12 @@ namespace ngraph auto k = get_k(node); auto axis = get_axis(node); - std::shared_ptr top_k = std::make_shared( + std::shared_ptr top_k = std::make_shared( data, k, axis, - ngraph::op::v1::TopK::Mode::MAX, - ngraph::op::v1::TopK::SortType::SORT_VALUES, + default_opset::TopK::Mode::MAX, + default_opset::TopK::SortType::SORT_VALUES, element::i64); return get_outputs(top_k); @@ -123,14 +124,14 @@ namespace ngraph const auto sorted = node.get_attribute_value("sorted", 1); // Map attribute values to nGraph enums - const auto sort_type = sorted ? ngraph::op::v1::TopK::SortType::SORT_VALUES - : ngraph::op::v1::TopK::SortType::NONE; + const auto sort_type = sorted ? 
default_opset::TopK::SortType::SORT_VALUES + : default_opset::TopK::SortType::NONE; const auto compute_max = static_cast(largest); - const auto mode = compute_max ? ngraph::op::v1::TopK::Mode::MAX - : ngraph::op::v1::TopK::Mode::MIN; + const auto mode = compute_max ? default_opset::TopK::Mode::MAX + : default_opset::TopK::Mode::MIN; - std::shared_ptr top_k = std::make_shared( + std::shared_ptr top_k = std::make_shared( data, k, axis, mode, sort_type, element::i64); return get_outputs(top_k); diff --git a/src/ngraph/frontend/onnx_import/op/unsqueeze.cpp b/src/ngraph/frontend/onnx_import/op/unsqueeze.cpp index bf695fdf785..38b0e48289f 100644 --- a/src/ngraph/frontend/onnx_import/op/unsqueeze.cpp +++ b/src/ngraph/frontend/onnx_import/op/unsqueeze.cpp @@ -16,8 +16,7 @@ #include -#include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/unsqueeze.hpp" +#include "default_opset.hpp" #include "ngraph/shape.hpp" #include "unsqueeze.hpp" #include "utils/common.hpp" @@ -37,9 +36,9 @@ namespace ngraph const auto expanded_rank = data->get_shape().size() + axes.size(); std::vector valid_axes = common::validate_axes(node, axes, expanded_rank); - auto axes_node = std::make_shared( + auto axes_node = std::make_shared( element::i64, Shape{valid_axes.size()}, valid_axes); - return {std::make_shared(data, axes_node)}; + return {std::make_shared(data, axes_node)}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/where.hpp b/src/ngraph/frontend/onnx_import/op/where.hpp index d5d964d3358..430440720f8 100644 --- a/src/ngraph/frontend/onnx_import/op/where.hpp +++ b/src/ngraph/frontend/onnx_import/op/where.hpp @@ -20,8 +20,8 @@ #include "core/node.hpp" #include "ngraph/node.hpp" -#include "ngraph/op/select.hpp" #include "ngraph/op/util/broadcasting.hpp" +#include "ngraph/opsets/opset0.hpp" namespace ngraph { @@ -35,7 +35,7 @@ namespace ngraph { NodeVector ng_inputs{ngraph::op::numpy_style_broadcast(node.get_ng_inputs())}; - return {std::make_shared( + return 
{std::make_shared( ng_inputs.at(0), ng_inputs.at(1), ng_inputs.at(2))}; } } // namespace set_1 diff --git a/src/ngraph/frontend/onnx_import/op/xor.hpp b/src/ngraph/frontend/onnx_import/op/xor.hpp index e644b468cd7..97fef65e9e4 100644 --- a/src/ngraph/frontend/onnx_import/op/xor.hpp +++ b/src/ngraph/frontend/onnx_import/op/xor.hpp @@ -17,9 +17,9 @@ #pragma once #include "core/node.hpp" +#include "default_opset.hpp" #include "ngraph/node.hpp" #include "ngraph/op/util/broadcasting.hpp" -#include "ngraph/op/xor.hpp" namespace ngraph { @@ -31,7 +31,7 @@ namespace ngraph { inline NodeVector logical_xor(const Node& node) { - return {std::make_shared( + return {std::make_shared( node.get_ng_inputs().at(0), node.get_ng_inputs().at(1), ngraph::op::AutoBroadcastSpec(ngraph::op::AutoBroadcastType::NUMPY))}; diff --git a/src/ngraph/frontend/onnx_import/ops_bridge.cpp b/src/ngraph/frontend/onnx_import/ops_bridge.cpp index 9f4ea017dff..6a37e0e614c 100644 --- a/src/ngraph/frontend/onnx_import/ops_bridge.cpp +++ b/src/ngraph/frontend/onnx_import/ops_bridge.cpp @@ -59,6 +59,7 @@ #include "op/flatten.hpp" #include "op/floor.hpp" #include "op/gather.hpp" +#include "op/gather_nd.hpp" #include "op/gemm.hpp" #include "op/global_average_pool.hpp" #include "op/global_max_pool.hpp" @@ -275,6 +276,7 @@ namespace ngraph REGISTER_OPERATOR("Flatten", 1, flatten); REGISTER_OPERATOR("Floor", 1, floor); REGISTER_OPERATOR("Gather", 1, gather); + REGISTER_OPERATOR("GatherND", 1, gather_nd); REGISTER_OPERATOR("Gemm", 1, gemm); REGISTER_OPERATOR("Gemm", 6, gemm); REGISTER_OPERATOR("GlobalAveragePool", 1, global_average_pool); diff --git a/src/ngraph/frontend/onnx_import/utils/common.cpp b/src/ngraph/frontend/onnx_import/utils/common.cpp index 64fcad1020f..ebb2fe6c0cc 100644 --- a/src/ngraph/frontend/onnx_import/utils/common.cpp +++ b/src/ngraph/frontend/onnx_import/utils/common.cpp @@ -16,7 +16,9 @@ #include // onnx types #include "common.hpp" +#include "default_opset.hpp" #include 
"ngraph/op/get_output_element.hpp" +#include "ngraph/opsets/opset0.hpp" #include "validation_util.hpp" namespace ngraph @@ -92,7 +94,7 @@ namespace ngraph } else { - outputs[i] = std::make_shared(node, i); + outputs[i] = std::make_shared(node, i); } } return outputs; diff --git a/src/ngraph/frontend/onnx_import/utils/common.hpp b/src/ngraph/frontend/onnx_import/utils/common.hpp index f32d865770f..c29477f712c 100644 --- a/src/ngraph/frontend/onnx_import/utils/common.hpp +++ b/src/ngraph/frontend/onnx_import/utils/common.hpp @@ -26,7 +26,7 @@ #include #include "core/node.hpp" -#include "ngraph/op/constant.hpp" +#include "default_opset.hpp" #include "ngraph/op/util/broadcasting.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" @@ -155,7 +155,7 @@ namespace ngraph identity_matrix.at(diagonal_element_idx) = T{1}; } - return std::make_shared( + return std::make_shared( output_type, output_shape, identity_matrix); } @@ -165,8 +165,8 @@ namespace ngraph /// /// \return A Constant node representing identity matrix with shape (n, n). 
template - std::shared_ptr square_identity(const size_t n, - const element::Type& type) + std::shared_ptr square_identity(const size_t n, + const element::Type& type) { return shifted_square_identity(Shape{n, n}, type, 0); } diff --git a/src/ngraph/frontend/onnx_import/utils/pooling_factory.cpp b/src/ngraph/frontend/onnx_import/utils/pooling_factory.cpp index 099c9510549..252c95fdb21 100644 --- a/src/ngraph/frontend/onnx_import/utils/pooling_factory.cpp +++ b/src/ngraph/frontend/onnx_import/utils/pooling_factory.cpp @@ -16,6 +16,7 @@ #include +#include "default_opset.hpp" #include "ngraph/coordinate_diff.hpp" #include "utils/convpool.hpp" #include "utils/pooling_factory.hpp" @@ -45,25 +46,25 @@ namespace ngraph { bool count_include_pad = m_onnx_node.get_attribute_value("count_include_pad", 0); - return {std::make_shared(m_inputs.at(0), - m_strides, - m_padding_below, - m_padding_above, - m_kernel_shape, - !count_include_pad, - op::RoundingType::FLOOR, - m_auto_pad)}; + return {std::make_shared(m_inputs.at(0), + m_strides, + m_padding_below, + m_padding_above, + m_kernel_shape, + !count_include_pad, + op::RoundingType::FLOOR, + m_auto_pad)}; } NodeVector PoolingFactory::make_max_pool() const { - return {std::make_shared(m_inputs.at(0), - m_strides, - m_padding_below, - m_padding_above, - m_kernel_shape, - op::RoundingType::FLOOR, - m_auto_pad)}; + return {std::make_shared(m_inputs.at(0), + m_strides, + m_padding_below, + m_padding_above, + m_kernel_shape, + op::RoundingType::FLOOR, + m_auto_pad)}; } GlobalPoolingFactory::GlobalPoolingFactory(const Node& node) diff --git a/src/ngraph/node.hpp b/src/ngraph/node.hpp index 1af9dccada2..cf7762b17a2 100644 --- a/src/ngraph/node.hpp +++ b/src/ngraph/node.hpp @@ -147,9 +147,11 @@ namespace ngraph /// \param output_size Number of outputs for this node Node(const NodeVector& arguments, size_t output_size = 1); - virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + // For 
back-compatibility + virtual void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) {} + virtual void generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { + generate_adjoints(adjoints, as_node_vector(deltas)); } /// \brief Moves nodes that would be deleted from inputs to nodes to avoid stack overflows /// on deep networks. diff --git a/src/ngraph/op/abs.cpp b/src/ngraph/op/abs.cpp index 18ffd44c10f..2a291c01405 100644 --- a/src/ngraph/op/abs.cpp +++ b/src/ngraph/op/abs.cpp @@ -35,7 +35,7 @@ shared_ptr op::Abs::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Abs::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Abs::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/abs.hpp b/src/ngraph/op/abs.hpp index 56b1cffb63e..76a8b03378d 100644 --- a/src/ngraph/op/abs.hpp +++ b/src/ngraph/op/abs.hpp @@ -49,7 +49,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Abs; diff --git a/src/ngraph/op/acos.cpp b/src/ngraph/op/acos.cpp index 037f9eff98f..437ccbf3873 100644 --- a/src/ngraph/op/acos.cpp +++ b/src/ngraph/op/acos.cpp @@ -44,7 +44,7 @@ shared_ptr op::Acos::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Acos::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Acos::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/acos.hpp b/src/ngraph/op/acos.hpp index 4095ffc5160..664333ff5b2 100644 --- a/src/ngraph/op/acos.hpp +++ b/src/ngraph/op/acos.hpp @@ -48,7 +48,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const 
NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Acos; diff --git a/src/ngraph/op/add.cpp b/src/ngraph/op/add.cpp index c2924fc0e39..7c0de50f8ff 100644 --- a/src/ngraph/op/add.cpp +++ b/src/ngraph/op/add.cpp @@ -43,7 +43,7 @@ bool op::v0::Add::visit_attributes(AttributeVisitor& visitor) return true; } -void op::v0::Add::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Add::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { @@ -88,7 +88,7 @@ shared_ptr op::v1::Add::copy_with_new_args(const NodeVector& new_args) con return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v1::Add::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Add::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { diff --git a/src/ngraph/op/add.hpp b/src/ngraph/op/add.hpp index 997c123bd66..35136d8dc3f 100644 --- a/src/ngraph/op/add.hpp +++ b/src/ngraph/op/add.hpp @@ -59,7 +59,7 @@ namespace ngraph virtual bool is_commutative() const override { return true; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v0 @@ -100,7 +100,7 @@ namespace ngraph size_t get_version() const override { return 1; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v1 diff --git a/src/ngraph/op/asin.cpp b/src/ngraph/op/asin.cpp index b44cf28c74d..b2a520c628b 100644 --- a/src/ngraph/op/asin.cpp +++ b/src/ngraph/op/asin.cpp @@ -45,7 +45,7 @@ shared_ptr op::Asin::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void 
op::Asin::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Asin::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/asin.hpp b/src/ngraph/op/asin.hpp index 6ec9e6d3a32..54bc5646e2c 100644 --- a/src/ngraph/op/asin.hpp +++ b/src/ngraph/op/asin.hpp @@ -49,7 +49,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override { return true; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Asin; diff --git a/src/ngraph/op/atan.cpp b/src/ngraph/op/atan.cpp index ccfebe660cc..57820baf209 100644 --- a/src/ngraph/op/atan.cpp +++ b/src/ngraph/op/atan.cpp @@ -44,7 +44,7 @@ shared_ptr op::Atan::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Atan::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Atan::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/atan.hpp b/src/ngraph/op/atan.hpp index c6411e01fae..e0dec4d0e8c 100644 --- a/src/ngraph/op/atan.hpp +++ b/src/ngraph/op/atan.hpp @@ -50,7 +50,7 @@ namespace ngraph bool visit_attributes(AttributeVisitor& visitor) override { return true; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Atan; diff --git a/src/ngraph/op/atan2.cpp b/src/ngraph/op/atan2.cpp index 6be6111921e..1231ac5fc72 100644 --- a/src/ngraph/op/atan2.cpp +++ b/src/ngraph/op/atan2.cpp @@ -39,7 +39,7 @@ shared_ptr op::Atan2::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::Atan2::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) 
+void op::Atan2::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { diff --git a/src/ngraph/op/atan2.hpp b/src/ngraph/op/atan2.hpp index dae716f4d44..385bc54b1ac 100644 --- a/src/ngraph/op/atan2.hpp +++ b/src/ngraph/op/atan2.hpp @@ -47,7 +47,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } } diff --git a/src/ngraph/op/avg_pool.cpp b/src/ngraph/op/avg_pool.cpp index 829851921d9..933bf6a1da1 100644 --- a/src/ngraph/op/avg_pool.cpp +++ b/src/ngraph/op/avg_pool.cpp @@ -383,7 +383,7 @@ shared_ptr op::v0::AvgPoolBackprop::copy_with_new_args(const NodeVector& n m_include_padding_in_avg_computation); } -void op::v0::AvgPool::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::AvgPool::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (m_ceil_mode) { @@ -727,7 +727,7 @@ shared_ptr op::v1::AvgPoolBackprop::copy_with_new_args(const NodeVector& n m_exclude_pad); } -void op::v1::AvgPool::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::AvgPool::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (m_rounding_type == op::RoundingType::CEIL) { diff --git a/src/ngraph/op/avg_pool.hpp b/src/ngraph/op/avg_pool.hpp index be0d16e4d67..ea3831ac56a 100644 --- a/src/ngraph/op/avg_pool.hpp +++ b/src/ngraph/op/avg_pool.hpp @@ -137,7 +137,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; /// \return The window shape. 
const Shape& get_window_shape() const; @@ -287,7 +287,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; /// \return The kernel shape. const Shape& get_kernel() const; diff --git a/src/ngraph/op/batch_norm.cpp b/src/ngraph/op/batch_norm.cpp index 5b70de64a3c..dae6f810ad5 100644 --- a/src/ngraph/op/batch_norm.cpp +++ b/src/ngraph/op/batch_norm.cpp @@ -81,7 +81,7 @@ std::shared_ptr op::BatchNormTraining::copy_with_new_args(const NodeVector } void op::BatchNormTraining::generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) + const OutputVector& deltas) { auto gamma = input_value(0); auto beta = input_value(1); @@ -99,9 +99,9 @@ void op::BatchNormTraining::generate_adjoints(autodiff::Adjoints& adjoints, auto bbn = std::make_shared( data, gamma, beta, mean, var, deltas.at(0), get_eps_value()); - auto dinput = std::make_shared(bbn, 0); - auto dgamma = std::make_shared(bbn, 1); - auto dbeta = std::make_shared(bbn, 2); + auto dinput = Output(bbn, 0); + auto dgamma = Output(bbn, 1); + auto dbeta = Output(bbn, 2); adjoints.add_delta(data, dinput); adjoints.add_delta(gamma, dgamma); diff --git a/src/ngraph/op/batch_norm.hpp b/src/ngraph/op/batch_norm.hpp index e28aa1700aa..e0f61b764e2 100644 --- a/src/ngraph/op/batch_norm.hpp +++ b/src/ngraph/op/batch_norm.hpp @@ -82,7 +82,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; static constexpr size_t INPUT_GAMMA = 0; static constexpr size_t INPUT_BETA = 1; @@ -149,7 +149,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) override + const OutputVector& /* deltas */) override { throw ngraph_error("Invalid operation"); } diff --git 
a/src/ngraph/op/binary_convolution.cpp b/src/ngraph/op/binary_convolution.cpp index 7c05e812606..7b1e7b377ad 100644 --- a/src/ngraph/op/binary_convolution.cpp +++ b/src/ngraph/op/binary_convolution.cpp @@ -143,7 +143,7 @@ shared_ptr op::v1::BinaryConvolution::copy_with_new_args(const NodeVector& } void op::v1::BinaryConvolution::generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) + const OutputVector& deltas) { throw ngraph_error("BinaryConvolution generate_adjoints not implemented"); } diff --git a/src/ngraph/op/binary_convolution.hpp b/src/ngraph/op/binary_convolution.hpp index 9b6bce230d6..a108ea814e1 100644 --- a/src/ngraph/op/binary_convolution.hpp +++ b/src/ngraph/op/binary_convolution.hpp @@ -77,7 +77,7 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; /// \return The strides. 
const Strides& get_strides() const { return m_strides; } diff --git a/src/ngraph/op/broadcast.cpp b/src/ngraph/op/broadcast.cpp index 3696304530d..de35ffb0b97 100644 --- a/src/ngraph/op/broadcast.cpp +++ b/src/ngraph/op/broadcast.cpp @@ -256,7 +256,7 @@ shared_ptr op::v1::Broadcast::copy_with_new_args(const NodeVector& new_arg new_args.at(0), new_args.at(1), new_args.at(2), m_broadcast_spec); } -void op::v1::Broadcast::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Broadcast::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); @@ -348,7 +348,7 @@ shared_ptr op::v0::Broadcast::copy_with_new_args(const NodeVector& new_arg return make_shared(new_args.at(0), m_shape, m_broadcast_axes); } -void op::v0::Broadcast::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Broadcast::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/broadcast.hpp b/src/ngraph/op/broadcast.hpp index 586c2925d39..c1513ebc39a 100644 --- a/src/ngraph/op/broadcast.hpp +++ b/src/ngraph/op/broadcast.hpp @@ -65,7 +65,7 @@ namespace ngraph const AxisSet& broadcast_axes); virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; virtual void infer_shape() {} Shape m_shape; @@ -170,7 +170,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: AutoBroadcastSpec m_broadcast_spec; diff --git a/src/ngraph/op/concat.cpp b/src/ngraph/op/concat.cpp index 34cbc63bfd6..f380aa59d1f 100644 --- a/src/ngraph/op/concat.cpp +++ b/src/ngraph/op/concat.cpp @@ -112,7 +112,7 @@ shared_ptr op::Concat::copy_with_new_args(const NodeVector& new_args) cons return make_shared(new_args, m_axis); } -void 
op::Concat::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Concat::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/concat.hpp b/src/ngraph/op/concat.hpp index acf89b305b0..954452cfdce 100644 --- a/src/ngraph/op/concat.hpp +++ b/src/ngraph/op/concat.hpp @@ -63,7 +63,7 @@ namespace ngraph void set_axis(int64_t axis) { m_axis = axis; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; /// \ brief m_axis stores default value for all iterations int64_t m_axis; /// \brief m_concat_axis stores m_axis plus the number of rank for each iteration diff --git a/src/ngraph/op/constant.hpp b/src/ngraph/op/constant.hpp index 10eae5d3541..c6370fa31ab 100644 --- a/src/ngraph/op/constant.hpp +++ b/src/ngraph/op/constant.hpp @@ -16,6 +16,7 @@ #pragma once +#include #include #include @@ -46,8 +47,9 @@ namespace ngraph Constant(const element::Type& type, Shape shape, const std::vector& values) : m_element_type(type) , m_shape(shape) - , m_data(new runtime::AlignedBuffer(shape_size(m_shape) * m_element_type.size(), - host_alignment())) + , m_data(new runtime::AlignedBuffer( + std::ceil(shape_size(m_shape) * m_element_type.bitwidth() / 8.f), + host_alignment())) { NODE_VALIDATION_CHECK( this, @@ -82,8 +84,9 @@ namespace ngraph Constant(const element::Type& type, Shape shape, const std::vector& values) : m_element_type(type) , m_shape(shape) - , m_data(new runtime::AlignedBuffer(shape_size(m_shape) * m_element_type.size(), - host_alignment())) + , m_data(new runtime::AlignedBuffer( + std::ceil(shape_size(m_shape) * m_element_type.bitwidth() / 8.f), + host_alignment())) { NODE_VALIDATION_CHECK( this, @@ -143,9 +146,8 @@ namespace ngraph , m_shape(shape) , m_data(nullptr) { - size_t size = shape_size(m_shape) * m_element_type.size(); - m_data.reset(new 
runtime::AlignedBuffer(shape_size(m_shape) * m_element_type.size(), - host_alignment())); + size_t size = std::ceil(shape_size(m_shape) * m_element_type.bitwidth() / 8.f); + m_data.reset(new runtime::AlignedBuffer(size, host_alignment())); std::memcpy(m_data->get_ptr(), data, size); constructor_validate_and_infer_types(); m_all_elements_bitwise_identical = are_all_data_elements_bitwise_identical(); @@ -240,6 +242,87 @@ namespace ngraph return rc; } + /// \brief Return the Constant's value as a vector cast to type T + /// + /// \tparam T Type to which data vector's entries will be cast. + /// \return Constant's data vector. + template + std::vector cast_vector() const + { + auto source_type = get_element_type(); + switch (source_type) + { + case element::Type_t::boolean: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::bf16: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::f16: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::f32: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::f64: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::i8: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::i16: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::i32: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::i64: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::u8: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::u16: + { + auto vector = 
get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::u32: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::u64: + { + auto vector = get_vector(); + return std::vector(vector.begin(), vector.end()); + } + case element::Type_t::u1: + case element::Type_t::undefined: + case element::Type_t::dynamic: throw std::runtime_error("unsupported type"); + } + } + const void* get_data_ptr() const { return (m_data ? m_data->get_ptr() : nullptr); } template const T* get_data_ptr() const diff --git a/src/ngraph/op/convert.cpp b/src/ngraph/op/convert.cpp index b96ce7bb563..3d144876d01 100644 --- a/src/ngraph/op/convert.cpp +++ b/src/ngraph/op/convert.cpp @@ -41,7 +41,7 @@ shared_ptr op::Convert::copy_with_new_args(const NodeVector& new_args) con return make_shared(new_args.at(0), m_destination_type); } -void op::Convert::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Convert::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/convert.hpp b/src/ngraph/op/convert.hpp index ba4b169416a..8b88fe6eca4 100644 --- a/src/ngraph/op/convert.hpp +++ b/src/ngraph/op/convert.hpp @@ -57,7 +57,7 @@ namespace ngraph protected: ngraph::element::Type m_destination_type; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Convert; diff --git a/src/ngraph/op/convert_like.cpp b/src/ngraph/op/convert_like.cpp index 9af074e2fed..3944fb53216 100644 --- a/src/ngraph/op/convert_like.cpp +++ b/src/ngraph/op/convert_like.cpp @@ -40,7 +40,8 @@ shared_ptr op::v1::ConvertLike::copy_with_new_args(const NodeVector& new_a return make_shared(new_args.at(0), new_args.at(1)); } -void op::v1::ConvertLike::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void 
op::v1::ConvertLike::generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) { const auto delta = deltas.at(0); diff --git a/src/ngraph/op/convert_like.hpp b/src/ngraph/op/convert_like.hpp index 3371467e4cb..89914794a9b 100644 --- a/src/ngraph/op/convert_like.hpp +++ b/src/ngraph/op/convert_like.hpp @@ -45,7 +45,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } } diff --git a/src/ngraph/op/convolution.cpp b/src/ngraph/op/convolution.cpp index c36f901c405..702f8579b55 100644 --- a/src/ngraph/op/convolution.cpp +++ b/src/ngraph/op/convolution.cpp @@ -126,7 +126,8 @@ shared_ptr op::v1::Convolution::copy_with_new_args(const NodeVector& new_a m_auto_pad); } -void op::v1::Convolution::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Convolution::generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) { auto delta = deltas.at(0); @@ -319,7 +320,7 @@ void op::v1::ConvolutionBackpropData::validate_and_infer_types() } void op::v1::ConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) + const OutputVector& deltas) { auto delta = deltas.at(0); @@ -716,7 +717,8 @@ shared_ptr op::v0::Convolution::copy_with_new_args(const NodeVector& new_a m_pad_type); } -void op::v0::Convolution::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Convolution::generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) { auto delta = deltas.at(0); @@ -840,7 +842,7 @@ void op::v0::ConvolutionBackpropData::validate_and_infer_types() } void op::v0::ConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) + const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/convolution.hpp b/src/ngraph/op/convolution.hpp index 
e01c1bbcec9..80a402913d1 100644 --- a/src/ngraph/op/convolution.hpp +++ b/src/ngraph/op/convolution.hpp @@ -67,7 +67,7 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; /// \return The strides. const Strides& get_strides() const { return m_strides; } @@ -150,7 +150,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; @@ -368,7 +368,7 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; /// \return The window movement strides. 
const Strides& get_window_movement_strides() const @@ -454,7 +454,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; diff --git a/src/ngraph/op/cos.cpp b/src/ngraph/op/cos.cpp index f37dca64aa9..b99c1fc1457 100644 --- a/src/ngraph/op/cos.cpp +++ b/src/ngraph/op/cos.cpp @@ -36,7 +36,7 @@ shared_ptr op::Cos::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Cos::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Cos::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/cos.hpp b/src/ngraph/op/cos.hpp index ef41aed866f..faf3a0875d9 100644 --- a/src/ngraph/op/cos.hpp +++ b/src/ngraph/op/cos.hpp @@ -42,7 +42,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Cos; diff --git a/src/ngraph/op/cosh.cpp b/src/ngraph/op/cosh.cpp index 172fa7cf5d5..7d6f18b7584 100644 --- a/src/ngraph/op/cosh.cpp +++ b/src/ngraph/op/cosh.cpp @@ -35,7 +35,7 @@ shared_ptr op::Cosh::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Cosh::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Cosh::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/cosh.hpp b/src/ngraph/op/cosh.hpp index c0181c29aaf..389f4d077fa 100644 --- a/src/ngraph/op/cosh.hpp +++ b/src/ngraph/op/cosh.hpp @@ -42,7 +42,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + 
const OutputVector& deltas) override; }; } using v0::Cosh; diff --git a/src/ngraph/op/cum_sum.cpp b/src/ngraph/op/cum_sum.cpp index 4115992d2e6..ca28c091835 100644 --- a/src/ngraph/op/cum_sum.cpp +++ b/src/ngraph/op/cum_sum.cpp @@ -22,39 +22,53 @@ using namespace std; using namespace ngraph; -constexpr NodeTypeInfo op::CumSum::type_info; +constexpr NodeTypeInfo op::v0::CumSum::type_info; -op::CumSum::CumSum(const Output& arg, - const Output& axis, - const bool exclusive, - const bool reverse) +op::v0::CumSum::CumSum(const Output& arg, + const Output& axis, + const bool exclusive, + const bool reverse) : Op({arg, axis}) , m_exclusive(exclusive) , m_reverse(reverse) { + constructor_validate_and_infer_types(); +} + +void op::v0::CumSum::validate_and_infer_types() +{ + element::Type arg_type = get_input_element_type(0); + PartialShape arg_shape = get_input_partial_shape(0); + set_output_type(0, arg_type, arg_shape); + + PartialShape axes_shape{PartialShape::dynamic()}; + if (get_input_partial_shape(1).is_static()) + { + axes_shape = get_input_partial_shape(1); + } + + const auto& axis_type = get_input_element_type(1); NODE_VALIDATION_CHECK(this, - axis.get_element_type() == element::i32 || - axis.get_element_type() == element::i64, + axis_type == element::i32 || axis_type == element::i64, "axis element type must be either int64_t or int32_t but got (", - axis.get_element_type(), + axis_type, ")."); - set_output_type(0, arg.get_element_type(), arg.get_shape()); } -shared_ptr op::CumSum::copy_with_new_args(const NodeVector& new_args) const +shared_ptr op::v0::CumSum::copy_with_new_args(const NodeVector& new_args) const { check_new_args_count(this, new_args); return make_shared(new_args.at(0), new_args.at(1), m_exclusive, m_reverse); } -void op::CumSum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::CumSum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); auto input_tensor = 
input_value(0); adjoints.add_delta(input_tensor, delta); } -shared_ptr op::CumSum::get_default_value() const +shared_ptr op::v0::CumSum::get_default_value() const { return ngraph::make_constant_from_string("0", get_element_type(), get_shape()); } diff --git a/src/ngraph/op/cum_sum.hpp b/src/ngraph/op/cum_sum.hpp index 34b9d974bd3..27761dfb5c4 100644 --- a/src/ngraph/op/cum_sum.hpp +++ b/src/ngraph/op/cum_sum.hpp @@ -91,13 +91,15 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; + void validate_and_infer_types() override; + /// \return The default value for CumSum. virtual std::shared_ptr get_default_value() const override; bool is_exclusive() const { return m_exclusive; } bool is_reverse() const { return m_reverse; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: bool m_exclusive; diff --git a/src/ngraph/op/dequantize.cpp b/src/ngraph/op/dequantize.cpp index b2c197a0198..68c035edaee 100644 --- a/src/ngraph/op/dequantize.cpp +++ b/src/ngraph/op/dequantize.cpp @@ -158,7 +158,7 @@ shared_ptr op::Dequantize::copy_with_new_args(const NodeVector& new_args) } void op::Dequantize::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Forward-propagation-only operation"); } diff --git a/src/ngraph/op/dequantize.hpp b/src/ngraph/op/dequantize.hpp index 1632aee8862..f2f6cc10fd3 100644 --- a/src/ngraph/op/dequantize.hpp +++ b/src/ngraph/op/dequantize.hpp @@ -58,7 +58,7 @@ namespace ngraph void set_type(const element::Type& type) { m_type = type; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: element::Type m_type; diff --git a/src/ngraph/op/divide.cpp b/src/ngraph/op/divide.cpp index 56827dcf5ce..0f4e4988d51 
100644 --- a/src/ngraph/op/divide.cpp +++ b/src/ngraph/op/divide.cpp @@ -50,7 +50,7 @@ shared_ptr op::v0::Divide::copy_with_new_args(const NodeVector& new_args) new_args.at(0), new_args.at(1), this->is_pythondiv(), this->get_autob()); } -void op::v0::Divide::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Divide::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { @@ -100,7 +100,7 @@ shared_ptr op::v1::Divide::copy_with_new_args(const NodeVector& new_args) new_args.at(0), new_args.at(1), this->is_pythondiv(), this->get_autob()); } -void op::v1::Divide::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Divide::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { diff --git a/src/ngraph/op/divide.hpp b/src/ngraph/op/divide.hpp index 306a4c709fc..2b4d6239db8 100644 --- a/src/ngraph/op/divide.hpp +++ b/src/ngraph/op/divide.hpp @@ -61,7 +61,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; protected: bool m_pythondiv{true}; @@ -110,7 +110,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; size_t get_version() const override { return 1; } protected: bool m_pythondiv{true}; diff --git a/src/ngraph/op/dot.cpp b/src/ngraph/op/dot.cpp index ed1825260e7..2daa2a96f9f 100644 --- a/src/ngraph/op/dot.cpp +++ b/src/ngraph/op/dot.cpp @@ -176,16 +176,16 @@ shared_ptr make_reshape_axes_to_front(const Output& n, return make_shared(n, input_order, output_shape); } -void 
op::Dot::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Dot::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); auto x = input_value(0); auto y = input_value(1); - auto x_shape = x.get_shape(); // shape IJ - auto y_shape = y.get_shape(); // shape JK - auto delta_shape = delta->get_shape(); // shape IK + auto x_shape = x.get_shape(); // shape IJ + auto y_shape = y.get_shape(); // shape JK + auto delta_shape = delta.get_shape(); // shape IK Shape I_shape; Shape J_shape; diff --git a/src/ngraph/op/dot.hpp b/src/ngraph/op/dot.hpp index 16b77cc0fce..36b6864869a 100644 --- a/src/ngraph/op/dot.hpp +++ b/src/ngraph/op/dot.hpp @@ -91,7 +91,7 @@ namespace ngraph bool m_has_reduction_axes_count; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Dot; diff --git a/src/ngraph/op/embedding_lookup.hpp b/src/ngraph/op/embedding_lookup.hpp index ec97312fa27..08759a243ba 100644 --- a/src/ngraph/op/embedding_lookup.hpp +++ b/src/ngraph/op/embedding_lookup.hpp @@ -51,7 +51,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) override + const OutputVector& /* deltas */) override { throw ngraph_error("Not yet implemented"); } diff --git a/src/ngraph/op/exp.cpp b/src/ngraph/op/exp.cpp index 1dc649330fd..e8e6fb380e6 100644 --- a/src/ngraph/op/exp.cpp +++ b/src/ngraph/op/exp.cpp @@ -34,7 +34,7 @@ shared_ptr op::Exp::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Exp::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Exp::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/exp.hpp b/src/ngraph/op/exp.hpp index 
e2d8c882e47..2cbed5e0e85 100644 --- a/src/ngraph/op/exp.hpp +++ b/src/ngraph/op/exp.hpp @@ -41,7 +41,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Exp; diff --git a/src/ngraph/op/experimental/batch_mat_mul.cpp b/src/ngraph/op/experimental/batch_mat_mul.cpp index 6db4423d3c0..0409dacc5c1 100644 --- a/src/ngraph/op/experimental/batch_mat_mul.cpp +++ b/src/ngraph/op/experimental/batch_mat_mul.cpp @@ -77,7 +77,7 @@ void op::BatchMatMul::validate_and_infer_types() set_output_type(0, output_et, output_shape); } -void op::BatchMatMul::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::BatchMatMul::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); // NxIxK diff --git a/src/ngraph/op/experimental/batch_mat_mul.hpp b/src/ngraph/op/experimental/batch_mat_mul.hpp index 20c1c289cda..f47fb255741 100644 --- a/src/ngraph/op/experimental/batch_mat_mul.hpp +++ b/src/ngraph/op/experimental/batch_mat_mul.hpp @@ -48,7 +48,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; namespace util diff --git a/src/ngraph/op/experimental/dyn_broadcast.cpp b/src/ngraph/op/experimental/dyn_broadcast.cpp index ef52690040e..de80f0c5f66 100644 --- a/src/ngraph/op/experimental/dyn_broadcast.cpp +++ b/src/ngraph/op/experimental/dyn_broadcast.cpp @@ -131,7 +131,7 @@ shared_ptr op::DynBroadcast::copy_with_new_args(const NodeVector& new_args // TODO: This function is not implemented! 
void op::DynBroadcast::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for DynBroadcast"); } diff --git a/src/ngraph/op/experimental/dyn_broadcast.hpp b/src/ngraph/op/experimental/dyn_broadcast.hpp index 622ce4c2e01..f09658c682c 100644 --- a/src/ngraph/op/experimental/dyn_broadcast.hpp +++ b/src/ngraph/op/experimental/dyn_broadcast.hpp @@ -51,7 +51,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } } diff --git a/src/ngraph/op/experimental/dyn_pad.cpp b/src/ngraph/op/experimental/dyn_pad.cpp index e6956edaa33..f7fa960bd45 100644 --- a/src/ngraph/op/experimental/dyn_pad.cpp +++ b/src/ngraph/op/experimental/dyn_pad.cpp @@ -112,7 +112,7 @@ shared_ptr op::DynPad::copy_with_new_args(const NodeVector& new_args) cons // TODO: This function is not implemented! 
void op::DynPad::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for DynPad"); } diff --git a/src/ngraph/op/experimental/dyn_pad.hpp b/src/ngraph/op/experimental/dyn_pad.hpp index 7d6cd8ed85b..e846f1690f3 100644 --- a/src/ngraph/op/experimental/dyn_pad.hpp +++ b/src/ngraph/op/experimental/dyn_pad.hpp @@ -51,7 +51,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: PadMode m_pad_mode; diff --git a/src/ngraph/op/experimental/dyn_replace_slice.cpp b/src/ngraph/op/experimental/dyn_replace_slice.cpp index 71a17d20749..e32344ee345 100644 --- a/src/ngraph/op/experimental/dyn_replace_slice.cpp +++ b/src/ngraph/op/experimental/dyn_replace_slice.cpp @@ -155,7 +155,7 @@ shared_ptr op::DynReplaceSlice::copy_with_new_args(const NodeVector& new_a } void op::DynReplaceSlice::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for DynReplaceSlice"); } diff --git a/src/ngraph/op/experimental/dyn_replace_slice.hpp b/src/ngraph/op/experimental/dyn_replace_slice.hpp index 9d08689b452..81bd3c79cc5 100644 --- a/src/ngraph/op/experimental/dyn_replace_slice.hpp +++ b/src/ngraph/op/experimental/dyn_replace_slice.hpp @@ -66,7 +66,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: /// Helper method to compute output shape diff --git a/src/ngraph/op/experimental/dyn_reshape.cpp b/src/ngraph/op/experimental/dyn_reshape.cpp index 0fff3a8fa82..ca19d10415b 100644 --- a/src/ngraph/op/experimental/dyn_reshape.cpp +++ b/src/ngraph/op/experimental/dyn_reshape.cpp @@ -154,7 +154,7 @@ 
shared_ptr op::v0::DynReshape::copy_with_new_args(const NodeVector& new_ar } void op::v0::DynReshape::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for DynReshape"); } diff --git a/src/ngraph/op/experimental/dyn_reshape.hpp b/src/ngraph/op/experimental/dyn_reshape.hpp index 5e9b7bbf142..d8d234e7e5e 100644 --- a/src/ngraph/op/experimental/dyn_reshape.hpp +++ b/src/ngraph/op/experimental/dyn_reshape.hpp @@ -62,7 +62,7 @@ namespace ngraph void set_zero_flag(bool zero_flag) { m_zero_flag = zero_flag; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: bool m_zero_flag; diff --git a/src/ngraph/op/experimental/dyn_slice.cpp b/src/ngraph/op/experimental/dyn_slice.cpp index 3b8c8ac0a0c..1fe2531deaf 100644 --- a/src/ngraph/op/experimental/dyn_slice.cpp +++ b/src/ngraph/op/experimental/dyn_slice.cpp @@ -126,7 +126,7 @@ shared_ptr op::DynSlice::copy_with_new_args(const NodeVector& new_args) co } void op::DynSlice::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for DynSlice"); } diff --git a/src/ngraph/op/experimental/dyn_slice.hpp b/src/ngraph/op/experimental/dyn_slice.hpp index afc36342ab0..d13a62a5136 100644 --- a/src/ngraph/op/experimental/dyn_slice.hpp +++ b/src/ngraph/op/experimental/dyn_slice.hpp @@ -64,7 +64,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: /// Helper method to compute output shape diff --git a/src/ngraph/op/experimental/generate_mask.hpp b/src/ngraph/op/experimental/generate_mask.hpp index 28520e1d48d..623b7ef030a 100644 --- 
a/src/ngraph/op/experimental/generate_mask.hpp +++ b/src/ngraph/op/experimental/generate_mask.hpp @@ -69,7 +69,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) override + const OutputVector& /* deltas */) override { } @@ -128,7 +128,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) override + const OutputVector& /* deltas */) override { } diff --git a/src/ngraph/op/experimental/random_uniform.hpp b/src/ngraph/op/experimental/random_uniform.hpp index 1aaf4fe6164..eb7c9db0e5d 100644 --- a/src/ngraph/op/experimental/random_uniform.hpp +++ b/src/ngraph/op/experimental/random_uniform.hpp @@ -76,7 +76,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) override + const OutputVector& /* deltas */) override { } diff --git a/src/ngraph/op/experimental/tile.cpp b/src/ngraph/op/experimental/tile.cpp index e66c4f7c1f1..387689053ae 100644 --- a/src/ngraph/op/experimental/tile.cpp +++ b/src/ngraph/op/experimental/tile.cpp @@ -90,7 +90,8 @@ shared_ptr op::Tile::copy_with_new_args(const NodeVector& new_args) const } // TODO: This function is not implemented! 
-void op::Tile::generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& /* deltas */) +void op::Tile::generate_adjoints(autodiff::Adjoints& /* adjoints */, + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for Tile"); } diff --git a/src/ngraph/op/experimental/tile.hpp b/src/ngraph/op/experimental/tile.hpp index 0530b93efd8..e262711739b 100644 --- a/src/ngraph/op/experimental/tile.hpp +++ b/src/ngraph/op/experimental/tile.hpp @@ -45,7 +45,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Tile; diff --git a/src/ngraph/op/experimental/transpose.cpp b/src/ngraph/op/experimental/transpose.cpp index 8912368933c..6e194000673 100644 --- a/src/ngraph/op/experimental/transpose.cpp +++ b/src/ngraph/op/experimental/transpose.cpp @@ -74,7 +74,7 @@ shared_ptr op::Transpose::copy_with_new_args(const NodeVector& new_args) c // TODO(amprocte): This will require some way of inverting the permutation in-graph. (TensorFlow, // for example, has an InvertPermutation op, but that doesn't feel very nGraph-y somehow.) 
void op::Transpose::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for Transpose"); } diff --git a/src/ngraph/op/experimental/transpose.hpp b/src/ngraph/op/experimental/transpose.hpp index 60bdd306e5b..30c1bd3d7e9 100644 --- a/src/ngraph/op/experimental/transpose.hpp +++ b/src/ngraph/op/experimental/transpose.hpp @@ -49,7 +49,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Transpose; diff --git a/src/ngraph/op/fused/batch_mat_mul_transpose.cpp b/src/ngraph/op/fused/batch_mat_mul_transpose.cpp index ef65214587b..a21e384a09c 100644 --- a/src/ngraph/op/fused/batch_mat_mul_transpose.cpp +++ b/src/ngraph/op/fused/batch_mat_mul_transpose.cpp @@ -150,7 +150,7 @@ void op::BatchMatMulTranspose::validate_and_infer_types() } void op::BatchMatMulTranspose::generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) + const OutputVector& deltas) { auto delta = deltas.at(0); // NxIxK diff --git a/src/ngraph/op/fused/batch_mat_mul_transpose.hpp b/src/ngraph/op/fused/batch_mat_mul_transpose.hpp index da54638811b..289dbe76eb7 100644 --- a/src/ngraph/op/fused/batch_mat_mul_transpose.hpp +++ b/src/ngraph/op/fused/batch_mat_mul_transpose.hpp @@ -61,7 +61,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: bool m_transpose_arg0; diff --git a/src/ngraph/op/fused/conv_fused.cpp b/src/ngraph/op/fused/conv_fused.cpp index cb28bf141e0..423b4bd5658 100644 --- a/src/ngraph/op/fused/conv_fused.cpp +++ b/src/ngraph/op/fused/conv_fused.cpp @@ -226,7 +226,8 @@ NodeVector op::ConvolutionBias::decompose_op() const } } -void op::ConvolutionBias::generate_adjoints(autodiff::Adjoints& 
adjoints, const NodeVector& deltas) +void op::ConvolutionBias::generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) { auto delta = deltas.at(0); if (m_with_relu) @@ -264,8 +265,8 @@ void op::ConvolutionBias::generate_adjoints(autodiff::Adjoints& adjoints, const m_padding_below, m_padding_above, m_data_dilation_strides); - auto filter_delta = make_shared(filter_bias_backprop, 0); - auto bias_delta = make_shared(filter_bias_backprop, 1); + auto filter_delta = Output(filter_bias_backprop, 0); + auto bias_delta = Output(filter_bias_backprop, 1); adjoints.add_delta(filter, filter_delta); adjoints.add_delta(bias, bias_delta); diff --git a/src/ngraph/op/fused/conv_fused.hpp b/src/ngraph/op/fused/conv_fused.hpp index 75e931d22e7..ad4dee5c822 100644 --- a/src/ngraph/op/fused/conv_fused.hpp +++ b/src/ngraph/op/fused/conv_fused.hpp @@ -74,7 +74,7 @@ namespace ngraph virtual void validate_and_infer_types() override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; protected: Strides m_window_movement_strides; diff --git a/src/ngraph/op/fused/gelu.cpp b/src/ngraph/op/fused/gelu.cpp index cc26abd68fc..cb9141eacd0 100644 --- a/src/ngraph/op/fused/gelu.cpp +++ b/src/ngraph/op/fused/gelu.cpp @@ -80,7 +80,7 @@ void op::Gelu::pre_validate_and_infer_types() } } -void op::Gelu::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Gelu::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/fused/gelu.hpp b/src/ngraph/op/fused/gelu.hpp index 6f7708f759f..0625b3f4118 100644 --- a/src/ngraph/op/fused/gelu.hpp +++ b/src/ngraph/op/fused/gelu.hpp @@ -48,7 +48,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; /// \brief Backprop for Gelu(x) is 
GeluBackprop(x) * delta diff --git a/src/ngraph/op/fused/group_conv.cpp b/src/ngraph/op/fused/group_conv.cpp index 38717017382..1a7a8e2c1df 100644 --- a/src/ngraph/op/fused/group_conv.cpp +++ b/src/ngraph/op/fused/group_conv.cpp @@ -142,7 +142,7 @@ shared_ptr op::v1::GroupConvolution::copy_with_new_args(const NodeVector& } void op::v1::GroupConvolution::generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) + const OutputVector& deltas) { ngraph_error("Not Yet Implemented"); } @@ -307,7 +307,7 @@ void op::v1::GroupConvolutionBackpropData::validate_and_infer_types() } void op::v1::GroupConvolutionBackpropData::generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) + const OutputVector& deltas) { ngraph_error("Not Yet Implemented"); } @@ -534,8 +534,8 @@ NodeVector op::v0::GroupConvolution::decompose_op() const return {std::make_shared(convolution_nodes, concatenation_axis)}; } -void op::v0::GroupConvolution::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) +void op::GroupConvolution::generate_adjoints(autodiff::Adjoints& /* adjoints */, + const OutputVector& /* deltas */) { throw ngraph_error("NYI"); } @@ -600,49 +600,34 @@ shared_ptr NodeVector op::v0::GroupConvolutionBackpropData::decompose_op() const { - auto data_batch = input_value(0); auto filters = input_value(1); auto output_delta = input_value(2); auto data_shape = get_input_shape(0); - auto filters_shape = get_input_shape(1); - auto delta_shape = get_input_shape(2); NodeVector sliced_inputs; - for (size_t i = 0; i < get_groups(); ++i) - { - size_t channel_step = filters_shape.at(1); - - const Coordinate data_lower_bound{0, i * channel_step, 0, 0}; - const Coordinate data_upper_bound{ - data_shape.at(0), (i + 1) * channel_step, data_shape.at(2), data_shape.at(3)}; - auto sliced_data = - std::make_shared(data_batch, data_lower_bound, data_upper_bound); - - size_t filters_step = filters_shape.at(0) / get_groups(); - - const 
Coordinate filters_lower_bound{i * filters_step, 0, 0, 0}; - const Coordinate filters_upper_bound{ - (i + 1) * filters_step, filters_shape.at(1), filters_shape.at(2), filters_shape.at(3)}; - auto sliced_filters = - std::make_shared(filters, filters_lower_bound, filters_upper_bound); + auto groups = get_groups(); + // slice data shape + data_shape[1] /= groups; + // slice delta + auto sliced_delta = builder::split(output_delta, groups, 1); + // slice filters + auto sliced_filters = builder::split(filters, groups, 0); - const Coordinate delta_lower_bound{0, i * filters_step, 0, 0}; - const Coordinate delta_upper_bound{ - delta_shape.at(0), (i + 1) * filters_step, delta_shape.at(2), delta_shape.at(3)}; - auto sliced_delta = - std::make_shared(output_delta, delta_lower_bound, delta_upper_bound); + auto num_spatials = get_window_movement_strides().size(); - auto sliced_conv = - std::make_shared(sliced_data->get_shape(), - sliced_filters, - sliced_delta, - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - Strides{1, 1}); + for (size_t i = 0; i < groups; ++i) + { + auto sliced_conv = std::make_shared( + data_shape, + sliced_filters[i], + sliced_delta[i], + get_window_movement_strides(), + get_window_dilation_strides(), + get_padding_below(), + get_padding_above(), + Strides(num_spatials, 1)); // default data dilation strides sliced_inputs.push_back(sliced_conv); } diff --git a/src/ngraph/op/fused/group_conv.hpp b/src/ngraph/op/fused/group_conv.hpp index 189db564a42..eb2c4c40342 100644 --- a/src/ngraph/op/fused/group_conv.hpp +++ b/src/ngraph/op/fused/group_conv.hpp @@ -69,7 +69,7 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; /// \return The strides. 
const Strides& get_strides() const { return m_strides; } @@ -155,7 +155,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; @@ -247,7 +247,7 @@ namespace ngraph virtual void post_validate_and_infer_types() override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; bool has_groups_in_filters() const { return m_groups_in_filters; } protected: diff --git a/src/ngraph/op/fused/group_conv_transpose.cpp b/src/ngraph/op/fused/group_conv_transpose.cpp index 31d60841905..264fdec78a3 100644 --- a/src/ngraph/op/fused/group_conv_transpose.cpp +++ b/src/ngraph/op/fused/group_conv_transpose.cpp @@ -328,7 +328,7 @@ NodeVector op::GroupConvolutionTranspose::decompose_op() const } void op::GroupConvolutionTranspose::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error( "Generating adjoints is not yet implemented for GroupConvolutionTranspose node."); diff --git a/src/ngraph/op/fused/group_conv_transpose.hpp b/src/ngraph/op/fused/group_conv_transpose.hpp index 8ac9956c5cd..3224c6c6c5b 100644 --- a/src/ngraph/op/fused/group_conv_transpose.hpp +++ b/src/ngraph/op/fused/group_conv_transpose.hpp @@ -138,7 +138,7 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: /// diff --git a/src/ngraph/op/fused/layer_norm.cpp b/src/ngraph/op/fused/layer_norm.cpp index 8f675f76e54..a36282ba54b 100644 --- a/src/ngraph/op/fused/layer_norm.cpp +++ b/src/ngraph/op/fused/layer_norm.cpp @@ -234,7 
+234,7 @@ void op::LayerNorm::pre_validate_and_infer_types() set_output_type(0, input_element_type, norm_shape); } -void op::LayerNorm::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::LayerNorm::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); auto data = input_value(0); diff --git a/src/ngraph/op/fused/layer_norm.hpp b/src/ngraph/op/fused/layer_norm.hpp index 851e7a9ecd5..1bf34288acd 100644 --- a/src/ngraph/op/fused/layer_norm.hpp +++ b/src/ngraph/op/fused/layer_norm.hpp @@ -67,7 +67,7 @@ namespace ngraph int64_t get_begin_norm_axis() const { return m_begin_norm_axis; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: bool m_keep_stats{true}; diff --git a/src/ngraph/op/fused/lstm_sequence.cpp b/src/ngraph/op/fused/lstm_sequence.cpp index c95202f9d42..d8b8aecf44e 100644 --- a/src/ngraph/op/fused/lstm_sequence.cpp +++ b/src/ngraph/op/fused/lstm_sequence.cpp @@ -101,24 +101,24 @@ shared_ptr op::LSTMSequence::copy_with_new_args(const NodeVector& new_args } } -shared_ptr op::LSTMSequence::get_masked_node(const shared_ptr& data, +shared_ptr op::LSTMSequence::get_masked_node(const Output& data, int32_t time_step, size_t batch_axis, - const shared_ptr& default_value) const + const Output& default_value) const { - shared_ptr mask_value = default_value; + Output mask_value = default_value; // Create zero mask value node. - if (!mask_value) + if (!mask_value.get_node_shared_ptr()) { - mask_value = op::Constant::create(data->get_element_type(), - data->get_shape(), - vector(shape_size(data->get_shape()), 0.f)); + mask_value = op::Constant::create(data.get_element_type(), + data.get_shape(), + vector(shape_size(data.get_shape()), 0.f)); } // Create predicate nodes. The condition is whether current time step value // is greater than sequence length for respective batch inputs. 
shared_ptr curr_time_step_node = op::Constant::create( - element::i32, data->get_shape(), vector(shape_size(data->get_shape()), time_step)); + element::i32, data.get_shape(), vector(shape_size(data.get_shape()), time_step)); shared_ptr batch_seq_length = op::legacy_style_broadcast_for_binary_operation( @@ -197,8 +197,8 @@ NodeVector op::LSTMSequence::lstm_pass(bool is_reverse) const m_clip_threshold, m_input_forget); - shared_ptr H = get_output_element(lstm_cell, 0); - shared_ptr C = get_output_element(lstm_cell, 1); + Output H = lstm_cell->output(0); + Output C = lstm_cell->output(1); // Expand tensors with empty outermost dim, so we can later concatenate // them. diff --git a/src/ngraph/op/fused/lstm_sequence.hpp b/src/ngraph/op/fused/lstm_sequence.hpp index 943def72e6c..ec36186b263 100644 --- a/src/ngraph/op/fused/lstm_sequence.hpp +++ b/src/ngraph/op/fused/lstm_sequence.hpp @@ -162,11 +162,11 @@ namespace ngraph /// /// \return The masked value. /// - std::shared_ptr get_masked_node(const std::shared_ptr& data, - std::int32_t time_step, - std::size_t batch_axis = 0, - const std::shared_ptr& default_value = { - nullptr}) const; + std::shared_ptr + get_masked_node(const Output& data, + std::int32_t time_step, + std::size_t batch_axis = 0, + const Output& default_value = Output()) const; NodeVector lstm_pass(bool is_reverse = false) const; diff --git a/src/ngraph/op/fused/partial_slice.cpp b/src/ngraph/op/fused/partial_slice.cpp index 4e24a4df92f..f5db94d1b70 100644 --- a/src/ngraph/op/fused/partial_slice.cpp +++ b/src/ngraph/op/fused/partial_slice.cpp @@ -140,7 +140,7 @@ void op::PartialSlice::pre_validate_and_infer_types() ")."); } -void op::PartialSlice::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::PartialSlice::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { throw ngraph_error("op::PartialSlice::generate_adjoints function is not implemented yet"); } diff --git 
a/src/ngraph/op/fused/partial_slice.hpp b/src/ngraph/op/fused/partial_slice.hpp index 73905ded628..a68386c7b40 100644 --- a/src/ngraph/op/fused/partial_slice.hpp +++ b/src/ngraph/op/fused/partial_slice.hpp @@ -60,7 +60,7 @@ namespace ngraph const AxisVector& get_decrease_axes() const { return m_decrease_axes; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: AxisVector m_axes; diff --git a/src/ngraph/op/gather.cpp b/src/ngraph/op/gather.cpp index 2e42b43b329..ea067e49b01 100644 --- a/src/ngraph/op/gather.cpp +++ b/src/ngraph/op/gather.cpp @@ -97,7 +97,7 @@ void op::v0::Gather::validate_and_infer_types() } void op::v0::Gather::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Not yet implemented"); } @@ -197,7 +197,7 @@ size_t op::v1::Gather::get_axis() const } void op::v1::Gather::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Not yet implemented"); } diff --git a/src/ngraph/op/gather.hpp b/src/ngraph/op/gather.hpp index 8f8485b3140..ead8646101d 100644 --- a/src/ngraph/op/gather.hpp +++ b/src/ngraph/op/gather.hpp @@ -39,7 +39,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; size_t get_axis() const { return m_axis; } void set_axis(size_t axis) { m_axis = axis; } @@ -73,7 +73,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; diff --git a/src/ngraph/op/gather_nd.hpp b/src/ngraph/op/gather_nd.hpp 
index 8ba5ad81756..d6e414cf6c9 100644 --- a/src/ngraph/op/gather_nd.hpp +++ b/src/ngraph/op/gather_nd.hpp @@ -42,7 +42,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) override + const OutputVector& /* deltas */) override { throw ngraph_error("Not yet implemented"); } diff --git a/src/ngraph/op/gather_tree.cpp b/src/ngraph/op/gather_tree.cpp index ba345ef2370..d857dfed00e 100644 --- a/src/ngraph/op/gather_tree.cpp +++ b/src/ngraph/op/gather_tree.cpp @@ -78,7 +78,7 @@ void op::v1::GatherTree::validate_and_infer_types() } void op::v1::GatherTree::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints is not implemented for GatherTree"); } diff --git a/src/ngraph/op/gather_tree.hpp b/src/ngraph/op/gather_tree.hpp index 95cc3937926..88181327c37 100644 --- a/src/ngraph/op/gather_tree.hpp +++ b/src/ngraph/op/gather_tree.hpp @@ -51,7 +51,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } } diff --git a/src/ngraph/op/get_output_element.cpp b/src/ngraph/op/get_output_element.cpp index dc1e4ec1612..6a0b33ba913 100644 --- a/src/ngraph/op/get_output_element.cpp +++ b/src/ngraph/op/get_output_element.cpp @@ -59,11 +59,12 @@ NodeVector op::GetOutputElement::get_arguments() const return NodeVector{input_value(0).get_node_shared_ptr()}; } -void op::GetOutputElement::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::GetOutputElement::generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) { auto delta = deltas.at(0); - adjoints.add_delta(input_value(0).get_node_shared_ptr(), delta, get_n()); + adjoints.add_delta(input_value(0), delta); } NodeVector op::get_output_elements(const shared_ptr& 
mon) diff --git a/src/ngraph/op/get_output_element.hpp b/src/ngraph/op/get_output_element.hpp index 4c5d3b604bc..00ca61d901a 100644 --- a/src/ngraph/op/get_output_element.hpp +++ b/src/ngraph/op/get_output_element.hpp @@ -52,7 +52,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; size_t m_n; }; } diff --git a/src/ngraph/op/log.cpp b/src/ngraph/op/log.cpp index a6cd6113477..6125b959d16 100644 --- a/src/ngraph/op/log.cpp +++ b/src/ngraph/op/log.cpp @@ -34,7 +34,7 @@ shared_ptr op::Log::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Log::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Log::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/log.hpp b/src/ngraph/op/log.hpp index 77237686991..5e581f129c9 100644 --- a/src/ngraph/op/log.hpp +++ b/src/ngraph/op/log.hpp @@ -41,7 +41,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Log; diff --git a/src/ngraph/op/lrn.cpp b/src/ngraph/op/lrn.cpp index 2ace854192e..b952ec6b6e0 100644 --- a/src/ngraph/op/lrn.cpp +++ b/src/ngraph/op/lrn.cpp @@ -116,7 +116,8 @@ shared_ptr op::LRN::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0), new_args.at(1), m_alpha, m_beta, m_bias, m_size); } -void op::LRN::generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& /* deltas */) +void op::LRN::generate_adjoints(autodiff::Adjoints& /* adjoints */, + const OutputVector& /* deltas */) { throw ngraph_error("NYI"); } diff --git a/src/ngraph/op/lrn.hpp b/src/ngraph/op/lrn.hpp index f0f46c52b54..3baeade62da 100644 --- 
a/src/ngraph/op/lrn.hpp +++ b/src/ngraph/op/lrn.hpp @@ -74,7 +74,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; double m_alpha; double m_beta; diff --git a/src/ngraph/op/max_pool.cpp b/src/ngraph/op/max_pool.cpp index 805c52c8f4d..7cb70ec49ce 100644 --- a/src/ngraph/op/max_pool.cpp +++ b/src/ngraph/op/max_pool.cpp @@ -251,7 +251,7 @@ shared_ptr op::v0::MaxPoolBackprop::copy_with_new_args(const NodeVector& n m_padding_above); } -void op::v0::MaxPool::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::MaxPool::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (m_ceil_mode) { @@ -459,7 +459,7 @@ shared_ptr op::v1::MaxPoolBackprop::copy_with_new_args(const NodeVector& n new_args.at(0), new_args.at(1), m_strides, m_pads_begin, m_pads_end, m_kernel); } -void op::v1::MaxPool::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::MaxPool::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (m_rounding_type == op::RoundingType::CEIL) { diff --git a/src/ngraph/op/max_pool.hpp b/src/ngraph/op/max_pool.hpp index 5d2ef4f3d06..0349718d130 100644 --- a/src/ngraph/op/max_pool.hpp +++ b/src/ngraph/op/max_pool.hpp @@ -136,7 +136,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; Shape m_window_shape; Strides m_window_movement_strides; @@ -279,7 +279,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; Shape m_kernel; Strides m_strides; diff --git a/src/ngraph/op/maximum.cpp b/src/ngraph/op/maximum.cpp index 5e171e28ba3..09a08b0d50b 100644 --- a/src/ngraph/op/maximum.cpp +++ 
b/src/ngraph/op/maximum.cpp @@ -43,7 +43,7 @@ shared_ptr op::v0::Maximum::copy_with_new_args(const NodeVector& new_args) return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v0::Maximum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Maximum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { @@ -80,7 +80,7 @@ shared_ptr op::v1::Maximum::copy_with_new_args(const NodeVector& new_args) return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v1::Maximum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Maximum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { diff --git a/src/ngraph/op/maximum.hpp b/src/ngraph/op/maximum.hpp index 0de1965c520..fdd511b4c6d 100644 --- a/src/ngraph/op/maximum.hpp +++ b/src/ngraph/op/maximum.hpp @@ -50,7 +50,7 @@ namespace ngraph virtual bool is_commutative() const override { return true; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v0 @@ -85,7 +85,7 @@ namespace ngraph size_t get_version() const override { return 1; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v1 diff --git a/src/ngraph/op/minimum.cpp b/src/ngraph/op/minimum.cpp index 5218418c6e2..ae1761c12ad 100644 --- a/src/ngraph/op/minimum.cpp +++ b/src/ngraph/op/minimum.cpp @@ -43,7 +43,7 @@ shared_ptr op::v0::Minimum::copy_with_new_args(const NodeVector& new_args) return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v0::Minimum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) 
+void op::v0::Minimum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { @@ -79,7 +79,7 @@ shared_ptr op::v1::Minimum::copy_with_new_args(const NodeVector& new_args) return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v1::Minimum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Minimum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { diff --git a/src/ngraph/op/minimum.hpp b/src/ngraph/op/minimum.hpp index 6ddbdff5e1e..f250baee3b8 100644 --- a/src/ngraph/op/minimum.hpp +++ b/src/ngraph/op/minimum.hpp @@ -50,7 +50,7 @@ namespace ngraph virtual bool is_commutative() const override { return true; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v0 @@ -85,7 +85,7 @@ namespace ngraph size_t get_version() const override { return 1; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v1 diff --git a/src/ngraph/op/multiply.cpp b/src/ngraph/op/multiply.cpp index 3221588cc39..db889300f90 100644 --- a/src/ngraph/op/multiply.cpp +++ b/src/ngraph/op/multiply.cpp @@ -37,7 +37,7 @@ shared_ptr op::v0::Multiply::copy_with_new_args(const NodeVector& new_args return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v0::Multiply::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Multiply::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { @@ -71,7 +71,7 @@ shared_ptr op::v1::Multiply::copy_with_new_args(const NodeVector& new_args return make_shared(new_args.at(0), 
new_args.at(1), this->get_autob()); } -void op::v1::Multiply::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Multiply::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { diff --git a/src/ngraph/op/multiply.hpp b/src/ngraph/op/multiply.hpp index 33a6c66b95c..ecc9e19ab82 100644 --- a/src/ngraph/op/multiply.hpp +++ b/src/ngraph/op/multiply.hpp @@ -50,7 +50,7 @@ namespace ngraph virtual bool is_commutative() const override { return true; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v0 @@ -85,7 +85,7 @@ namespace ngraph size_t get_version() const override { return 1; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v1 diff --git a/src/ngraph/op/negative.cpp b/src/ngraph/op/negative.cpp index ac1e120cea9..8a7f237be6c 100644 --- a/src/ngraph/op/negative.cpp +++ b/src/ngraph/op/negative.cpp @@ -33,7 +33,7 @@ shared_ptr op::Negative::copy_with_new_args(const NodeVector& new_args) co return make_shared(new_args.at(0)); } -void op::Negative::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Negative::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/negative.hpp b/src/ngraph/op/negative.hpp index b36d9d566a2..404e8309058 100644 --- a/src/ngraph/op/negative.hpp +++ b/src/ngraph/op/negative.hpp @@ -42,7 +42,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Negative; diff --git a/src/ngraph/op/one_hot.cpp b/src/ngraph/op/one_hot.cpp index 7432d25212b..b25316b3e3e 
100644 --- a/src/ngraph/op/one_hot.cpp +++ b/src/ngraph/op/one_hot.cpp @@ -165,10 +165,8 @@ void op::v1::OneHot::validate_and_infer_types() auto depth_element_type = depth->get_output_element_type(0); NODE_VALIDATION_CHECK(this, - depth_element_type == element::i8 || - depth_element_type == element::i32 || - depth_element_type == element::i64, - "'depth' input element type must be i8, i32 or i64 (got ", + depth_element_type.is_integral(), + "'depth' input element type must be an integer (got ", depth_element_type, ")."); @@ -179,7 +177,8 @@ void op::v1::OneHot::validate_and_infer_types() depth->get_shape(), " elements)."); - int64_t depth_val = read_scalar_int_from_constant_node(depth); + const auto depth_constant = as_type_ptr(depth); + int64_t depth_val = depth_constant->cast_vector()[0]; NODE_VALIDATION_CHECK(this, depth_val > 0, @@ -201,38 +200,3 @@ shared_ptr op::v1::OneHot::copy_with_new_args(const NodeVector& new_args) return make_shared( new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), m_axis); } - -size_t op::v1::OneHot::read_scalar_int_from_constant_node(const shared_ptr& node) const -{ - size_t scalar; - auto node_element_type = node->get_output_element_type(0); - const auto constant = as_type_ptr(node); - -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wswitch-enum" -#endif - switch (static_cast(node_element_type)) - { - case element::Type_t::i8: - scalar = static_cast(constant->get_vector()[0]); - break; - case element::Type_t::i32: - scalar = static_cast(constant->get_vector()[0]); - break; - case element::Type_t::i64: - scalar = static_cast(constant->get_vector()[0]); - break; - default: - NODE_VALIDATION_CHECK(node.get(), - false, - "Expected integer input of element type i8, i32 or i64 (got ", - node_element_type, - ")."); - } -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - - return scalar; -} diff --git a/src/ngraph/op/one_hot.hpp b/src/ngraph/op/one_hot.hpp index 
d6cf24dd3af..a8194772ecb 100644 --- a/src/ngraph/op/one_hot.hpp +++ b/src/ngraph/op/one_hot.hpp @@ -107,8 +107,6 @@ namespace ngraph void set_axis(int64_t axis) { m_axis = axis; } protected: int64_t m_axis; - - size_t read_scalar_int_from_constant_node(const std::shared_ptr& node) const; }; } // default opset version diff --git a/src/ngraph/op/pad.cpp b/src/ngraph/op/pad.cpp index f6d4d073e12..1084d84bdca 100644 --- a/src/ngraph/op/pad.cpp +++ b/src/ngraph/op/pad.cpp @@ -162,7 +162,7 @@ shared_ptr op::v0::Pad::copy_with_new_args(const NodeVector& new_args) con and push that back. */ void op::v0::Pad::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw invalid_argument("Autodiff is not yet implemented for Pad"); } @@ -357,7 +357,7 @@ shared_ptr op::v1::Pad::copy_with_new_args(const NodeVector& new_args) con } void op::v1::Pad::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw invalid_argument("Autodiff is not yet implemented for Pad:v1"); } diff --git a/src/ngraph/op/pad.hpp b/src/ngraph/op/pad.hpp index 4e5537a7f14..f411701e467 100644 --- a/src/ngraph/op/pad.hpp +++ b/src/ngraph/op/pad.hpp @@ -77,7 +77,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; CoordinateDiff m_padding_below; CoordinateDiff m_padding_above; Shape m_padding_interior_fake; // LEGACY: This is all zeros. 
@@ -142,7 +142,7 @@ namespace ngraph void set_pad_mode(PadMode pad_mode) { m_pad_mode = pad_mode; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: PadMode m_pad_mode; diff --git a/src/ngraph/op/parameter.cpp b/src/ngraph/op/parameter.cpp index 8293864ddd9..04c6f5a77bc 100644 --- a/src/ngraph/op/parameter.cpp +++ b/src/ngraph/op/parameter.cpp @@ -55,7 +55,8 @@ shared_ptr op::Parameter::copy_with_new_args(const NodeVector& new_args) c return make_shared(m_element_type, m_partial_shape); } -void op::Parameter::generate_adjoints(autodiff::Adjoints& /* adjoints */, const NodeVector& deltas) +void op::Parameter::generate_adjoints(autodiff::Adjoints& /* adjoints */, + const OutputVector& deltas) { auto delta = deltas.at(0); } diff --git a/src/ngraph/op/parameter.hpp b/src/ngraph/op/parameter.hpp index bfd3384e819..95488741c9b 100644 --- a/src/ngraph/op/parameter.hpp +++ b/src/ngraph/op/parameter.hpp @@ -32,7 +32,7 @@ namespace ngraph { protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; public: static constexpr NodeTypeInfo type_info{"Parameter", 0}; diff --git a/src/ngraph/op/power.cpp b/src/ngraph/op/power.cpp index 5e0f5cdb9fa..a0a239e8d7f 100644 --- a/src/ngraph/op/power.cpp +++ b/src/ngraph/op/power.cpp @@ -40,7 +40,7 @@ shared_ptr op::v0::Power::copy_with_new_args(const NodeVector& new_args) c return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v0::Power::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Power::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { @@ -76,7 +76,7 @@ shared_ptr op::v1::Power::copy_with_new_args(const NodeVector& new_args) c return make_shared(new_args.at(0), new_args.at(1), 
this->get_autob()); } -void op::v1::Power::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Power::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { diff --git a/src/ngraph/op/power.hpp b/src/ngraph/op/power.hpp index 2de8fc6428b..18b8b5da837 100644 --- a/src/ngraph/op/power.hpp +++ b/src/ngraph/op/power.hpp @@ -63,7 +63,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v0 @@ -110,7 +110,7 @@ namespace ngraph size_t get_version() const override { return 1; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v1 diff --git a/src/ngraph/op/quantize.cpp b/src/ngraph/op/quantize.cpp index 92b2f256975..fcbf0149543 100644 --- a/src/ngraph/op/quantize.cpp +++ b/src/ngraph/op/quantize.cpp @@ -161,7 +161,7 @@ shared_ptr op::Quantize::copy_with_new_args(const NodeVector& new_args) co } void op::Quantize::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Forward-propagation-only operation"); } diff --git a/src/ngraph/op/quantize.hpp b/src/ngraph/op/quantize.hpp index 3ba3a1d6713..0c44c36906f 100644 --- a/src/ngraph/op/quantize.hpp +++ b/src/ngraph/op/quantize.hpp @@ -105,7 +105,7 @@ namespace ngraph RoundMode get_round_mode() const { return m_round_mode; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: ngraph::element::Type m_type; diff --git a/src/ngraph/op/quantized_convolution.cpp b/src/ngraph/op/quantized_convolution.cpp index 323271932a3..4177057f45f 100644 --- 
a/src/ngraph/op/quantized_convolution.cpp +++ b/src/ngraph/op/quantized_convolution.cpp @@ -194,7 +194,7 @@ shared_ptr op::QuantizedConvolution::copy_with_new_args(const NodeVector& } void op::QuantizedConvolution::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Forward-propagation-only operation"); } diff --git a/src/ngraph/op/quantized_convolution.hpp b/src/ngraph/op/quantized_convolution.hpp index 67c33bfbe04..b47bd165687 100644 --- a/src/ngraph/op/quantized_convolution.hpp +++ b/src/ngraph/op/quantized_convolution.hpp @@ -91,7 +91,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; protected: Strides m_window_movement_strides; diff --git a/src/ngraph/op/quantized_dot.cpp b/src/ngraph/op/quantized_dot.cpp index 5dbd7150b5a..017bcfd90bb 100644 --- a/src/ngraph/op/quantized_dot.cpp +++ b/src/ngraph/op/quantized_dot.cpp @@ -218,7 +218,7 @@ shared_ptr op::QuantizedDot::copy_with_new_args(const NodeVector& new_args } void op::QuantizedDot::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Forward-propagation-only operation"); } diff --git a/src/ngraph/op/quantized_dot.hpp b/src/ngraph/op/quantized_dot.hpp index a933d79bef4..e5cb835a77d 100644 --- a/src/ngraph/op/quantized_dot.hpp +++ b/src/ngraph/op/quantized_dot.hpp @@ -74,7 +74,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; protected: size_t m_reduction_axes_count; diff --git a/src/ngraph/op/reduce_logical_and.hpp b/src/ngraph/op/reduce_logical_and.hpp index de28818d55e..76ef75eb6bc 
100644 --- a/src/ngraph/op/reduce_logical_and.hpp +++ b/src/ngraph/op/reduce_logical_and.hpp @@ -28,10 +28,9 @@ namespace ngraph /// /// The reduction is performed over slices of the first input. The slices shape depends /// on the values passed to the second input - the axes. - class ReduceLogicalAnd : public util::LogicalReductionKeepDims + class NGRAPH_API ReduceLogicalAnd : public util::LogicalReductionKeepDims { public: - NGRAPH_API static constexpr NodeTypeInfo type_info{"ReduceLogicalAnd", 1}; const NodeTypeInfo& get_type_info() const override { return type_info; } ReduceLogicalAnd() = default; diff --git a/src/ngraph/op/reduce_logical_or.hpp b/src/ngraph/op/reduce_logical_or.hpp index 189028960fd..1e2bec6721a 100644 --- a/src/ngraph/op/reduce_logical_or.hpp +++ b/src/ngraph/op/reduce_logical_or.hpp @@ -28,10 +28,9 @@ namespace ngraph /// /// The reduction is performed over slices of the first input. The slices shape depends /// on the values passed to the second input - the axes. 
- class ReduceLogicalOr : public util::LogicalReductionKeepDims + class NGRAPH_API ReduceLogicalOr : public util::LogicalReductionKeepDims { public: - NGRAPH_API static constexpr NodeTypeInfo type_info{"ReduceLogicalOr", 1}; const NodeTypeInfo& get_type_info() const override { return type_info; } ReduceLogicalOr() = default; diff --git a/src/ngraph/op/reduce_sum.cpp b/src/ngraph/op/reduce_sum.cpp index e55d4c8991b..60b440d8e87 100644 --- a/src/ngraph/op/reduce_sum.cpp +++ b/src/ngraph/op/reduce_sum.cpp @@ -42,7 +42,7 @@ shared_ptr op::v1::ReduceSum::copy_with_new_args(const NodeVector& new_arg return make_shared(new_args.at(0), new_args.at(1), get_keep_dims()); } -void op::v1::ReduceSum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::ReduceSum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/reduce_sum.hpp b/src/ngraph/op/reduce_sum.hpp index 531036d23d6..12f55ee3a92 100644 --- a/src/ngraph/op/reduce_sum.hpp +++ b/src/ngraph/op/reduce_sum.hpp @@ -100,7 +100,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } } diff --git a/src/ngraph/op/relu.cpp b/src/ngraph/op/relu.cpp index 98892c2ed02..c94d174fb53 100644 --- a/src/ngraph/op/relu.cpp +++ b/src/ngraph/op/relu.cpp @@ -35,7 +35,7 @@ shared_ptr op::Relu::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -op::ReluBackprop::ReluBackprop(shared_ptr arg, shared_ptr delta) +op::ReluBackprop::ReluBackprop(const Output& arg, const Output& delta) : BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE) { constructor_validate_and_infer_types(); @@ -47,10 +47,10 @@ shared_ptr op::ReluBackprop::copy_with_new_args(const NodeVector& new_args return make_shared(new_args.at(0), new_args.at(1)); } -void 
op::Relu::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Relu::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); - auto backprop = make_shared(shared_from_this(), delta); + auto backprop = make_shared(output(0), delta); adjoints.add_delta(input_value(0), backprop); } diff --git a/src/ngraph/op/relu.hpp b/src/ngraph/op/relu.hpp index 653fa671ea4..a2db2f9544c 100644 --- a/src/ngraph/op/relu.hpp +++ b/src/ngraph/op/relu.hpp @@ -46,7 +46,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; /// \brief Elementwise ReluBackprop operation. @@ -63,8 +63,7 @@ namespace ngraph /// \brief Constructs a ReluBackprop operation. /// /// \param arg Node that produces the relu forward input tensor. - ReluBackprop(std::shared_ptr arg, - std::shared_ptr delta); + ReluBackprop(const Output& arg, const Output& delta); virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; diff --git a/src/ngraph/op/replace_slice.cpp b/src/ngraph/op/replace_slice.cpp index dc2d33ae619..e4752778a0f 100644 --- a/src/ngraph/op/replace_slice.cpp +++ b/src/ngraph/op/replace_slice.cpp @@ -174,7 +174,7 @@ shared_ptr op::ReplaceSlice::copy_with_new_args(const NodeVector& new_args new_args.at(0), new_args.at(1), m_lower_bounds, m_upper_bounds, m_strides); } -void op::ReplaceSlice::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::ReplaceSlice::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/replace_slice.hpp b/src/ngraph/op/replace_slice.hpp index 2576d443977..6f43fea1b42 100644 --- a/src/ngraph/op/replace_slice.hpp +++ b/src/ngraph/op/replace_slice.hpp @@ -107,7 +107,7 @@ namespace ngraph void 
set_strides(const Strides& strides) { m_strides = strides; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; Coordinate m_lower_bounds; Coordinate m_upper_bounds; diff --git a/src/ngraph/op/reshape.cpp b/src/ngraph/op/reshape.cpp index fce224ede01..42de277c179 100644 --- a/src/ngraph/op/reshape.cpp +++ b/src/ngraph/op/reshape.cpp @@ -114,7 +114,7 @@ shared_ptr op::Reshape::copy_with_new_args(const NodeVector& new_args) con return make_shared(new_args.at(0), m_input_order, m_output_shape); } -void op::Reshape::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Reshape::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); @@ -278,7 +278,7 @@ shared_ptr op::v1::Reshape::copy_with_new_args(const NodeVector& new_args) } void op::v1::Reshape::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for Reshape"); } diff --git a/src/ngraph/op/reshape.hpp b/src/ngraph/op/reshape.hpp index f9a3ef17d55..e6b4e724816 100644 --- a/src/ngraph/op/reshape.hpp +++ b/src/ngraph/op/reshape.hpp @@ -101,7 +101,7 @@ namespace ngraph void set_is_transpose(bool is_transpose) { m_is_transpose = is_transpose; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; AxisVector m_input_order; Shape m_output_shape; @@ -147,7 +147,7 @@ namespace ngraph void set_special_zero(bool special_zero) { m_special_zero = special_zero; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: bool m_special_zero; diff --git a/src/ngraph/op/result.cpp b/src/ngraph/op/result.cpp index f014cbc628c..b6d7fd55373 
100644 --- a/src/ngraph/op/result.cpp +++ b/src/ngraph/op/result.cpp @@ -51,7 +51,7 @@ shared_ptr op::Result::copy_with_new_args(const NodeVector& new_args) cons return std::move(res); } -void op::Result::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Result::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/result.hpp b/src/ngraph/op/result.hpp index 03dfeae337c..b9f4dd69e4c 100644 --- a/src/ngraph/op/result.hpp +++ b/src/ngraph/op/result.hpp @@ -46,7 +46,7 @@ namespace ngraph bool needs_default_layout() const { return m_needs_default_layout; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: bool m_needs_default_layout{false}; diff --git a/src/ngraph/op/reverse.cpp b/src/ngraph/op/reverse.cpp index a44c6737f0f..d2ac686eec5 100644 --- a/src/ngraph/op/reverse.cpp +++ b/src/ngraph/op/reverse.cpp @@ -62,7 +62,7 @@ shared_ptr op::v0::Reverse::copy_with_new_args(const NodeVector& new_args) return make_shared(new_args.at(0), m_reversed_axes); } -void op::v0::Reverse::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Reverse::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); @@ -175,7 +175,7 @@ shared_ptr op::v1::Reverse::copy_with_new_args(const NodeVector& new_args) return make_shared(new_args.at(0), new_args.at(1), m_mode); } -void op::v1::Reverse::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Reverse::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { const auto delta = deltas.at(0); diff --git a/src/ngraph/op/reverse.hpp b/src/ngraph/op/reverse.hpp index e54710262a6..63754323a23 100644 --- a/src/ngraph/op/reverse.hpp +++ b/src/ngraph/op/reverse.hpp @@ -74,7 +74,7 @@ namespace ngraph 
protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; AxisSet m_reversed_axes; }; @@ -119,7 +119,7 @@ namespace ngraph virtual size_t get_version() const override { return 1; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; Mode mode_from_string(const std::string& mode) const; diff --git a/src/ngraph/op/reverse_sequence.cpp b/src/ngraph/op/reverse_sequence.cpp index 38303cd17ed..7c90787df2e 100644 --- a/src/ngraph/op/reverse_sequence.cpp +++ b/src/ngraph/op/reverse_sequence.cpp @@ -113,7 +113,8 @@ shared_ptr op::ReverseSequence::copy_with_new_args(const NodeVector& new_a return move(res); } -void op::ReverseSequence::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::ReverseSequence::generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) { auto x = input_value(0); auto rs_delta = diff --git a/src/ngraph/op/reverse_sequence.hpp b/src/ngraph/op/reverse_sequence.hpp index ad02860d7d2..43d49c42bd0 100644 --- a/src/ngraph/op/reverse_sequence.hpp +++ b/src/ngraph/op/reverse_sequence.hpp @@ -51,7 +51,7 @@ namespace ngraph void set_sequence_axis(int64_t sequence_axis) { m_seq_axis = sequence_axis; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: int64_t m_batch_axis; diff --git a/src/ngraph/op/scatter_add.hpp b/src/ngraph/op/scatter_add.hpp index 025c5a37c20..e486ad633ab 100644 --- a/src/ngraph/op/scatter_add.hpp +++ b/src/ngraph/op/scatter_add.hpp @@ -45,7 +45,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) override + const OutputVector& /* deltas */) override { throw ngraph_error("Not yet implemented"); 
} diff --git a/src/ngraph/op/scatter_nd_add.hpp b/src/ngraph/op/scatter_nd_add.hpp index a7fd94d54e7..f41f02f3062 100644 --- a/src/ngraph/op/scatter_nd_add.hpp +++ b/src/ngraph/op/scatter_nd_add.hpp @@ -45,7 +45,7 @@ namespace ngraph void validate_and_infer_types() override; void generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) override + const OutputVector& /* deltas */) override { throw ngraph_error("Not yet implemented"); } diff --git a/src/ngraph/op/select.cpp b/src/ngraph/op/select.cpp index 816b4a88d38..cca7ce9a79d 100644 --- a/src/ngraph/op/select.cpp +++ b/src/ngraph/op/select.cpp @@ -94,7 +94,7 @@ bool op::v1::Select::visit_attributes(AttributeVisitor& visitor) return true; } -void op::v1::Select::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Select::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_auto_broadcast().m_type != op::AutoBroadcastType::NONE) { @@ -156,7 +156,7 @@ shared_ptr op::v0::Select::copy_with_new_args(const NodeVector& new_args) return make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } -void op::v0::Select::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Select::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/select.hpp b/src/ngraph/op/select.hpp index 1a61799378b..c30fcfefdb8 100644 --- a/src/ngraph/op/select.hpp +++ b/src/ngraph/op/select.hpp @@ -63,7 +63,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } @@ -126,7 +126,7 @@ namespace ngraph const AutoBroadcastSpec& get_autob() const override { return m_auto_broadcast; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) 
override; private: AutoBroadcastSpec m_auto_broadcast; diff --git a/src/ngraph/op/sigmoid.cpp b/src/ngraph/op/sigmoid.cpp index c32fe6314f9..56b128d6c60 100644 --- a/src/ngraph/op/sigmoid.cpp +++ b/src/ngraph/op/sigmoid.cpp @@ -48,7 +48,7 @@ shared_ptr op::SigmoidBackprop::copy_with_new_args(const NodeVector& new_a return make_shared(new_args.at(0), new_args.at(1)); } -void op::Sigmoid::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Sigmoid::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/sigmoid.hpp b/src/ngraph/op/sigmoid.hpp index 692fcb497cd..1136ca95e9d 100644 --- a/src/ngraph/op/sigmoid.hpp +++ b/src/ngraph/op/sigmoid.hpp @@ -37,7 +37,7 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; /// \brief Elementwise SigmoidBackprop operation. 
diff --git a/src/ngraph/op/sin.cpp b/src/ngraph/op/sin.cpp index 82186b491c7..5a79aeb472e 100644 --- a/src/ngraph/op/sin.cpp +++ b/src/ngraph/op/sin.cpp @@ -35,7 +35,7 @@ shared_ptr op::Sin::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Sin::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Sin::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/sin.hpp b/src/ngraph/op/sin.hpp index 004cc85a444..403e37feb63 100644 --- a/src/ngraph/op/sin.hpp +++ b/src/ngraph/op/sin.hpp @@ -55,7 +55,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Sin; diff --git a/src/ngraph/op/sinh.cpp b/src/ngraph/op/sinh.cpp index f2c43e775cc..39605a2e200 100644 --- a/src/ngraph/op/sinh.cpp +++ b/src/ngraph/op/sinh.cpp @@ -35,7 +35,7 @@ shared_ptr op::Sinh::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Sinh::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Sinh::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/sinh.hpp b/src/ngraph/op/sinh.hpp index 888a092a843..ba207f90ff5 100644 --- a/src/ngraph/op/sinh.hpp +++ b/src/ngraph/op/sinh.hpp @@ -41,7 +41,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Sinh; diff --git a/src/ngraph/op/slice.cpp b/src/ngraph/op/slice.cpp index 350b4826988..24ec02f699f 100644 --- a/src/ngraph/op/slice.cpp +++ b/src/ngraph/op/slice.cpp @@ -131,7 +131,7 @@ shared_ptr op::Slice::copy_with_new_args(const NodeVector& new_args) const return 
make_shared(new_args.at(0), m_lower_bounds, m_upper_bounds, m_strides); } -void op::Slice::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Slice::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/slice.hpp b/src/ngraph/op/slice.hpp index 8b36d80bd35..2b2d38beffe 100644 --- a/src/ngraph/op/slice.hpp +++ b/src/ngraph/op/slice.hpp @@ -70,7 +70,7 @@ namespace ngraph const Strides& get_strides() const { return m_strides; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; Coordinate m_lower_bounds; Coordinate m_upper_bounds; diff --git a/src/ngraph/op/softmax.cpp b/src/ngraph/op/softmax.cpp index a262e64694f..d3948894b7d 100644 --- a/src/ngraph/op/softmax.cpp +++ b/src/ngraph/op/softmax.cpp @@ -124,7 +124,7 @@ shared_ptr op::v0::Softmax::copy_with_new_args(const NodeVector& new_args) return make_shared(new_args.at(0), new_args.at(1)); } -void op::v0::Softmax::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Softmax::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); NGRAPH_CHECK(are_axes_constant(), "axes need to be constant"); @@ -188,7 +188,7 @@ shared_ptr op::v1::Softmax::copy_with_new_args(const NodeVector& new_args) } void op::v1::Softmax::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("op::v1::Softmax::generate_adjoints function is not implemented yet"); diff --git a/src/ngraph/op/softmax.hpp b/src/ngraph/op/softmax.hpp index 74dc7c9c49b..49ec1c09d61 100644 --- a/src/ngraph/op/softmax.hpp +++ b/src/ngraph/op/softmax.hpp @@ -63,7 +63,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; 
+ const OutputVector& deltas) override; }; } @@ -98,7 +98,7 @@ namespace ngraph void set_axis(const size_t axis) { m_axis = axis; } protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: size_t m_axis; diff --git a/src/ngraph/op/sqrt.cpp b/src/ngraph/op/sqrt.cpp index d2d745ddf12..f0e8754f9d8 100644 --- a/src/ngraph/op/sqrt.cpp +++ b/src/ngraph/op/sqrt.cpp @@ -35,7 +35,7 @@ shared_ptr op::Sqrt::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Sqrt::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Sqrt::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/sqrt.hpp b/src/ngraph/op/sqrt.hpp index 089efc89305..6242dad0a5c 100644 --- a/src/ngraph/op/sqrt.hpp +++ b/src/ngraph/op/sqrt.hpp @@ -55,7 +55,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Sqrt; diff --git a/src/ngraph/op/strided_slice.cpp b/src/ngraph/op/strided_slice.cpp index 811a2d55019..306601f10fc 100644 --- a/src/ngraph/op/strided_slice.cpp +++ b/src/ngraph/op/strided_slice.cpp @@ -179,7 +179,7 @@ shared_ptr op::v1::StridedSlice::copy_with_new_args(const NodeVector& new_ } void op::v1::StridedSlice::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("generate_adjoints not implemented for StridedSlice"); } diff --git a/src/ngraph/op/strided_slice.hpp b/src/ngraph/op/strided_slice.hpp index f998b790b21..fe1bfbddc1a 100644 --- a/src/ngraph/op/strided_slice.hpp +++ b/src/ngraph/op/strided_slice.hpp @@ -103,7 +103,7 @@ namespace ngraph size_t get_version() const override { return 1; } protected: void 
generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; private: AxisSet convert_mask_to_axis_set(const std::vector& mask) const; diff --git a/src/ngraph/op/subtract.cpp b/src/ngraph/op/subtract.cpp index caab4c8a627..2a9ef4ccfd1 100644 --- a/src/ngraph/op/subtract.cpp +++ b/src/ngraph/op/subtract.cpp @@ -38,7 +38,7 @@ shared_ptr op::v0::Subtract::copy_with_new_args(const NodeVector& new_args return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v0::Subtract::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Subtract::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { @@ -77,7 +77,7 @@ shared_ptr op::v1::Subtract::copy_with_new_args(const NodeVector& new_args return make_shared(new_args.at(0), new_args.at(1), this->get_autob()); } -void op::v1::Subtract::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v1::Subtract::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { if (get_autob().m_type != op::AutoBroadcastType::NONE) { diff --git a/src/ngraph/op/subtract.hpp b/src/ngraph/op/subtract.hpp index e719f185892..907ee509202 100644 --- a/src/ngraph/op/subtract.hpp +++ b/src/ngraph/op/subtract.hpp @@ -48,7 +48,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v0 @@ -79,7 +79,7 @@ namespace ngraph copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v1 diff --git a/src/ngraph/op/sum.cpp b/src/ngraph/op/sum.cpp index a7b84e218b8..37141773a85 
100644 --- a/src/ngraph/op/sum.cpp +++ b/src/ngraph/op/sum.cpp @@ -41,7 +41,7 @@ shared_ptr op::Sum::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0), new_args.at(1)); } -void op::v0::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::v0::Sum::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/sum.hpp b/src/ngraph/op/sum.hpp index 2d1b0efc3f9..5b20464e86c 100644 --- a/src/ngraph/op/sum.hpp +++ b/src/ngraph/op/sum.hpp @@ -100,7 +100,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // default opset version diff --git a/src/ngraph/op/tan.cpp b/src/ngraph/op/tan.cpp index c57dd43ca9c..2bfaf402b06 100644 --- a/src/ngraph/op/tan.cpp +++ b/src/ngraph/op/tan.cpp @@ -36,7 +36,7 @@ shared_ptr op::Tan::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Tan::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::Tan::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/tan.hpp b/src/ngraph/op/tan.hpp index 2f98eb871de..9f042ba0ee0 100644 --- a/src/ngraph/op/tan.hpp +++ b/src/ngraph/op/tan.hpp @@ -55,7 +55,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Tan; diff --git a/src/ngraph/op/tanh.cpp b/src/ngraph/op/tanh.cpp index 93b2068e5b6..d3eee312b4c 100644 --- a/src/ngraph/op/tanh.cpp +++ b/src/ngraph/op/tanh.cpp @@ -35,7 +35,7 @@ shared_ptr op::Tanh::copy_with_new_args(const NodeVector& new_args) const return make_shared(new_args.at(0)); } -void op::Tanh::generate_adjoints(autodiff::Adjoints& adjoints, 
const NodeVector& deltas) +void op::Tanh::generate_adjoints(autodiff::Adjoints& adjoints, const OutputVector& deltas) { auto delta = deltas.at(0); diff --git a/src/ngraph/op/tanh.hpp b/src/ngraph/op/tanh.hpp index a1eb604cc81..5267786929c 100644 --- a/src/ngraph/op/tanh.hpp +++ b/src/ngraph/op/tanh.hpp @@ -41,7 +41,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } using v0::Tanh; diff --git a/src/ngraph/op/tensor_iterator.cpp b/src/ngraph/op/tensor_iterator.cpp index 86cc3e92dfc..2bfb892095e 100644 --- a/src/ngraph/op/tensor_iterator.cpp +++ b/src/ngraph/op/tensor_iterator.cpp @@ -16,6 +16,8 @@ #include "ngraph/op/tensor_iterator.hpp" #include "ngraph/graph_util.hpp" +#include "ngraph/pass/get_output_element_elimination.hpp" +#include "ngraph/specialize_function.hpp" using namespace std; using namespace ngraph; @@ -220,7 +222,7 @@ void op::TensorIterator::revalidate_and_infer_types_for_body_ops() std::stack, std::vector>> nodes_to_do; std::unordered_set> nodes_done; - for (auto r : m_body->get_results()) + for (const auto& r : m_body->get_results()) { nodes_to_do.push(r); } @@ -281,7 +283,7 @@ void op::TensorIterator::validate_and_infer_types() // Input uint64_t index_it = 0; - for (auto input_description : m_input_descriptions) + for (const auto& input_description : m_input_descriptions) { auto index = input_description->m_input_index; NODE_VALIDATION_CHECK(this, index == index_it, "Input_index not in order"); @@ -398,7 +400,7 @@ void op::TensorIterator::validate_and_infer_types() // Output index_it = 0; - for (auto output_description : m_output_descriptions) + for (const auto& output_description : m_output_descriptions) { auto index = output_description->m_output_index; NODE_VALIDATION_CHECK(this, index == index_it, "Output_index not in order"); @@ -437,6 +439,48 @@ void op::TensorIterator::validate_and_infer_types() std::shared_ptr 
op::TensorIterator::copy_with_new_args(const NodeVector& new_args) const { auto op = make_shared(as_output_vector(new_args)); + op->set_output_size(m_output_descriptions.size()); + + std::vector<::ngraph::element::Type> types(m_body->get_parameters().size()); + std::vector<::ngraph::PartialShape> new_shapes(m_body->get_parameters().size()); + + for (size_t input_index = 0; input_index < new_args.size(); ++input_index) + { + for (auto& input_description : m_input_descriptions) + { + if (input_description->m_input_index == input_index) + { + types[input_description->m_body_parameter_index] = + new_args[input_index]->get_element_type(); + new_shapes[input_description->m_body_parameter_index] = + new_args[input_index]->get_output_partial_shape(0); + + if (new_shapes[input_description->m_body_parameter_index].is_static()) + { + if (auto slice_in = ::ngraph::as_type_ptr< + ngraph::op::TensorIterator::SliceInputDescription>(input_description)) + { + new_shapes[slice_in->m_body_parameter_index][slice_in->m_axis] = + slice_in->m_part_size; + } + } + } + } + } + + auto func = std::make_shared(m_body->get_results(), m_body->get_parameters()); + auto spec_func = specialize_function( + func, types, new_shapes, std::vector(new_args.size(), nullptr), false, true); + op->m_body = + std::make_shared(spec_func->get_results(), spec_func->get_parameters()); + + // TODO: remove this code after the fix on the nGraph side (GetOutputElements) + ::ngraph::pass::GetOutputElementElimination goe_elimination; + for (const auto& n : spec_func->get_ops()) + { + goe_elimination.run_on_node(n); + } + for (auto& input_description : m_input_descriptions) { op->m_input_descriptions.push_back(input_description->copy()); diff --git a/src/ngraph/op/topk.cpp b/src/ngraph/op/topk.cpp index 298018b7853..3d0807dca34 100644 --- a/src/ngraph/op/topk.cpp +++ b/src/ngraph/op/topk.cpp @@ -219,7 +219,7 @@ shared_ptr op::v0::TopK::copy_with_new_args(const NodeVector& new_args) co } void 
op::v0::TopK::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Forward-propagation-only operation"); } @@ -353,7 +353,8 @@ size_t op::v1::TopK::validate_and_get_k(const shared_ptr& k_consta return static_cast(k_const_contents[0]); } -void op::v1::TopK::generate_adjoints(autodiff::Adjoints& /*adjoints*/, const NodeVector& /*deltas*/) +void op::v1::TopK::generate_adjoints(autodiff::Adjoints& /*adjoints*/, + const OutputVector& /* deltas */) { throw ngraph_error("Forward-propagation-only operation"); } diff --git a/src/ngraph/op/topk.hpp b/src/ngraph/op/topk.hpp index 92287c06e3f..49e25972d35 100644 --- a/src/ngraph/op/topk.hpp +++ b/src/ngraph/op/topk.hpp @@ -105,7 +105,7 @@ namespace ngraph bool m_compute_max{false}; SortType m_sort{SortType::NONE}; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } // namespace v0 @@ -186,7 +186,7 @@ namespace ngraph element::Type m_index_element_type; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; size_t read_k_from_constant_node(const std::shared_ptr& node, const element::Type& k_element_type) const; diff --git a/src/ngraph/op/util/fused_op.cpp b/src/ngraph/op/util/fused_op.cpp index 09aac90e9df..ecdd1e23338 100644 --- a/src/ngraph/op/util/fused_op.cpp +++ b/src/ngraph/op/util/fused_op.cpp @@ -71,7 +71,7 @@ void op::util::FusedOp::validate_and_infer_types() } void op::util::FusedOp::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /*deltas*/) + const OutputVector& /* deltas */) { // TODO throw ngraph_error("Autodiff on fused ops not supported yet"); diff --git a/src/ngraph/op/util/fused_op.hpp b/src/ngraph/op/util/fused_op.hpp index fc01bc3fcc6..f2fd9d59853 100644 --- a/src/ngraph/op/util/fused_op.hpp +++ 
b/src/ngraph/op/util/fused_op.hpp @@ -50,7 +50,7 @@ namespace ngraph // in validate_and_infer_types(). virtual void post_validate_and_infer_types() {} void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; protected: FusedOp(); diff --git a/src/ngraph/op/util/index_reduction.cpp b/src/ngraph/op/util/index_reduction.cpp index 735763aa80b..78472be59df 100644 --- a/src/ngraph/op/util/index_reduction.cpp +++ b/src/ngraph/op/util/index_reduction.cpp @@ -121,7 +121,7 @@ void op::util::IndexReduction::validate_and_infer_types() } void op::util::IndexReduction::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Forward-propagation-only operation"); } diff --git a/src/ngraph/op/util/index_reduction.hpp b/src/ngraph/op/util/index_reduction.hpp index c036b5670a8..2c013b8e2d4 100644 --- a/src/ngraph/op/util/index_reduction.hpp +++ b/src/ngraph/op/util/index_reduction.hpp @@ -60,7 +60,7 @@ namespace ngraph element::Type m_index_element_type; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; }; } } diff --git a/src/ngraph/opsets/opset.hpp b/src/ngraph/opsets/opset.hpp index 5e6444a3ce8..47a4979e668 100644 --- a/src/ngraph/opsets/opset.hpp +++ b/src/ngraph/opsets/opset.hpp @@ -98,6 +98,6 @@ namespace ngraph std::map m_name_type_info_map; }; - const OpSet& get_opset0(); - const OpSet& get_opset1(); + const NGRAPH_API OpSet& get_opset0(); + const NGRAPH_API OpSet& get_opset1(); } \ No newline at end of file diff --git a/src/ngraph/opsets/opset0.hpp b/src/ngraph/opsets/opset0.hpp new file mode 100644 index 00000000000..c3a60ccf714 --- /dev/null +++ b/src/ngraph/opsets/opset0.hpp @@ -0,0 +1,29 @@ +//***************************************************************************** +// Copyright 2017-2019 Intel Corporation +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include "ngraph/ops.hpp" + +namespace ngraph +{ + namespace opset0 + { +#define NGRAPH_OP(a, b) using b::a; +#include "ngraph/opsets/opset0_tbl.hpp" +#undef NGRAPH_OP + } +} diff --git a/src/ngraph/pass/opset0_downgrade.cpp b/src/ngraph/pass/opset0_downgrade.cpp index 71c2098d6b1..93d80ff37f1 100644 --- a/src/ngraph/pass/opset0_downgrade.cpp +++ b/src/ngraph/pass/opset0_downgrade.cpp @@ -22,12 +22,14 @@ #include "ngraph/builder/reshape.hpp" #include "ngraph/graph_util.hpp" #include "ngraph/node.hpp" +#include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/broadcasting.hpp" #include "ngraph/ops.hpp" #include "ngraph/pass/implicit_broadcast_elimination.hpp" #include "ngraph/pass/opset0_downgrade.hpp" #include "ngraph/slice_plan.hpp" #include "ngraph/type.hpp" +#include "ngraph/validation_util.hpp" using namespace std; using namespace ngraph; @@ -219,8 +221,19 @@ namespace bool op_cast(shared_ptr node) { - auto replacement_node = make_shared( - node->input_value(0), node->input_value(1), node->get_special_zero()); + shared_ptr replacement_node; + + const auto target_shape_input = node->input_value(1).get_node_shared_ptr(); + if (target_shape_input->is_constant() && node->get_output_partial_shape(0).is_static()) + { + replacement_node = builder::reshape(node->input_value(0), node->get_output_shape(0)); + } + else 
+ { + replacement_node = make_shared( + node->input_value(0), node->input_value(1), node->get_special_zero()); + } + replace_node(node, replacement_node); return true; } @@ -231,6 +244,27 @@ namespace return true; } + bool op_cast(shared_ptr node) + { + auto axis_node = as_type_ptr(node->input_value(2).get_node_shared_ptr()); + + NGRAPH_CHECK(axis_node, + "Unable to convert Gather:v1 to Gather:v0 if axis is not constant. Node: ", + *node); + + NGRAPH_CHECK( + axis_node->get_element_type() == element::i64, + "Unable to convert Gather:v1 to Gather:v0 with axis other type than int64. Node: ", + *node); + + int64_t axis = axis_node->get_vector()[0]; + + auto replacement_node = + make_shared(node->input_value(0), node->input_value(1), axis); + replace_node(node, replacement_node); + return true; + } + bool op_cast(shared_ptr node) { NGRAPH_CHECK(node->input_value(1).get_node_shared_ptr()->is_constant()); @@ -279,6 +313,82 @@ namespace return true; } + bool op_cast(shared_ptr node) + { + auto output_shape_input = + as_type_ptr(node->input_value(2).get_node_shared_ptr()); + const auto data_arg = node->input_value(0); + const auto filters_arg = node->input_value(1); + const auto strides = node->get_strides(); + const auto dilations = node->get_dilations(); + + NGRAPH_CHECK( + output_shape_input, + "Unable to convert GroupConvolutionBackpropData:v1 to GroupConvolutionBackpropData:v0 " + "if output_shape is not constant. Node: ", + *node); + + auto output_padding = node->get_output_padding(); + + bool is_op_valid = all_of( + output_padding.begin(), output_padding.end(), [](size_t value) { return value == 0; }); + + NGRAPH_CHECK( + is_op_valid, + "Unable to convert GroupConvolutionBackpropData:v1 to GroupConvolutionBackpropData:v0 " + "with output padding other than `0`. Node: ", + *node); + + NGRAPH_CHECK(data_arg.get_partial_shape().is_static(), + "Unable to convert GroupConvolution:1 to GroupConvolution:0" + "with dynamic data shape. 
Node: ", + *node); + + NGRAPH_CHECK(filters_arg.get_partial_shape().is_static(), + "Unable to convert GroupConvolution:1 to GroupConvolution:0" + "with dynamic filters shape. Node: ", + *node); + + auto filters_shape = filters_arg.get_shape(); + auto data_shape = data_arg.get_shape(); + auto groups = filters_shape.at(0); + filters_shape[1] *= groups; + filters_shape.erase(filters_shape.begin()); + + auto reshaped_filters = builder::reshape(node->input_value(1), filters_shape); + + auto pads_begin = node->get_pads_begin(); + auto pads_end = node->get_pads_end(); + + auto auto_pad = node->get_auto_pad(); + + auto output_shape = output_shape_input->get_shape_val(); + if (auto_pad == op::PadType::SAME_UPPER || auto_pad == op::PadType::SAME_LOWER) + { + infer_auto_padding(output_shape, + Shape(filters_shape.begin() + 2, filters_shape.end()), + strides, + dilations, + auto_pad, + pads_begin, + pads_end); + } + + output_shape.insert(output_shape.begin(), filters_shape[1]); + output_shape.insert(output_shape.begin(), data_shape[0]); + auto replacement_node = make_shared( + op::Constant::create(data_arg.get_element_type(), output_shape, {0}), + reshaped_filters, + data_arg, + node->get_strides(), + node->get_dilations(), + pads_begin, + pads_end, + groups); + replace_node(node, replacement_node); + return true; + } + bool op_cast(shared_ptr node) { op_cast_binary_elementwise_node(node); @@ -707,7 +817,7 @@ namespace }; return dispatch_map; } -} +} // namespace bool pass::Opset0Downgrade::run_on_node(shared_ptr node) { diff --git a/src/ngraph/pass/opset1_upgrade.cpp b/src/ngraph/pass/opset1_upgrade.cpp index 11a0a14c5b2..459559a4370 100644 --- a/src/ngraph/pass/opset1_upgrade.cpp +++ b/src/ngraph/pass/opset1_upgrade.cpp @@ -327,6 +327,46 @@ namespace return true; } + bool op_cast(shared_ptr node) + { + auto strides = node->get_window_movement_strides(); + auto dilations = node->get_window_dilation_strides(); + auto pads_begin = node->get_padding_below(); + auto pads_end = 
node->get_padding_above(); + auto data_batch_pshape = node->get_input_partial_shape(0); + + NGRAPH_CHECK(data_batch_pshape.is_static(), + "Unable to convert GroupConvolution:0 to GroupConvolution:1" + "with dynamic data_batch shape. Node: ", + *node); + + auto data_batch_shape = data_batch_pshape.to_shape(); + data_batch_shape.erase(data_batch_shape.begin(), data_batch_shape.end()); + + NGRAPH_CHECK(node->get_input_partial_shape(1).is_static(), + "Unable to convert GroupConvolution:0 to GroupConvolution:1" + "with dynamic filters shape. Node: ", + *node); + + auto filters_shape = node->get_input_shape(1); + auto groups = node->get_groups(); + filters_shape[0] /= groups; + filters_shape.insert(filters_shape.begin(), groups); + + auto reshaped_filters = builder::reshape(node->input_value(1), filters_shape); + + auto replacement_node = make_shared( + node->input_value(2), + reshaped_filters, + op::Constant::create(element::i64, Shape{data_batch_shape.size()}, data_batch_shape), + strides, + pads_begin, + pads_end, + dilations); + replace_node(node, replacement_node); + return true; + } + bool op_cast(shared_ptr node) { op_cast_binary_elementwise_node(node); @@ -657,7 +697,7 @@ namespace }; return dispatch_map; } -} +} // namespace bool pass::Opset1Upgrade::run_on_node(shared_ptr node) { diff --git a/src/ngraph/runtime/cpu/builder/matmul_bias.cpp b/src/ngraph/runtime/cpu/builder/matmul_bias.cpp index 834b3ca12db..e73804467e6 100644 --- a/src/ngraph/runtime/cpu/builder/matmul_bias.cpp +++ b/src/ngraph/runtime/cpu/builder/matmul_bias.cpp @@ -23,6 +23,7 @@ using namespace std; using namespace ngraph; +using namespace ngraph::element; namespace ngraph { @@ -44,6 +45,9 @@ namespace ngraph const auto& arg0_shape = mm->get_a_shape(); const auto& arg1_shape = mm->get_b_shape(); const auto& arg2_shape = node->get_shape(); + const auto element_type = mm->get_input_element_type(0); + NGRAPH_CHECK(element_type == element::f32 || element_type == element::f64, + "MatmulBias element 
type not supported"); auto m = arg0_shape[0]; auto n = arg1_shape[1]; @@ -80,23 +84,47 @@ namespace ngraph arg2_shape, arg0_buffer_index, arg1_buffer_index, - out0_buffer_index](CPURuntimeContext* ctx, - CPUExecutionContext* /* ectx */) { - cblas::cblas_sgemm( - cblas::Layout::RowMajor, - transpose_A ? cblas::Transpose::Transpose : cblas::Transpose::None, - transpose_B ? cblas::Transpose::Transpose : cblas::Transpose::None, - m, - n, - k, - 1.0f, - static_cast(ctx->buffer_data[arg0_buffer_index]), - max(1, lda), - static_cast(ctx->buffer_data[arg1_buffer_index]), - max(1, ldb), - beta, - static_cast(ctx->buffer_data[out0_buffer_index]), - max(1, arg2_shape[1])); + out0_buffer_index, + element_type](CPURuntimeContext* ctx, + CPUExecutionContext* /* ectx */) { + switch (element_type) + { + case Type_t::f32: + cblas::cblas_sgemm( + cblas::Layout::RowMajor, + transpose_A ? cblas::Transpose::Transpose : cblas::Transpose::None, + transpose_B ? cblas::Transpose::Transpose : cblas::Transpose::None, + m, + n, + k, + 1.0f, + static_cast(ctx->buffer_data[arg0_buffer_index]), + max(1, lda), + static_cast(ctx->buffer_data[arg1_buffer_index]), + max(1, ldb), + beta, + static_cast(ctx->buffer_data[out0_buffer_index]), + max(1, arg2_shape[1])); + break; + case Type_t::f64: + cblas::cblas_dgemm( + cblas::Layout::RowMajor, + transpose_A ? cblas::Transpose::Transpose : cblas::Transpose::None, + transpose_B ? 
cblas::Transpose::Transpose : cblas::Transpose::None, + m, + n, + k, + 1.0f, + static_cast(ctx->buffer_data[arg0_buffer_index]), + max(1, lda), + static_cast(ctx->buffer_data[arg1_buffer_index]), + max(1, ldb), + beta, + static_cast(ctx->buffer_data[out0_buffer_index]), + max(1, arg2_shape[1])); + break; + default: NGRAPH_UNREACHABLE("Matmul element type is not supported"); + } }; CPUKernelFunctor bias_functor = [](CPURuntimeContext* /* ctx */, @@ -104,6 +132,8 @@ namespace ngraph if (args.size() > 2) { + NGRAPH_CHECK(element_type == element::f32, + "Bias element type is not supported"); auto arg2_buffer_index = external_function->get_buffer_index(args[2].get_name()); @@ -400,6 +430,6 @@ namespace ngraph REGISTER_OP_BUILDER(BatchMatMul); REGISTER_OP_BUILDER(BatchMatMulTranspose); } - } - } -} + } // namespace cpu + } // namespace runtime +} // namespace ngraph diff --git a/src/ngraph/runtime/cpu/cpu_emitter.cpp b/src/ngraph/runtime/cpu/cpu_emitter.cpp index 1f20f3da674..f49422183ad 100644 --- a/src/ngraph/runtime/cpu/cpu_emitter.cpp +++ b/src/ngraph/runtime/cpu/cpu_emitter.cpp @@ -945,7 +945,8 @@ namespace ngraph dot->get_reduction_axes_count() == 1) { // Emit an MKL SGEMM call if possible - if (args[0].get_element_type() == element::f32) + auto element_type = args[0].get_element_type(); + if (element_type == element::f32) { writer.block_begin(); writer << "cblas::cblas_sgemm(" @@ -960,6 +961,21 @@ namespace ngraph << ");\n"; writer.block_end(); } + else if (element_type == element::f64) + { + writer.block_begin(); + writer << "cblas::cblas_dgemm(" + << "cblas::Layout::RowMajor, " + << "cblas::Transpose::None, " + << "cblas::Transpose::None, " << arg0_shape[0] << ", " + << arg1_shape[1] << ", " << arg0_shape[1] << ",\n" + << " 1.0f, " << args[0].get_name() << ", " + << max(1UL, arg0_shape[1]) << ", " << args[1].get_name() << ", " + << max(1UL, arg1_shape[1]) << ", 0.0f,\n" + << " " << out[0].get_name() << ", " << max(1UL, arg1_shape[1]) + << ");\n"; + 
writer.block_end(); + } else { writer.block_begin(); diff --git a/src/ngraph/runtime/cpu/cpu_kernels.hpp b/src/ngraph/runtime/cpu/cpu_kernels.hpp index 001c009495c..ca333607de6 100644 --- a/src/ngraph/runtime/cpu/cpu_kernels.hpp +++ b/src/ngraph/runtime/cpu/cpu_kernels.hpp @@ -92,6 +92,21 @@ namespace cblas float* C, const int64_t ldc); + void cblas_dgemm(const Layout layout, + const Transpose TransA, + const Transpose TransB, + const int64_t M, + const int64_t N, + const int64_t K, + const double alpha, + const double* A, + const int64_t lda, + const double* B, + const int64_t ldb, + const double beta, + double* C, + const int64_t ldc); + void cblas_sgemm_batch(const Layout Layout, const Transpose* transa_array, const Transpose* transb_array, diff --git a/src/ngraph/runtime/cpu/mkldnn_utils.cpp b/src/ngraph/runtime/cpu/mkldnn_utils.cpp index 85e1143ca59..567024aebca 100644 --- a/src/ngraph/runtime/cpu/mkldnn_utils.cpp +++ b/src/ngraph/runtime/cpu/mkldnn_utils.cpp @@ -271,6 +271,7 @@ std::map& // Mapping from POD types to MKLDNN data types static std::map s_mkldnn_data_type_map = { {element::boolean, mkldnn::memory::data_type::s8}, + {element::bf16, mkldnn::memory::data_type::bf16}, {element::f32, mkldnn::memory::data_type::f32}, {element::f64, mkldnn::memory::data_type::data_undef}, {element::i8, mkldnn::memory::data_type::s8}, @@ -290,6 +291,7 @@ std::map& { static std::map s_mkldnn_data_type_string_map{ {element::boolean, "mkldnn::memory::data_type::s8"}, + {element::bf16, "mkldnn::memory::data_type::bf16"}, {element::f32, "mkldnn::memory::data_type::f32"}, {element::f64, "mkldnn::memory::data_type::data_undef"}, {element::i8, "mkldnn::memory::data_type::s8"}, @@ -778,6 +780,26 @@ mkldnn::memory::desc runtime::cpu::mkldnn_utils::create_blocked_mkldnn_md_helper return memory::desc(md); } + +bool runtime::cpu::mkldnn_utils::is_bf16_supported() +{ + try + { + mkldnn::memory::dims dims{2, 3, 4, 5}; + auto input_desc = + mkldnn::memory::desc(dims, 
mkldnn::memory::data_type::f32, memory::format::nchw); + auto result_desc = + mkldnn::memory::desc(dims, mkldnn::memory::data_type::bf16, memory::format::nchw); + auto reorder_prim_desc = mkldnn::reorder::primitive_desc( + {input_desc, executor::global_cpu_engine}, {result_desc, executor::global_cpu_engine}); + } + catch (const mkldnn::error& e) + { + return false; + } + return true; +} + #else std::map& runtime::cpu::mkldnn_utils::get_mkldnn_data_type_map() @@ -1719,4 +1741,22 @@ bool runtime::cpu::mkldnn_utils::is_mkldnn_desc_blocked_data_format( #endif return blk.inner_nblks != 0; } + +bool runtime::cpu::mkldnn_utils::is_bf16_supported() +{ + try + { + mkldnn::memory::dims dims{2, 3, 4, 5}; + mkldnn::memory::dims strides{60, 20, 5, 1}; + auto input_desc = mkldnn::memory::desc(dims, mkldnn::memory::data_type::f32, strides); + auto result_desc = mkldnn::memory::desc(dims, mkldnn::memory::data_type::bf16, strides); + auto reorder_prim_desc = mkldnn::reorder::primitive_desc( + executor::global_cpu_engine, input_desc, executor::global_cpu_engine, result_desc); + } + catch (const mkldnn::error& e) + { + return false; + } + return true; +} #endif diff --git a/src/ngraph/runtime/cpu/mkldnn_utils.hpp b/src/ngraph/runtime/cpu/mkldnn_utils.hpp index 26be62a7b30..590e904a912 100644 --- a/src/ngraph/runtime/cpu/mkldnn_utils.hpp +++ b/src/ngraph/runtime/cpu/mkldnn_utils.hpp @@ -20,6 +20,7 @@ #include "ngraph/axis_vector.hpp" #include "ngraph/node.hpp" #include "ngraph/op/batch_norm.hpp" +#include "ngraph/runtime/cpu/cpu_backend_visibility.h" #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp" #include "ngraph/runtime/cpu/op/batch_norm_relu.hpp" #include "ngraph/type/element_type.hpp" @@ -149,6 +150,8 @@ namespace ngraph bool can_use_mkldnn_batchnorm_fprop(const ngraph::Node* node); bool can_use_mkldnn_batchnorm_bprop(const ngraph::Node* node); + bool CPU_BACKEND_API is_bf16_supported(); + // // Intel(R) MKL-DNN supports the Winograd algorithm for convolutions with the // 
following sizes: @@ -226,6 +229,15 @@ namespace ngraph { return false; } + + // Check if bf16 is supported on the platform + if (!is_bf16_supported() && (node->get_input_element_type(0) == element::bf16 || + node->get_input_element_type(1) == element::bf16 || + node->get_output_element_type(0) == element::bf16)) + { + return false; + } + return true; } diff --git a/src/ngraph/runtime/cpu/op/deconv.cpp b/src/ngraph/runtime/cpu/op/deconv.cpp index b21b24516af..8eeebe145eb 100644 --- a/src/ngraph/runtime/cpu/op/deconv.cpp +++ b/src/ngraph/runtime/cpu/op/deconv.cpp @@ -157,7 +157,7 @@ void op::DeconvolutionBias::validate_and_infer_types() } void op::DeconvolutionBias::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("DeconvolutionBias generate_adjoints not supported implemented"); } diff --git a/src/ngraph/runtime/cpu/op/deconv.hpp b/src/ngraph/runtime/cpu/op/deconv.hpp index ba6b38d94e5..06a5cf35ed6 100644 --- a/src/ngraph/runtime/cpu/op/deconv.hpp +++ b/src/ngraph/runtime/cpu/op/deconv.hpp @@ -58,7 +58,8 @@ namespace ngraph void validate_and_infer_types() override; - void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) override; + void generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) override; virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; diff --git a/src/ngraph/runtime/cpu/op/group_conv_bias.cpp b/src/ngraph/runtime/cpu/op/group_conv_bias.cpp index 3432c507530..f161c651d55 100644 --- a/src/ngraph/runtime/cpu/op/group_conv_bias.cpp +++ b/src/ngraph/runtime/cpu/op/group_conv_bias.cpp @@ -194,7 +194,7 @@ shared_ptr op::GroupConvolutionBias::copy_with_new_args(const NodeVector& } void op::GroupConvolutionBias::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("GroupConvolutionBias 
generate_adjoints not supported implemented"); } diff --git a/src/ngraph/runtime/cpu/op/group_conv_bias.hpp b/src/ngraph/runtime/cpu/op/group_conv_bias.hpp index 2f67e6eb98d..7aadcfaa4e4 100644 --- a/src/ngraph/runtime/cpu/op/group_conv_bias.hpp +++ b/src/ngraph/runtime/cpu/op/group_conv_bias.hpp @@ -66,7 +66,8 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; - void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) override; + void generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) override; protected: Strides m_window_movement_strides; diff --git a/src/ngraph/runtime/cpu/op/max_pool_with_indices.cpp b/src/ngraph/runtime/cpu/op/max_pool_with_indices.cpp index fce67cde158..e7545730aa2 100644 --- a/src/ngraph/runtime/cpu/op/max_pool_with_indices.cpp +++ b/src/ngraph/runtime/cpu/op/max_pool_with_indices.cpp @@ -359,7 +359,7 @@ shared_ptr } void op::MaxPoolWithIndices::generate_adjoints(autodiff::Adjoints& /* adjoints */, - const NodeVector& /* deltas */) + const OutputVector& /* deltas */) { throw ngraph_error("Differentation of MaxPoolWithIndices isn't supported"); } diff --git a/src/ngraph/runtime/cpu/op/max_pool_with_indices.hpp b/src/ngraph/runtime/cpu/op/max_pool_with_indices.hpp index a4c367e4889..442151702a6 100644 --- a/src/ngraph/runtime/cpu/op/max_pool_with_indices.hpp +++ b/src/ngraph/runtime/cpu/op/max_pool_with_indices.hpp @@ -55,7 +55,7 @@ namespace ngraph protected: virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; Shape m_window_shape; Strides m_window_movement_strides; diff --git a/src/ngraph/runtime/cpu/op/sigmoid_mul.cpp b/src/ngraph/runtime/cpu/op/sigmoid_mul.cpp index c1f93474913..cf51731465b 100644 --- a/src/ngraph/runtime/cpu/op/sigmoid_mul.cpp +++ b/src/ngraph/runtime/cpu/op/sigmoid_mul.cpp @@ -92,7 +92,8 @@ shared_ptr 
op::SigmoidMultiply::copy_with_new_args(const NodeVector& new_a new_args.at(0), new_args.at(1), m_input_type[0], m_input_type[1]); } -void op::SigmoidMultiply::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) +void op::SigmoidMultiply::generate_adjoints(autodiff::Adjoints& adjoints, + const OutputVector& deltas) { auto delta = deltas.at(0); auto input_0 = get_argument(0); diff --git a/src/ngraph/runtime/cpu/op/sigmoid_mul.hpp b/src/ngraph/runtime/cpu/op/sigmoid_mul.hpp index 893d3b3081c..c18e7808392 100644 --- a/src/ngraph/runtime/cpu/op/sigmoid_mul.hpp +++ b/src/ngraph/runtime/cpu/op/sigmoid_mul.hpp @@ -52,7 +52,7 @@ namespace ngraph virtual std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override; virtual void generate_adjoints(autodiff::Adjoints& adjoints, - const NodeVector& deltas) override; + const OutputVector& deltas) override; FunctionType get_input_func_type(const unsigned int index) const { return m_input_type[index]; diff --git a/src/ngraph/runtime/cpu/pass/cpu_assignment.cpp b/src/ngraph/runtime/cpu/pass/cpu_assignment.cpp index a8e3d976f4f..6978b70c6fd 100644 --- a/src/ngraph/runtime/cpu/pass/cpu_assignment.cpp +++ b/src/ngraph/runtime/cpu/pass/cpu_assignment.cpp @@ -411,7 +411,8 @@ namespace ngraph (node->get_input_element_type(0) == element::f32 || node->get_input_element_type(0) == element::u8 || node->get_input_element_type(0) == element::i8 || - node->get_input_element_type(0) == element::bf16)) + (node->get_input_element_type(0) == element::bf16 && + runtime::cpu::mkldnn_utils::is_bf16_supported()))) { runtime::cpu::mkldnn_utils::assign_mkldnn_kernel(node); } diff --git a/src/ngraph/runtime/cpu/pass/cpu_fusion.cpp b/src/ngraph/runtime/cpu/pass/cpu_fusion.cpp index 520532c28fc..4681a7688de 100644 --- a/src/ngraph/runtime/cpu/pass/cpu_fusion.cpp +++ b/src/ngraph/runtime/cpu/pass/cpu_fusion.cpp @@ -166,6 +166,9 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_matmulbias() auto m_bias = 
m_broadcast->get_argument(0); auto pattern_map = m.get_pattern_map(); + NGRAPH_CHECK(mpattern->get_element_type() != element::f64 || m_bias == nullptr, + "Bias in DP MatMulBias is not supported yet"); + auto mmb = std::make_shared(pattern_map[W], pattern_map[x], m_bias, @@ -207,10 +210,12 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_matmul() auto mpattern = m.get_match_root(); auto dot = m.get_match_root(); + auto element_type = mpattern->get_element_type(); - if (mpattern->get_element_type() != element::f32) + if (element_type != element::f32 && element_type != element::f64) { - NGRAPH_DEBUG << "mpattern = " << mpattern->get_name() << " type is not float!"; + NGRAPH_DEBUG << "mpattern = " << mpattern->get_name() + << " type is not float or double!"; return false; } diff --git a/src/ngraph/runtime/cpu/unit_test.manifest b/src/ngraph/runtime/cpu/unit_test.manifest index caeb5507eac..d25a25e9bdc 100644 --- a/src/ngraph/runtime/cpu/unit_test.manifest +++ b/src/ngraph/runtime/cpu/unit_test.manifest @@ -23,3 +23,6 @@ lrn_2d_across_outermost_axis # ONNX TopK with dynamic K top_k_opset_10 + +# ONNX GatherND with int32 +model_gatherND_int32 diff --git a/src/ngraph/runtime/interpreter/int_executable.cpp b/src/ngraph/runtime/interpreter/int_executable.cpp index 5b54b034563..3c6fa2c6a87 100644 --- a/src/ngraph/runtime/interpreter/int_executable.cpp +++ b/src/ngraph/runtime/interpreter/int_executable.cpp @@ -25,7 +25,6 @@ #include "ngraph/pass/like_replacement.hpp" #include "ngraph/pass/liveness.hpp" #include "ngraph/pass/manager.hpp" -#include "ngraph/pass/memory_layout.hpp" #include "ngraph/pass/opset0_downgrade.hpp" #include "ngraph/runtime/backend_manager.hpp" #include "ngraph/runtime/chrome_trace.hpp" @@ -69,6 +68,8 @@ runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr& f pass_manager.register_pass(); pass_manager.register_pass(); pass_manager.register_pass(); + // Need to decompose any v0 fused ops, which were produced by the downgrade pass + 
pass_manager.register_pass(); pass_manager.register_pass>(); pass_manager.register_pass(); pass_manager.run_passes(m_function); diff --git a/src/ngraph/runtime/interpreter/unit_test.manifest b/src/ngraph/runtime/interpreter/unit_test.manifest index c5cc3d9df36..581d3c8a098 100644 --- a/src/ngraph/runtime/interpreter/unit_test.manifest +++ b/src/ngraph/runtime/interpreter/unit_test.manifest @@ -22,8 +22,3 @@ top_k_opset_11_const_k_smallest # Tile op case that the number of elements in "repeats" and shape of "data" are different tile_3d_small_data_rank tile_3d_few_repeats - -# Another fused op decomposition pass required after the downgrade pass -model_split_equal_parts_default -model_split_equal_parts_2d -model_split_variable_parts_2d diff --git a/src/ngraph/runtime/plaidml/unit_test.manifest b/src/ngraph/runtime/plaidml/unit_test.manifest index d59876720d2..950939da59c 100644 --- a/src/ngraph/runtime/plaidml/unit_test.manifest +++ b/src/ngraph/runtime/plaidml/unit_test.manifest @@ -263,6 +263,8 @@ model_lstm_fwd_hardsigmoid_activation model_lstm_fwd_with_clip model_lstm_fwd_mixed_seq model_lstm_fwd_large_batch_no_clip +model_gatherND_int32 +model_gatherND_float model_global_lp_pool_p3 model_argmin_no_keepdims model_reduce_log_sum_exp diff --git a/src/ngraph/serializer.cpp b/src/ngraph/serializer.cpp index 395dbdfe54c..43dbcb9f4bd 100644 --- a/src/ngraph/serializer.cpp +++ b/src/ngraph/serializer.cpp @@ -2386,11 +2386,16 @@ shared_ptr JSONDeserializer::deserialize_node(json node_js) } case OP_TYPEID::Product: { - auto reduction_axes = deserialize_axis_set(node_js.at("reduction_axes")); + set reduction_axes = + get_or_default>(node_js, "reduction_axes", set()); if (reduction_axes.empty()) + { node = make_shared(args[0], args[1]); + } else + { node = make_shared(args[0], reduction_axes); + } break; } case OP_TYPEID::ReduceProd_v1: @@ -2807,11 +2812,16 @@ shared_ptr JSONDeserializer::deserialize_node(json node_js) } case OP_TYPEID::Sum: { - auto reduction_axes = 
deserialize_axis_set(node_js.at("reduction_axes")); + set reduction_axes = + get_or_default>(node_js, "reduction_axes", set()); if (reduction_axes.empty()) + { node = make_shared(args[0], args[1]); + } else + { node = make_shared(args[0], reduction_axes); + } break; } case OP_TYPEID::Tan: @@ -3056,7 +3066,6 @@ json JSONSerializer::serialize_node(const Node& n) node["friendly_name"] = n.get_friendly_name(); } node["op"] = type_info.name; - // TODO Multiple outputs json inputs = json::array(); json control_deps = json::array(); @@ -4203,7 +4212,11 @@ json JSONSerializer::serialize_node(const Node& n) } case OP_TYPEID::PRelu: { break; } - case OP_TYPEID::Product: { break; + case OP_TYPEID::Product: + { + auto tmp = static_cast(&n); + node["reduction_axes"] = tmp->get_reduction_axes(); + break; } case OP_TYPEID::ReduceProd_v1: { @@ -4481,7 +4494,11 @@ json JSONSerializer::serialize_node(const Node& n) } break; } - case OP_TYPEID::Sum: { break; + case OP_TYPEID::Sum: + { + auto tmp = static_cast(&n); + node["reduction_axes"] = tmp->get_reduction_axes(); + break; } case OP_TYPEID::ReduceSum_v1: { diff --git a/src/ngraph/specialize_function.cpp b/src/ngraph/specialize_function.cpp index 9e9ad379a8a..31f58addc84 100644 --- a/src/ngraph/specialize_function.cpp +++ b/src/ngraph/specialize_function.cpp @@ -17,6 +17,7 @@ #include "ngraph/specialize_function.hpp" #include #include "ngraph/op/constant.hpp" +#include "ngraph/op/tensor_iterator.hpp" using namespace ngraph; @@ -84,6 +85,11 @@ std::shared_ptr else { m[old_node.get()] = old_node->copy_with_new_inputs(new_args); + // TODO: workaround for shape inference, delete it after fix + if (::ngraph::as_type_ptr(m[old_node.get()])) + { + m[old_node.get()]->validate_and_infer_types(); + } m[old_node.get()]->get_rt_info() = old_node->get_rt_info(); } diff --git a/src/ngraph/specialize_function.hpp b/src/ngraph/specialize_function.hpp index bb5895f1a4d..6079a96cd94 100644 --- a/src/ngraph/specialize_function.hpp +++ 
b/src/ngraph/specialize_function.hpp @@ -103,6 +103,7 @@ namespace ngraph /// parameter_shapes[i] can be created. /// /// TODO(amprocte): convert this to a pass. + NGRAPH_API std::shared_ptr specialize_function(std::shared_ptr f, const std::vector& parameter_element_types, @@ -195,6 +196,7 @@ namespace ngraph /// parameter_shapes[i] can be created. /// /// TODO(amprocte): convert this to a pass. + NGRAPH_API std::shared_ptr specialize_function(std::shared_ptr f, const std::vector& parameter_element_types, diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 8feb0e829ec..f15200ccbd4 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -115,6 +115,7 @@ set(SRC type_prop/binary_elementwise.cpp type_prop/broadcast.cpp type_prop/clamp.cpp + type_prop/compat.cpp type_prop/concat.cpp type_prop/constant.cpp type_prop/convert.cpp diff --git a/test/backend/batch_norm.in.cpp b/test/backend/batch_norm.in.cpp index e6336fb52f3..9573bbd433f 100644 --- a/test/backend/batch_norm.in.cpp +++ b/test/backend/batch_norm.in.cpp @@ -736,11 +736,11 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_bprop_n4c3h2w2) ngraph::autodiff::Adjoints adjoints(OutputVector{bn_dx, bn_dgamma, bn_dbeta}, OutputVector{C, zero, zero}); - auto dinput = adjoints.backprop_node(input); - auto dgamma = adjoints.backprop_node(gamma); - auto dbeta = adjoints.backprop_node(beta); + auto dinput = adjoints.backprop_output(input); + auto dgamma = adjoints.backprop_output(gamma); + auto dbeta = adjoints.backprop_output(beta); - auto df = make_shared(NodeVector{dinput, dgamma, dbeta}, + auto df = make_shared(OutputVector{dinput, dgamma, dbeta}, ParameterVector{mean, var, input, gamma, beta, C}); #ifndef NGRAPH_JSON_DISABLE diff --git a/test/backend/distributed.in.cpp b/test/backend/distributed.in.cpp index 994f9024253..e4023046ff9 100644 --- a/test/backend/distributed.in.cpp +++ b/test/backend/distributed.in.cpp @@ -57,8 +57,7 @@ static void test_allreduce_common(reduction::Type reduce_type) { case 
reduction::Type::SUM: copy_data(a, v); - std::transform( - v.begin(), v.end(), v.begin(), std::bind1st(std::multiplies(), comm_size)); + std::transform(v.begin(), v.end(), v.begin(), [=](float x) { return x * comm_size; }); break; case reduction::Type::PROD: copy_data(a, v); diff --git a/test/backend/divide.in.cpp b/test/backend/divide.in.cpp index 07eb9b2fafa..4ac8eebdc55 100644 --- a/test/backend/divide.in.cpp +++ b/test/backend/divide.in.cpp @@ -169,10 +169,10 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_adjoint_stability) auto Xs = f->get_parameters(); auto C = std::make_shared(Y_out.get_element_type(), Y_out.get_shape()); ngraph::autodiff::Adjoints adjoints(OutputVector{Y_out}, OutputVector{C}); - std::vector> dYdXs(Xs.size()); + std::vector> dYdXs(Xs.size()); transform( Xs.begin(), Xs.end(), dYdXs.begin(), [C, &adjoints](const std::shared_ptr& X) { - return adjoints.backprop_node(X); + return adjoints.backprop_output(X); }); std::vector> params(Xs); params.push_back(C); diff --git a/test/backend/layer_norm.in.cpp b/test/backend/layer_norm.in.cpp index dd10fc31d19..3fb609cb26a 100644 --- a/test/backend/layer_norm.in.cpp +++ b/test/backend/layer_norm.in.cpp @@ -247,7 +247,7 @@ NGRAPH_TEST(${BACKEND_NAME}, layer_norm_bprop_4d_input) vector expected_scale(60, 0); vector expected_bias(60, 2); - EXPECT_TRUE(test::all_close_f(expected_data, read_vector(d_data), 1e-6f, 1e-6f)); - EXPECT_TRUE(test::all_close_f(expected_scale, read_vector(d_scale), 1e-6f, 1e-6f)); - EXPECT_TRUE(test::all_close_f(expected_bias, read_vector(d_bias), 1e-6f, 1e-6f)); + EXPECT_TRUE(test::all_close_f(expected_data, read_vector(d_data), 1e-5f, 1e-6f)); + EXPECT_TRUE(test::all_close_f(expected_scale, read_vector(d_scale), 1e-5f, 1e-6f)); + EXPECT_TRUE(test::all_close_f(expected_bias, read_vector(d_bias), 1e-5f, 1e-6f)); } diff --git a/test/convert_u1_to_string.cpp b/test/convert_u1_to_string.cpp index d671bf85bdc..ded79b9da4f 100644 --- a/test/convert_u1_to_string.cpp +++ 
b/test/convert_u1_to_string.cpp @@ -32,4 +32,4 @@ TEST(convert_u1_to_string, convert_u1_to_string) { ASSERT_EQ(constant->convert_value_to_string(i), ref[i]); } -} \ No newline at end of file +} diff --git a/test/cpu_fusion.cpp b/test/cpu_fusion.cpp index 970caf7bbe5..73c41a838e7 100644 --- a/test/cpu_fusion.cpp +++ b/test/cpu_fusion.cpp @@ -295,6 +295,25 @@ TEST(cpu_fusion, cpu_fusion_pass_basic) ASSERT_NE(as_type_ptr(graph->get_argument(0)), nullptr); } +TEST(cpu_fusion, matmul_f64) +{ + Shape shape{}; + Shape shape_w{2, 4}; + Shape shape_x{4, 1}; + Shape shape_b{1}; + auto A = make_shared(element::f64, shape_w); + auto B = make_shared(element::f64, shape_x); + auto C = make_shared(element::f64, shape_b); + + auto dot = make_shared(A, B); + auto graph = make_shared(dot); + pass::Manager pass_manager; + pass_manager.register_pass(pass::FusionType::REGULAR_FUSIONS); + auto func = make_shared(graph, ParameterVector{A, B, C}); + pass_manager.run_passes(func); + ASSERT_NE(as_type_ptr(graph->get_argument(0)), nullptr); +} + TEST(cpu_fusion, commutative_matmul_bias) { Shape shape{}; @@ -509,12 +528,12 @@ TEST(cpu_fusion, conv_bias_bprop_n1c1h3w3) ngraph::autodiff::Adjoints adjoints(OutputVector{convolution_bias}, OutputVector{conv_test.delta}); - auto d_data = adjoints.backprop_node(conv_test.data); - auto d_weights = adjoints.backprop_node(conv_test.weights); - auto d_bias = adjoints.backprop_node(conv_test.bias); + auto d_data = adjoints.backprop_output(conv_test.data); + auto d_weights = adjoints.backprop_output(conv_test.weights); + auto d_bias = adjoints.backprop_output(conv_test.bias); auto df = make_shared( - NodeVector{d_data, d_weights, d_bias}, + OutputVector{d_data, d_weights, d_bias}, ParameterVector{conv_test.data, conv_test.weights, conv_test.bias, conv_test.delta}); auto handle = backend->compile(df); handle->call_with_validate( @@ -548,11 +567,11 @@ TEST(cpu_fusion, conv_bias_bprop) ngraph::autodiff::Adjoints adjoints(OutputVector{conv_bias}, 
OutputVector{delta}); - auto d_data = adjoints.backprop_node(data_batch); - auto d_weights = adjoints.backprop_node(filters); - auto d_bias = adjoints.backprop_node(bias); + auto d_data = adjoints.backprop_output(data_batch); + auto d_weights = adjoints.backprop_output(filters); + auto d_bias = adjoints.backprop_output(bias); - auto df = make_shared(NodeVector{d_data, d_weights, d_bias}, + auto df = make_shared(OutputVector{d_data, d_weights, d_bias}, ParameterVector{data_batch, filters, bias, delta}); pass_manager.run_passes(df); @@ -1520,9 +1539,9 @@ TEST(cpu_fusion, max_pool_with_indices) ngraph::autodiff::Adjoints adjoints(ngraph::OutputVector{max_pool}, ngraph::OutputVector{C}); - auto dinput = adjoints.backprop_node(input); + auto dinput = adjoints.backprop_output(input); - auto df = std::make_shared(NodeVector{dinput}, ParameterVector{input, C}); + auto df = std::make_shared(OutputVector{dinput}, ParameterVector{input, C}); auto f = std::make_shared(NodeVector{max_pool}, ParameterVector{input}); @@ -1630,7 +1649,7 @@ static std::pair, OutputVector> transform(back_parameters.begin(), back_parameters.end(), dYdXs.begin(), - [&adjoint](const std::shared_ptr& X) { return adjoint.backprop_node(X); }); + [&adjoint](const std::shared_ptr& X) { return adjoint.backprop_output(X); }); // create the backward function std::vector> param_adjoints; @@ -2399,9 +2418,9 @@ void sigmoid_multiply_fusion_backward_compute(runtime::Backend* backend, make_shared(input_0_alt, input_1_alt, input_0_type, input_1_type); ngraph::autodiff::Adjoints adjoints(OutputVector{sigmoid_mul}, OutputVector{delta_param}); - auto d_input_0 = adjoints.backprop_node(input_0_adjoint); - auto d_input_1 = adjoints.backprop_node(input_1_adjoint); - auto df = make_shared(NodeVector{d_input_0, d_input_1}, back_params); + auto d_input_0 = adjoints.backprop_output(input_0_adjoint); + auto d_input_1 = adjoints.backprop_output(input_1_adjoint); + auto df = make_shared(OutputVector{d_input_0, d_input_1}, 
back_params); auto handle = backend->compile(df); handle->call_with_validate({d_input_0_tensor, d_input_1_tensor}, input_tensors); EXPECT_TRUE(test::all_close(read_vector(d_input_0_tensor), expected_0)); diff --git a/test/cpu_test.cpp b/test/cpu_test.cpp index 10f05ab39ff..72d768535dd 100644 --- a/test/cpu_test.cpp +++ b/test/cpu_test.cpp @@ -2155,9 +2155,15 @@ TEST(cpu_test, tensor_copy_from_different_layout) EXPECT_EQ((vector{1, 4, 2, 5, 3, 6}), read_vector(b)); } -#if MKLDNN_VERSION_MAJOR >= 1 TEST(cpu_test, max_pool_bf16) { + if (!runtime::cpu::mkldnn_utils::is_bf16_supported()) + { + // TODO change to skip when there is a new release of gtest + NGRAPH_WARN << "This test is skipped for platform without bf16 support."; + return; + } + Shape shape_a{1, 1, 3, 5}; Shape window_shape{2, 3}; auto window_movement_strides = Strides{1, 1}; @@ -2186,6 +2192,13 @@ TEST(cpu_test, max_pool_bf16) TEST(cpu_test, convolution_simple_bf16) { + if (!runtime::cpu::mkldnn_utils::is_bf16_supported()) + { + // TODO change to skip when there is a new release of gtest + NGRAPH_WARN << "This test is skipped for platform without bf16 support."; + return; + } + Shape shape_a{1, 2, 2, 2}; auto A = make_shared(element::f32, shape_a); Shape shape_b{2, 2, 1, 1}; @@ -2221,7 +2234,6 @@ TEST(cpu_test, convolution_simple_bf16) EXPECT_EQ((vector{18.0, 24.0, 30.0, 36.0, 18.0, 24.0, 30.0, 36.0}), read_vector(result)); } -#endif // This tests a backend's implementation of the three parameter version of create_tensor // Testing using this tensor as a Function input diff --git a/test/models/onnx/gatherND_float.prototxt b/test/models/onnx/gatherND_float.prototxt new file mode 100644 index 00000000000..39a990a63a6 --- /dev/null +++ b/test/models/onnx/gatherND_float.prototxt @@ -0,0 +1,65 @@ +ir_version: 3 +producer_name: "nGraph ONNX Importer" +graph { + node { + input: "x" + input: "i" + output: "y" + op_type: "GatherND" + } + name: "test_gatherND_float" + input { + name: "x" + type { + tensor_type { + 
elem_type: 1 + shape { + dim { + dim_value: 2 + } + dim { + dim_value: 2 + } + dim { + dim_value: 2 + } + } + } + } + } + input { + name: "i" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 2 + } + dim { + dim_value: 2 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 2 + } + dim { + dim_value: 2 + } + } + } + } + } +} +opset_import { + version: 7 +} diff --git a/test/models/onnx/gatherND_int32.prototxt b/test/models/onnx/gatherND_int32.prototxt new file mode 100644 index 00000000000..a10e901a44a --- /dev/null +++ b/test/models/onnx/gatherND_int32.prototxt @@ -0,0 +1,62 @@ +ir_version: 3 +producer_name: "nGraph ONNX Importer" +graph { + node { + input: "x" + input: "i" + output: "y" + op_type: "GatherND" + } + name: "test_gatherND_int32" + input { + name: "x" + type { + tensor_type { + elem_type: 6 + shape { + dim { + dim_value: 2 + } + dim { + dim_value: 2 + } + } + } + } + } + input { + name: "i" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 2 + } + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 6 + shape { + dim { + dim_value: 2 + } + dim { + dim_value: 2 + } + } + } + } + } +} +opset_import { + version: 7 +} diff --git a/test/models/onnx/reshape_negative_dim.prototxt b/test/models/onnx/reshape_negative_dim.prototxt index 19586b87be6..7e5e3cc0dcb 100644 --- a/test/models/onnx/reshape_negative_dim.prototxt +++ b/test/models/onnx/reshape_negative_dim.prototxt @@ -1,21 +1,23 @@ -ir_version: 3 +ir_version: 6 producer_name: "nGraph ONNX Importer" graph { node { - input: "A" - output: "B" + input: "data" + input: "shape" + output: "reshaped" op_type: "Reshape" - attribute { - name: "shape" - ints: 6 - ints: 2 - ints: 2 - type: INTS - } } - name: "compute_graph" + name: "test_reshape_negative_dim" + initializer { + dims: 3 + data_type: 7 + int64_data: 2 + int64_data: -1 + int64_data: 2 + name: "shape" + } 
input { - name: "A" + name: "data" type { tensor_type { elem_type: 1 @@ -33,17 +35,30 @@ graph { } } } + input { + name: "shape" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 3 + } + } + } + } + } output { - name: "B" + name: "reshaped" type { tensor_type { elem_type: 1 shape { dim { - dim_value: 6 + dim_value: 2 } dim { - dim_value: 2 + dim_value: 6 } dim { dim_value: 2 diff --git a/test/onnx/onnx_import.in.cpp b/test/onnx/onnx_import.in.cpp index a0ad77dbbec..78757e36c21 100644 --- a/test/onnx/onnx_import.in.cpp +++ b/test/onnx/onnx_import.in.cpp @@ -1776,3 +1776,29 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_mod) test_case.run(); } + +NGRAPH_TEST(onnx_${BACKEND_NAME}, model_gatherND_int32) +{ + const auto gatherND_fn = onnx_import::import_onnx_model( + file_util::path_join(SERIALIZED_ZOO, "onnx/gatherND_int32.prototxt")); + auto test_case = ngraph::test::NgraphTestCase(gatherND_fn, "${BACKEND_NAME}"); + + test_case.add_input({0, 1, 2, 3}); + test_case.add_input({1, 0}); + test_case.add_expected_output(Shape{2, 2}, {2, 3, 0, 1}); + + test_case.run(); +} + +NGRAPH_TEST(onnx_${BACKEND_NAME}, model_gatherND_float) +{ + const auto gatherND_fn = onnx_import::import_onnx_model( + file_util::path_join(SERIALIZED_ZOO, "onnx/gatherND_float.prototxt")); + auto test_case = ngraph::test::NgraphTestCase(gatherND_fn, "${BACKEND_NAME}"); + + test_case.add_input({0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f}); + test_case.add_input({0, 1, 1, 0}); + test_case.add_expected_output(Shape{2, 2}, {2.f, 3.f, 4.f, 5.f}); + + test_case.run(); +} diff --git a/test/onnx/onnx_import_reshape.in.cpp b/test/onnx/onnx_import_reshape.in.cpp index 5ed7b723c66..394a34bbac2 100644 --- a/test/onnx/onnx_import_reshape.in.cpp +++ b/test/onnx/onnx_import_reshape.in.cpp @@ -124,21 +124,34 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_reshape_single_dim) NGRAPH_TEST(onnx_${BACKEND_NAME}, model_reshape_negative_dim) { - auto function = onnx_import::import_onnx_model( + // the model contains 
the target shape in the initializers: [2, -1, 2] + const auto function = onnx_import::import_onnx_model( file_util::path_join(SERIALIZED_ZOO, "onnx/reshape_negative_dim.prototxt")); - // input data shape (2, 3, 4) - Inputs inputs{test::NDArray({{{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}}, - {{12, 13, 14, 15}, {16, 17, 18, 19}, {20, 21, 22, 23}}}) + // 2x3x4 + Inputs inputs{test::NDArray({{{0.5488135, 0.71518934, 0.60276335, 0.5448832}, + {0.4236548, 0.6458941, 0.4375872, 0.891773}, + {0.96366274, 0.3834415, 0.79172504, 0.5288949}}, + + {{0.56804454, 0.92559665, 0.07103606, 0.0871293}, + {0.0202184, 0.83261985, 0.77815676, 0.87001216}, + {0.9786183, 0.7991586, 0.46147937, 0.7805292}}}) .get_vector()}; - // output data shape (6, 2, 2) - Outputs expected_outputs{test::NDArray({{{0, 1}, {2, 3}}, - {{4, 5}, {6, 7}}, - {{8, 9}, {10, 11}}, - {{12, 13}, {14, 15}}, - {{16, 17}, {18, 19}}, - {{20, 21}, {22, 23}}}) + // 2x6x2 + Outputs expected_outputs{test::NDArray({{{0.5488135, 0.71518934}, + {0.60276335, 0.5448832}, + {0.4236548, 0.6458941}, + {0.4375872, 0.891773}, + {0.96366274, 0.3834415}, + {0.79172504, 0.5288949}}, + + {{0.56804454, 0.92559665}, + {0.07103606, 0.0871293}, + {0.0202184, 0.83261985}, + {0.77815676, 0.87001216}, + {0.9786183, 0.7991586}, + {0.46147937, 0.7805292}}}) .get_vector()}; Outputs outputs{execute(function, inputs, "${BACKEND_NAME}")}; diff --git a/test/opset_pass/convolution_opset_pass.cpp b/test/opset_pass/convolution_opset_pass.cpp index 8832ec96ca7..64c0f76b8f3 100644 --- a/test/opset_pass/convolution_opset_pass.cpp +++ b/test/opset_pass/convolution_opset_pass.cpp @@ -138,3 +138,63 @@ TEST(opset_transform, opset1_convolution_backprop_filters_downgrade_pass) EXPECT_EQ(conv_v0_node->get_padding_above_forward(), padding_end); EXPECT_EQ(conv_v0_node->get_data_dilation_strides_forward(), (Strides{1})); } + +TEST(opset_transform, opset1_group_convolution_backprop_data_downgrade_pass) +{ + auto output_shape = op::Constant::create(element::i64, 
Shape{1}, {100}); + auto filters = make_shared(element::f32, Shape{1, 128, 3, 10}); + auto delta = make_shared(element::f32, Shape{64, 128, 96}); + auto strides = Strides{1}; + auto dilations = Strides{1}; + auto padding_begin = CoordinateDiff{2}; + auto padding_end = CoordinateDiff{3}; + + auto group_conv_backprop = make_shared( + delta, filters, output_shape, strides, padding_begin, padding_end, dilations); + auto result = make_shared(group_conv_backprop); + auto f = make_shared(ResultVector{result}, ParameterVector{filters, delta}); + + ngraph::pass::Manager pass_manager; + pass_manager.register_pass(); + pass_manager.run_passes(f); + + auto group_conv_backprop_s0_result = f->get_results().at(0); + auto node = group_conv_backprop_s0_result->input(0).get_source_output().get_node_shared_ptr(); + auto group_conv_backprop_v0_node = as_type_ptr(node); + + ASSERT_TRUE(group_conv_backprop_v0_node); + EXPECT_EQ(group_conv_backprop_v0_node->get_window_movement_strides(), strides); + EXPECT_EQ(group_conv_backprop_v0_node->get_window_dilation_strides(), dilations); + EXPECT_EQ(group_conv_backprop_v0_node->get_padding_below(), padding_begin); + EXPECT_EQ(group_conv_backprop_v0_node->get_padding_above(), padding_end); +} + +TEST(opset_transform, opset1_group_convolution_backprop_data_upgrade_pass) +{ + auto data_batch_shape = op::Constant::create(element::f32, Shape{64, 3, 100}, {0}); + auto filters = make_shared(element::f32, Shape{128, 3, 10}); + auto delta = make_shared(element::f32, Shape{64, 128, 96}); + auto strides = Strides{1}; + auto dilations = Strides{1}; + auto padding_begin = CoordinateDiff{2}; + auto padding_end = CoordinateDiff{3}; + + auto group_conv_backprop = make_shared( + data_batch_shape, filters, delta, strides, dilations, padding_begin, padding_end, 1); + auto result = make_shared(group_conv_backprop); + auto f = make_shared(ResultVector{result}, ParameterVector{filters, delta}); + + ngraph::pass::Manager pass_manager; + pass_manager.register_pass(); + 
pass_manager.run_passes(f); + + auto group_conv_backprop_s1_result = f->get_results().at(0); + auto node = group_conv_backprop_s1_result->input(0).get_source_output().get_node_shared_ptr(); + auto group_conv_backprop_v1_node = as_type_ptr(node); + + ASSERT_TRUE(group_conv_backprop_v1_node); + EXPECT_EQ(group_conv_backprop_v1_node->get_strides(), strides); + EXPECT_EQ(group_conv_backprop_v1_node->get_dilations(), dilations); + EXPECT_EQ(group_conv_backprop_v1_node->get_pads_begin(), padding_begin); + EXPECT_EQ(group_conv_backprop_v1_node->get_pads_end(), padding_end); +} diff --git a/test/type_prop/compat.cpp b/test/type_prop/compat.cpp new file mode 100644 index 00000000000..8f7f38d356e --- /dev/null +++ b/test/type_prop/compat.cpp @@ -0,0 +1,74 @@ +//***************************************************************************** +// Copyright 2017-2019 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include "gtest/gtest.h" +#include "ngraph/ngraph.hpp" +#include "util/type_prop.hpp" + +using namespace std; +using namespace ngraph; + +class CompatOp : public ngraph::op::Op +{ +public: + static constexpr NodeTypeInfo type_info{"CompatOp", 0}; + const NodeTypeInfo& get_type_info() const override { return type_info; } + CompatOp() = default; + + CompatOp(const Output& value) + : Op({value}) + { + } + + // Test for API compatibility + bool visit_attributes(AttributeVisitor& visitor) override { return true; } + std::shared_ptr copy_with_new_args(const NodeVector& new_args) const override + { + return make_shared(new_args.at(0)); + } + void validate_and_infer_types() override + { + auto arg = input_value(0); + set_output_type(0, arg.get_element_type(), arg.get_shape()); + } + +protected: + // Deprecated method + virtual void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) override + { + adjoints.add_delta(input_value(0), input_value(0) * deltas.at(0)); + } +}; + +constexpr NodeTypeInfo CompatOp::type_info; + +TEST(compat, node) +{ + auto param = make_shared(element::f32, Shape{10}); + auto c = make_shared(element::f32, Shape{10}); + auto x = make_shared(param); + auto result = make_shared(x); + auto f = make_shared(ResultVector{result}, ParameterVector{param}); + autodiff::Adjoints adjoints({result}, {c}); + auto bprop = adjoints.backprop_output(param); + ASSERT_TRUE(bprop.get_index() == 0); + ASSERT_TRUE(is_type(bprop.get_node_shared_ptr())); + set> params; + params.insert(bprop.get_node_shared_ptr()->input_value(0)); + params.insert(bprop.get_node_shared_ptr()->input_value(1)); + EXPECT_TRUE(params.count(param) == 1); + EXPECT_TRUE(params.count(c) == 1); +} \ No newline at end of file diff --git a/test/util/autodiff/backprop_derivative.hpp b/test/util/autodiff/backprop_derivative.hpp index d77d438c472..7ce88d252c4 100644 --- 
a/test/util/autodiff/backprop_derivative.hpp +++ b/test/util/autodiff/backprop_derivative.hpp @@ -142,7 +142,7 @@ namespace ngraph auto c_arg = backend->create_tensor(y_shape); // df/dX* - std::vector> df_output_params; + std::vector> df_output_params; Adjoints adjoints(OutputVector{f->output(0)}, OutputVector{c_param}); @@ -150,7 +150,7 @@ namespace ngraph for (auto x : indep_params) { // add df/dx to df/dX* - df_output_params.push_back(adjoints.backprop_node(x)); + df_output_params.push_back(adjoints.backprop_output(x)); } // (c, X) diff --git a/test/util/autodiff/backprop_function.cpp b/test/util/autodiff/backprop_function.cpp index 48f312a54ce..11351068cc4 100644 --- a/test/util/autodiff/backprop_function.cpp +++ b/test/util/autodiff/backprop_function.cpp @@ -36,9 +36,9 @@ std::shared_ptr autodiff::backprop_function(const std::shared_ptrget_parameters(); auto C = std::make_shared(Y_out.get_element_type(), Y_out.get_shape()); Adjoints adjoints(OutputVector{Y_out}, OutputVector{C}); - std::vector> dYdXs(Xs.size()); + std::vector> dYdXs(Xs.size()); transform(Xs.begin(), Xs.end(), dYdXs.begin(), [C, &adjoints](const std::shared_ptr& X) { - return adjoints.backprop_node(X); + return adjoints.backprop_output(X); }); std::vector> params(Xs); params.push_back(C);