From a775ee61f0433b2fbede18d494fc47894152dd02 Mon Sep 17 00:00:00 2001
From: "Efode, Irina"
Date: Wed, 16 Sep 2020 16:25:29 +0300
Subject: [PATCH 1/3] Select

---
 .../include/ngraph/runtime/reference/select.hpp  | 10 ++++++----
 ngraph/core/src/pass/constant_folding_select.cpp |  4 ++++
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/select.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/select.hpp
index 3f6da667026666..c627a67a3ba125 100644
--- a/ngraph/core/reference/include/ngraph/runtime/reference/select.hpp
+++ b/ngraph/core/reference/include/ngraph/runtime/reference/select.hpp
@@ -32,11 +32,13 @@ namespace ngraph
                         const T* arg1,
                         const T* arg2,
                         T* out,
-                        size_t count) // TODO: using char for bool, is this right?
+                        size_t arg0_count,
+                        size_t arg1_count,
+                        size_t arg2_count,
+                        size_t arg3_count) // TODO: using char for bool, is this right?
             {
-                for (size_t i = 0; i < count; i++)
-                {
-                    out[i] = arg0[i] ? arg1[i] : arg2[i];
+                for (size_t i = 0; i < arg3_count; i++) {
+                    out[i] = arg0[i % arg0_count] ? arg1[i % arg1_count] : arg2[i % arg2_count];
                 }
             }

diff --git a/ngraph/core/src/pass/constant_folding_select.cpp b/ngraph/core/src/pass/constant_folding_select.cpp
index 495d0dc80ad812..bb58987c5c0eca 100644
--- a/ngraph/core/src/pass/constant_folding_select.cpp
+++ b/ngraph/core/src/pass/constant_folding_select.cpp
@@ -36,10 +36,14 @@ shared_ptr fold_constant_select(const shared_ptr& se
     if (auto select_v0 = as_type_ptr(select))
     {
+
         runtime::reference::select(selection->get_data_ptr(),
                                    t->get_data_ptr(),
                                    f->get_data_ptr(),
                                    data_ptr,
+                                   shape_size(selection->get_shape()),
+                                   shape_size(t->get_shape()),
+                                   shape_size(f->get_shape()),
                                    shape_size(out_shape));
     }
     else if (auto select_v1 = as_type_ptr(select))

From af0683efe9f004d98f476ff99400aa3b01ebedc7 Mon Sep 17 00:00:00 2001
From: "Efode, Irina"
Date: Wed, 16 Sep 2020 16:33:01 +0300
Subject: [PATCH 2/3] Fix code style

---
 .../ngraph/runtime/reference/select.hpp       |   3 +-
 .../core/src/pass/constant_folding_select.cpp |   1 -
 .../runtime/interpreter/evaluates_map.cpp     | 755 +++++++++---------
 .../runtime/interpreter/reference/mod.hpp     |   6 +-
 ngraph/test/runtime/pass/opset0_downgrade.cpp |   1 -
 ngraph/test/runtime/pass/opset1_upgrade.cpp   |   5 -
 6 files changed, 388 insertions(+), 383 deletions(-)

diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/select.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/select.hpp
index c627a67a3ba125..9803d24164fb30 100644
--- a/ngraph/core/reference/include/ngraph/runtime/reference/select.hpp
+++ b/ngraph/core/reference/include/ngraph/runtime/reference/select.hpp
@@ -37,7 +37,8 @@ namespace ngraph
                         size_t arg2_count,
                         size_t arg3_count) // TODO: using char for bool, is this right?
             {
-                for (size_t i = 0; i < arg3_count; i++) {
+                for (size_t i = 0; i < arg3_count; i++)
+                {
                     out[i] = arg0[i % arg0_count] ?
arg1[i % arg1_count] : arg2[i % arg2_count]; } } diff --git a/ngraph/core/src/pass/constant_folding_select.cpp b/ngraph/core/src/pass/constant_folding_select.cpp index bb58987c5c0eca..3ca958da0cecf0 100644 --- a/ngraph/core/src/pass/constant_folding_select.cpp +++ b/ngraph/core/src/pass/constant_folding_select.cpp @@ -36,7 +36,6 @@ shared_ptr fold_constant_select(const shared_ptr& se if (auto select_v0 = as_type_ptr(select)) { - runtime::reference::select(selection->get_data_ptr(), t->get_data_ptr(), f->get_data_ptr(), diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp index 3757a6928c0967..536b095c2dedf4 100644 --- a/ngraph/test/runtime/interpreter/evaluates_map.cpp +++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp @@ -15,6 +15,7 @@ //***************************************************************************** #include "evaluates_map.hpp" +#include #include #include #include @@ -26,13 +27,12 @@ #include #include #include +#include #include #include #include #include #include -#include -#include #include "ngraph/ops.hpp" #include "ngraph/runtime/reference/avg_pool.hpp" #include "ngraph/runtime/reference/batch_norm.hpp" @@ -57,128 +57,135 @@ using namespace ngraph; using namespace std; -namespace { - template +namespace +{ + template bool evaluate(shared_ptr op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { return false; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { const auto filter_data = inputs[1]->get_data_ptr(); auto out_data_ptr = outputs[0]->get_data_ptr(); const auto in_data_ptr = inputs[0]->get_data_ptr(); - const auto &out_shape = outputs[0]->get_shape(); - const auto &in_shape = inputs[0]->get_shape(); - const auto &filter_shape = inputs[1]->get_shape(); + const auto& out_shape = outputs[0]->get_shape(); + const auto& in_shape = inputs[0]->get_shape(); + const auto& filter_shape = inputs[1]->get_shape(); Strides in_dilation(std::vector(in_shape.size() - 2)); std::fill(in_dilation.begin(), in_dilation.end(), 1); runtime::reference::convolution::value_type>( - in_data_ptr, - filter_data, - out_data_ptr, - in_shape, - filter_shape, - out_shape, - op->get_strides(), - op->get_dilations(), - op->get_pads_begin(), - op->get_pads_end(), - in_dilation); + in_data_ptr, + filter_data, + out_data_ptr, + in_shape, + filter_shape, + out_shape, + op->get_strides(), + op->get_dilations(), + op->get_pads_begin(), + op->get_pads_end(), + in_dilation); return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { const auto filter_data = inputs[1]->get_data_ptr(); auto out_data_ptr = outputs[0]->get_data_ptr(); const auto in_data_ptr = inputs[0]->get_data_ptr(); - const auto &out_shape = outputs[0]->get_shape(); - const auto &in_shape = inputs[0]->get_shape(); - const auto &filter_shape = inputs[1]->get_shape(); + const auto& out_shape = outputs[0]->get_shape(); + const auto& in_shape = inputs[0]->get_shape(); + const auto& filter_shape = inputs[1]->get_shape(); Strides in_dilation(std::vector(in_shape.size() - 2)); std::fill(in_dilation.begin(), 
in_dilation.end(), 1); runtime::reference::convolution_backprop_in::value_type>( - in_data_ptr, - filter_data, - out_data_ptr, - in_shape, - filter_shape, - out_shape, - in_dilation, - op->get_dilations(), - op->get_pads_begin(), - op->get_pads_end(), - op->get_strides()); + in_data_ptr, + filter_data, + out_data_ptr, + in_shape, + filter_shape, + out_shape, + in_dilation, + op->get_dilations(), + op->get_pads_begin(), + op->get_pads_end(), + op->get_strides()); return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { const auto filter_data = inputs[1]->get_data_ptr(); auto out_data_ptr = outputs[0]->get_data_ptr(); const auto in_data_ptr = inputs[0]->get_data_ptr(); - const auto &out_shape = outputs[0]->get_shape(); - const auto &in_shape = inputs[0]->get_shape(); - const auto &filter_shape = inputs[1]->get_shape(); + const auto& out_shape = outputs[0]->get_shape(); + const auto& in_shape = inputs[0]->get_shape(); + const auto& filter_shape = inputs[1]->get_shape(); Strides in_dilation(std::vector(in_shape.size() - 2)); std::fill(in_dilation.begin(), in_dilation.end(), 1); runtime::reference::convolution::value_type>( - in_data_ptr, - filter_data, - out_data_ptr, - in_shape, - filter_shape, - out_shape, - op->get_strides(), - op->get_dilations(), - op->get_pads_begin(), - op->get_pads_end(), - in_dilation, - filter_shape.at(0)); + in_data_ptr, + filter_data, + out_data_ptr, + in_shape, + filter_shape, + out_shape, + op->get_strides(), + op->get_dilations(), + op->get_pads_begin(), + op->get_pads_end(), + in_dilation, + filter_shape.at(0)); return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { const auto filter_data = inputs[1]->get_data_ptr(); auto out_data_ptr = outputs[0]->get_data_ptr(); const auto in_data_ptr = inputs[0]->get_data_ptr(); - const auto &out_shape = outputs[0]->get_shape(); - const auto &in_shape = inputs[0]->get_shape(); - const auto &filter_shape = inputs[1]->get_shape(); + const auto& out_shape = outputs[0]->get_shape(); + const auto& in_shape = inputs[0]->get_shape(); + const auto& filter_shape = inputs[1]->get_shape(); Strides in_dilation(std::vector(in_shape.size() - 2)); std::fill(in_dilation.begin(), in_dilation.end(), 1); runtime::reference::convolution_backprop_in::value_type>( - in_data_ptr, - filter_data, - out_data_ptr, - in_shape, - filter_shape, - out_shape, - in_dilation, - op->get_dilations(), - op->get_pads_begin(), - op->get_pads_end(), - op->get_strides(), - filter_shape.at(0)); + in_data_ptr, + filter_data, + out_data_ptr, + in_shape, + filter_shape, + out_shape, + in_dilation, + op->get_dilations(), + op->get_pads_begin(), + op->get_pads_end(), + op->get_strides(), + filter_shape.at(0)); return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; #define REF_CALL(U) \ @@ -191,21 +198,21 @@ namespace { op->is_reverse()); \ break; - switch (inputs[1]->get_element_type()) { - case element::Type_t::i64: { - 
REF_CALL(element::Type_t::i64); - } - default: - REF_CALL(element::Type_t::i32); + switch (inputs[1]->get_element_type()) + { + case element::Type_t::i64: { REF_CALL(element::Type_t::i64); + } + default: REF_CALL(element::Type_t::i32); } #undef REF_CALL return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; #define REF_CALL(elType) \ runtime::reference::embeddingSegmentsSum::value_type>( \ @@ -220,22 +227,21 @@ namespace { outputs[0]->get_shape()); \ break; - switch (inputs[1]->get_element_type()) { - case element::Type_t::i32: - REF_CALL(element::Type_t::i32); - case element::Type_t::i64: - REF_CALL(element::Type_t::i64); - default: - return false; + switch (inputs[1]->get_element_type()) + { + case element::Type_t::i32: REF_CALL(element::Type_t::i32); + case element::Type_t::i64: REF_CALL(element::Type_t::i64); + default: return false; } #undef REF_CALL return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; #define REF_CALL(elType) \ runtime::reference::embeddingBagOffsetsSumget_shape()); \ break; - switch (inputs[1]->get_element_type()) { - case element::Type_t::i32: - REF_CALL(element::Type_t::i32); - case element::Type_t::i64: - REF_CALL(element::Type_t::i64); - default: - return false; + switch (inputs[1]->get_element_type()) + { + case element::Type_t::i32: REF_CALL(element::Type_t::i32); + case element::Type_t::i64: REF_CALL(element::Type_t::i64); + default: return false; } #undef REF_CALL return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; #define REF_CALL(elType) \ runtime::reference::embeddingBagPackedSumget_shape()); \ break; - switch (inputs[1]->get_element_type()) { - case element::Type_t::i32: - REF_CALL(element::Type_t::i32); - case element::Type_t::i64: - REF_CALL(element::Type_t::i64); - default: - return false; + switch (inputs[1]->get_element_type()) + { + case element::Type_t::i32: REF_CALL(element::Type_t::i32); + case element::Type_t::i64: REF_CALL(element::Type_t::i64); + default: return false; } #undef REF_CALL return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; runtime::reference::mvn(inputs[0]->get_data_ptr(), outputs[0]->get_data_ptr(), @@ -304,10 +308,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; runtime::reference::lrn(inputs[0]->get_data_ptr(), op->get_reduction_axes(), @@ -320,40 
+325,48 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; runtime::reference::referenceDetectionOutput refDetOut( - op->get_attrs(), op->get_input_shape(0), op->get_input_shape(2)); - if (op->get_input_size() == 3) { + op->get_attrs(), op->get_input_shape(0), op->get_input_shape(2)); + if (op->get_input_size() == 3) + { refDetOut.run(input[0]->get_data_ptr(), input[1]->get_data_ptr(), input[2]->get_data_ptr(), nullptr, nullptr, outputs[0]->get_data_ptr()); - } else if (op->get_input_size() == 5) { + } + else if (op->get_input_size() == 5) + { refDetOut.run(input[0]->get_data_ptr(), input[1]->get_data_ptr(), input[2]->get_data_ptr(), input[3]->get_data_ptr(), input[4]->get_data_ptr(), outputs[0]->get_data_ptr()); - } else { + } + else + { throw ngraph_error("DetectionOutput layer supports only 3 or 5 inputs"); } return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; auto idxType = op->get_input_element_type(1); - if (idxType == element::i32) { + if (idxType == element::i32) + { runtime::reference::scatterNdUpdate(input[0]->get_data_ptr(), input[1]->get_data_ptr(), input[2]->get_data_ptr(), @@ -361,7 +374,9 @@ namespace { op->get_input_shape(0), op->get_input_shape(1), op->get_input_shape(2)); - } else if (idxType == element::i64) { + } + else if (idxType == element::i64) + { runtime::reference::scatterNdUpdate(input[0]->get_data_ptr(), input[1]->get_data_ptr(), input[2]->get_data_ptr(), @@ -369,17 +384,20 @@ namespace { op->get_input_shape(0), op->get_input_shape(1), op->get_input_shape(2)); - } else { + } + else + { throw ngraph_error( - "ScatterNDUpdate layer support only i32 and i64 'indices' input precision!"); + "ScatterNDUpdate layer support only i32 and i64 'indices' input precision!"); } return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; runtime::reference::select(input[0]->get_data_ptr(), input[1]->get_data_ptr(), @@ -392,10 +410,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; runtime::reference::avg_pool(input[0]->get_data_ptr(), outputs[0]->get_data_ptr(), @@ -409,10 +428,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; runtime::reference::hard_sigmoid(input[0]->get_data_ptr(), input[1]->get_data_ptr(), @@ -424,10 +444,11 @@ namespace { return true; } - template - bool 
evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; runtime::reference::elu(input[0]->get_data_ptr(), outputs[0]->get_data_ptr(), @@ -443,14 +464,16 @@ namespace { { using T = typename element_type_traits::value_type; std::cout << "djdkldld" << std::endl; - std:: cout << input[0]->get_data_ptr()[0] << " " << input[0]->get_data_ptr()[1] << std::endl; - auto cons = dynamic_pointer_cast(op->input_value(0).get_node_shared_ptr()); - auto vec = cons->get_vector(); + std::cout << input[0]->get_data_ptr()[0] << " " << input[0]->get_data_ptr()[1] + << std::endl; + auto cons = + dynamic_pointer_cast(op->input_value(0).get_node_shared_ptr()); + auto vec = cons->get_vector(); runtime::reference::prior_box(input[0]->get_data_ptr(), input[1]->get_data_ptr(), outputs[0]->get_data_ptr(), outputs[0]->get_shape(), - op->get_attrs()); + op->get_attrs()); return true; } @@ -468,10 +491,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; runtime::reference::selu(input[0]->get_data_ptr(), input[1]->get_data_ptr(), @@ -483,10 +507,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; runtime::reference::ceiling(input[0]->get_data_ptr(), outputs[0]->get_data_ptr(), @@ -494,10 +519,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; runtime::reference::gelu(input[0]->get_data_ptr(), outputs[0]->get_data_ptr(), @@ -505,10 +531,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; #define REF_CALL(elType) \ runtime::reference::CTCLoss::value_type>( \ @@ -524,22 +551,21 @@ namespace { outputs[0]->get_data_ptr()); \ break; - switch (input[1]->get_element_type()) { - case element::Type_t::i32: - REF_CALL(element::Type_t::i32); - case element::Type_t::i64: - REF_CALL(element::Type_t::i64); - default: - return false; + switch (input[1]->get_element_type()) + { + case element::Type_t::i32: REF_CALL(element::Type_t::i32); + case element::Type_t::i64: REF_CALL(element::Type_t::i64); + default: return false; } #undef REF_CALL return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; 
runtime::reference::batch_norm_inference(op->get_eps_value(), input[0]->get_data_ptr(), @@ -552,10 +578,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using T = typename element_type_traits::value_type; #define REF_CALL(U) \ @@ -568,80 +595,60 @@ namespace { input[1]->get_data_ptr()); \ break; - switch (input[1]->get_element_type()) { - case element::Type_t::boolean: - REF_CALL(element::Type_t::boolean) - case element::Type_t::i8: - REF_CALL(element::Type_t::i8); - case element::Type_t::i16: - REF_CALL(element::Type_t::i16); - case element::Type_t::i32: - REF_CALL(element::Type_t::i32); - case element::Type_t::i64: - REF_CALL(element::Type_t::i64); - case element::Type_t::u8: - REF_CALL(element::Type_t::u8); - case element::Type_t::u16: - REF_CALL(element::Type_t::u16); - case element::Type_t::u32: - REF_CALL(element::Type_t::u32); - case element::Type_t::u64: - REF_CALL(element::Type_t::u64); - case element::Type_t::f16: - REF_CALL(element::Type_t::f16); - case element::Type_t::f32: - REF_CALL(element::Type_t::f32); - case element::Type_t::f64: - REF_CALL(element::Type_t::f64); - default: - return false; + switch (input[1]->get_element_type()) + { + case element::Type_t::boolean: REF_CALL(element::Type_t::boolean) + case element::Type_t::i8: REF_CALL(element::Type_t::i8); + case element::Type_t::i16: REF_CALL(element::Type_t::i16); + case element::Type_t::i32: REF_CALL(element::Type_t::i32); + case element::Type_t::i64: REF_CALL(element::Type_t::i64); + case element::Type_t::u8: REF_CALL(element::Type_t::u8); + case element::Type_t::u16: REF_CALL(element::Type_t::u16); + case element::Type_t::u32: REF_CALL(element::Type_t::u32); + case element::Type_t::u64: REF_CALL(element::Type_t::u64); + case element::Type_t::f16: REF_CALL(element::Type_t::f16); + case element::Type_t::f32: REF_CALL(element::Type_t::f32); + case element::Type_t::f64: REF_CALL(element::Type_t::f64); + default: return false; } #undef REF_CALL return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &input) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& input) + { using TO = typename element_type_traits::value_type; - if (OUT_ET == element::Type_t::boolean) { + if (OUT_ET == element::Type_t::boolean) + { #define REF_CALL_BOOL(TI) \ runtime::reference::convert_to_bool::value_type>( \ input[0]->get_data_ptr(), \ outputs[0]->get_data_ptr(), \ shape_size(input[0]->get_shape())); \ break; - switch (input[0]->get_element_type()) { - case element::Type_t::boolean: - REF_CALL_BOOL(element::Type_t::boolean); - case element::Type_t::i8: - REF_CALL_BOOL(element::Type_t::i8); - case element::Type_t::i16: - REF_CALL_BOOL(element::Type_t::i16); - case element::Type_t::i32: - REF_CALL_BOOL(element::Type_t::i32); - case element::Type_t::i64: - REF_CALL_BOOL(element::Type_t::i64); - case element::Type_t::u8: - REF_CALL_BOOL(element::Type_t::u8); - case element::Type_t::u16: - REF_CALL_BOOL(element::Type_t::u16); - case element::Type_t::u32: - REF_CALL_BOOL(element::Type_t::u32); - case element::Type_t::u64: - REF_CALL_BOOL(element::Type_t::u64); - case element::Type_t::f16: - REF_CALL_BOOL(element::Type_t::f16); - case element::Type_t::f32: - REF_CALL_BOOL(element::Type_t::f32); - case 
element::Type_t::f64: - REF_CALL_BOOL(element::Type_t::f64); - default: - return false; + switch (input[0]->get_element_type()) + { + case element::Type_t::boolean: REF_CALL_BOOL(element::Type_t::boolean); + case element::Type_t::i8: REF_CALL_BOOL(element::Type_t::i8); + case element::Type_t::i16: REF_CALL_BOOL(element::Type_t::i16); + case element::Type_t::i32: REF_CALL_BOOL(element::Type_t::i32); + case element::Type_t::i64: REF_CALL_BOOL(element::Type_t::i64); + case element::Type_t::u8: REF_CALL_BOOL(element::Type_t::u8); + case element::Type_t::u16: REF_CALL_BOOL(element::Type_t::u16); + case element::Type_t::u32: REF_CALL_BOOL(element::Type_t::u32); + case element::Type_t::u64: REF_CALL_BOOL(element::Type_t::u64); + case element::Type_t::f16: REF_CALL_BOOL(element::Type_t::f16); + case element::Type_t::f32: REF_CALL_BOOL(element::Type_t::f32); + case element::Type_t::f64: REF_CALL_BOOL(element::Type_t::f64); + default: return false; } #undef REF_CALL_BOOL - } else { + } + else + { #define REF_CALL(TI) \ runtime::reference::convert::value_type, TO>( \ input[0]->get_data_ptr(), \ @@ -649,33 +656,21 @@ namespace { shape_size(input[0]->get_shape())); \ break; - switch (input[0]->get_element_type()) { - case element::Type_t::boolean: - REF_CALL(element::Type_t::boolean); - case element::Type_t::i8: - REF_CALL(element::Type_t::i8); - case element::Type_t::i16: - REF_CALL(element::Type_t::i16); - case element::Type_t::i32: - REF_CALL(element::Type_t::i32); - case element::Type_t::i64: - REF_CALL(element::Type_t::i64); - case element::Type_t::u8: - REF_CALL(element::Type_t::u8); - case element::Type_t::u16: - REF_CALL(element::Type_t::u16); - case element::Type_t::u32: - REF_CALL(element::Type_t::u32); - case element::Type_t::u64: - REF_CALL(element::Type_t::u64); - case element::Type_t::f16: - REF_CALL(element::Type_t::f16); - case element::Type_t::f32: - REF_CALL(element::Type_t::f32); - case element::Type_t::f64: - REF_CALL(element::Type_t::f64); - default: - return false; + switch (input[0]->get_element_type()) + { + case element::Type_t::boolean: REF_CALL(element::Type_t::boolean); + case element::Type_t::i8: REF_CALL(element::Type_t::i8); + case element::Type_t::i16: REF_CALL(element::Type_t::i16); + case element::Type_t::i32: REF_CALL(element::Type_t::i32); + case element::Type_t::i64: REF_CALL(element::Type_t::i64); + case element::Type_t::u8: REF_CALL(element::Type_t::u8); + case element::Type_t::u16: REF_CALL(element::Type_t::u16); + case element::Type_t::u32: REF_CALL(element::Type_t::u32); + case element::Type_t::u64: REF_CALL(element::Type_t::u64); + case element::Type_t::f16: REF_CALL(element::Type_t::f16); + case element::Type_t::f32: REF_CALL(element::Type_t::f32); + case element::Type_t::f64: REF_CALL(element::Type_t::f64); + default: return false; } #undef REF_CALL } @@ -683,45 +678,48 @@ namespace { } // TODO: Rewrite to v1 - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; - switch (inputs[0]->get_element_type()) { - case element::Type_t::i32: - runtime::reference::one_hot::value_type, T>( - inputs[0]->get_data_ptr(), - outputs[0]->get_data_ptr(), - inputs[0]->get_shape(), - outputs[0]->get_shape(), - op->get_axis(), - inputs[2]->get_data_ptr()[0], - inputs[3]->get_data_ptr()[0]); - break; - case element::Type_t::i64: - 
runtime::reference::one_hot::value_type, T>( - inputs[0]->get_data_ptr(), - outputs[0]->get_data_ptr(), - inputs[0]->get_shape(), - outputs[0]->get_shape(), - op->get_axis(), - inputs[2]->get_data_ptr()[0], - inputs[3]->get_data_ptr()[0]); - break; - default: - std::stringstream ss; - ss << "Unhandled input precision " << inputs[0]->get_element_type().get_type_name() << - " in v1::OneHot evaluate call"; - throw ngraph_error(ss.str()); + switch (inputs[0]->get_element_type()) + { + case element::Type_t::i32: + runtime::reference::one_hot::value_type, T>( + inputs[0]->get_data_ptr(), + outputs[0]->get_data_ptr(), + inputs[0]->get_shape(), + outputs[0]->get_shape(), + op->get_axis(), + inputs[2]->get_data_ptr()[0], + inputs[3]->get_data_ptr()[0]); + break; + case element::Type_t::i64: + runtime::reference::one_hot::value_type, T>( + inputs[0]->get_data_ptr(), + outputs[0]->get_data_ptr(), + inputs[0]->get_shape(), + outputs[0]->get_shape(), + op->get_axis(), + inputs[2]->get_data_ptr()[0], + inputs[3]->get_data_ptr()[0]); + break; + default: + std::stringstream ss; + ss << "Unhandled input precision " << inputs[0]->get_element_type().get_type_name() + << " in v1::OneHot evaluate call"; + throw ngraph_error(ss.str()); } return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; runtime::reference::rnn_cell(inputs[0]->get_data_ptr(), inputs[0]->get_shape(), @@ -739,10 +737,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; runtime::reference::lstm_cell(inputs[0]->get_data_ptr(), inputs[0]->get_shape(), @@ -765,10 +764,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; runtime::reference::gru_cell(inputs[0]->get_data_ptr(), inputs[0]->get_shape(), @@ -788,10 +788,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; runtime::reference::pad(inputs[0]->get_data_ptr(), inputs[1]->get_data_ptr(), @@ -805,10 +806,11 @@ namespace { return true; } - template - bool evaluate(const shared_ptr &op, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + template + bool evaluate(const shared_ptr& op, + const HostTensorVector& outputs, + const HostTensorVector& inputs) + { using T = typename element_type_traits::value_type; runtime::reference::gather_tree(inputs[0]->get_data_ptr(), inputs[1]->get_data_ptr(), @@ -823,55 +825,64 @@ namespace { return true; } - template + template bool evaluate_node(std::shared_ptr node, - const HostTensorVector &outputs, - const HostTensorVector &inputs) { + const HostTensorVector& outputs, + const 
HostTensorVector& inputs) + { auto element_type = node->get_output_element_type(0); - if (is_type(node)) { + if (is_type(node)) + { element_type = node->get_input_element_type(1); - } else if (is_type(node)) { + } + else if (is_type(node)) + { element_type = node->get_input_element_type(0); } - for (size_t i = 1; i < node->outputs().size(); i++) { - if (element_type != node->get_output_element_type(i)) { + for (size_t i = 1; i < node->outputs().size(); i++) + { + if (element_type != node->get_output_element_type(i)) + { throw std::logic_error("Output node element types is not equal"); } } - switch (element_type) { - case element::Type_t::boolean: - return evaluate(as_type_ptr(node), outputs, inputs);; - // case element::Type_t::bf16: - // break; - case element::Type_t::f16: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::f64: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::f32: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::i8: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::i16: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::i32: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::i64: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::u8: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::u16: - return evaluate(as_type_ptr(node), outputs, inputs); - case element::Type_t::u32: - return evaluate(as_type_ptr(node), outputs, inputs); - default: - throw ngraph_error(std::string("Unhandled data type ") + - node->get_element_type().get_type_name() + - std::string("in evaluate_node()")); + switch (element_type) + { + case element::Type_t::boolean: + return evaluate(as_type_ptr(node), outputs, inputs); + ; + // case element::Type_t::bf16: + // break; + case element::Type_t::f16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::f64: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::f32: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::i8: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::i16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::i32: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::i64: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::u8: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::u16: + return evaluate(as_type_ptr(node), outputs, inputs); + case element::Type_t::u32: + return evaluate(as_type_ptr(node), outputs, inputs); + default: + throw ngraph_error(std::string("Unhandled data type ") + + node->get_element_type().get_type_name() + + std::string("in evaluate_node()")); } } } // namespace -runtime::interpreter::EvaluatorsMap &runtime::interpreter::get_evaluators_map() { +runtime::interpreter::EvaluatorsMap& runtime::interpreter::get_evaluators_map() +{ static runtime::interpreter::EvaluatorsMap evaluatorsMap{ #define NGRAPH_OP(NAME, NAMESPACE) {NAMESPACE::NAME::type_info, evaluate_node}, diff --git a/ngraph/test/runtime/interpreter/reference/mod.hpp b/ngraph/test/runtime/interpreter/reference/mod.hpp index 72289c50179240..07f5ebee79ebeb 100644 --- a/ngraph/test/runtime/interpreter/reference/mod.hpp +++ b/ngraph/test/runtime/interpreter/reference/mod.hpp @@ -33,9 +33,9 @@ namespace ngraph const 
op::AutoBroadcastSpec& broadcast_spec)
                {
                    autobroadcast_binop(
-                       arg0, arg1, out, arg_shape, arg_shape, broadcast_spec, [](T x, T y) -> T {
-                           return T(x - std::trunc(x / y) * y);
-                       });
+                        arg0, arg1, out, arg_shape, arg_shape, broadcast_spec, [](T x, T y) -> T {
+                            return T(x - std::trunc(x / y) * y);
+                        });
                }
            }
        }
diff --git a/ngraph/test/runtime/pass/opset0_downgrade.cpp b/ngraph/test/runtime/pass/opset0_downgrade.cpp
index 0d668c87253cfc..aff2269652798d 100644
--- a/ngraph/test/runtime/pass/opset0_downgrade.cpp
+++ b/ngraph/test/runtime/pass/opset0_downgrade.cpp
@@ -95,7 +95,6 @@ namespace
     // Default is that we did nothing
     shared_ptr op_cast(shared_ptr node) { return nullptr; }
-
     shared_ptr op_cast(shared_ptr node)
     {
         auto arg = node->input_value(0);
diff --git a/ngraph/test/runtime/pass/opset1_upgrade.cpp b/ngraph/test/runtime/pass/opset1_upgrade.cpp
index 08ca76a6be9f7e..301d55e6dc6d14 100644
--- a/ngraph/test/runtime/pass/opset1_upgrade.cpp
+++ b/ngraph/test/runtime/pass/opset1_upgrade.cpp
@@ -49,7 +49,6 @@ namespace
     // Default is that we didn nothing
     shared_ptr op_cast(shared_ptr node) { return nullptr; }
-
     shared_ptr op_cast(shared_ptr node)
     {
         auto replacement_node = ngraph::builder::opset1::make_broadcast(
@@ -151,7 +150,6 @@ namespace
         return replacement_node;
     }
-
     shared_ptr op_cast(shared_ptr node)
     {
         auto strides = node->get_window_movement_strides();
@@ -249,7 +247,6 @@ namespace
         return replacement_node;
     }
-
     shared_ptr op_cast(shared_ptr node)
     {
         bool keep_dims = false;
@@ -259,7 +256,6 @@ namespace
         return replacement_node;
     }
-
     shared_ptr op_cast(shared_ptr node)
     {
         bool keep_dims = false;
@@ -276,7 +272,6 @@ namespace
         return replacement_node;
     }
-
     shared_ptr op_cast(shared_ptr node)
     {
         const auto indices = node->input_value(0).get_node_shared_ptr();

From 1cc0f9b897e09a27f74c346cb15ebd2af5228a12 Mon Sep 17 00:00:00 2001
From: "Efode, Irina"
Date: Wed, 16 Sep 2020 16:36:21 +0300
Subject: [PATCH 3/3] Fix select messages

---
 ngraph/test/type_prop/select.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ngraph/test/type_prop/select.cpp b/ngraph/test/type_prop/select.cpp
index 488098d64ba201..7ccf34e4887199 100644
--- a/ngraph/test/type_prop/select.cpp
+++ b/ngraph/test/type_prop/select.cpp
@@ -132,7 +132,7 @@ TEST(type_prop, select_elem_mismatch_bc)
     catch (const NodeValidationFailure& error)
     {
         EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Argument 1 and 2 element types are inconsistent"));
+                             std::string("Argument 1 and 2 element types must match"));
     }
     catch (...)
     {
@@ -167,7 +167,7 @@ TEST(type_prop, select_partial_all_rank_dynamic_arg0_et_dynamic_arg1_arg2_et_mis
     catch (const NodeValidationFailure& error)
     {
         EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Argument 1 and 2 element types are inconsistent"));
+                             std::string("Argument 1 and 2 element types must match"));
    }
    catch (...)
    {
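
The kernel change in PATCH 1/3 is easiest to see in isolation. Below is a minimal, self-contained C++ sketch of the same indexing scheme. It is not part of the patches: select_ref and the main harness are hypothetical stand-ins, and the real function (ngraph::runtime::reference::select) is templated on the element type, with the angle-bracketed template arguments lost in this rendering of the diff. Indexing each input modulo its own element count lets a one-element condition or branch repeat across the whole output, which is the case the updated constant-folding call feeds it by passing shape_size of every input tensor; full multidirectional broadcasting would additionally need shape-aware index mapping, so this sketch only covers the scalar/tiling cases.

// Standalone sketch of the broadcast-aware select kernel introduced in
// PATCH 1/3. "select_ref" is a hypothetical name used for illustration;
// the real kernel takes a char-per-element condition buffer, as the
// surviving TODO comment in the patch notes.
#include <cstddef>
#include <iostream>
#include <vector>

template <typename T>
void select_ref(const char* arg0,      // condition, one byte per element
                const T* arg1,         // values taken where condition is true
                const T* arg2,         // values taken where condition is false
                T* out,
                std::size_t arg0_count,
                std::size_t arg1_count,
                std::size_t arg2_count,
                std::size_t out_count) // "arg3_count" in the patch
{
    for (std::size_t i = 0; i < out_count; i++)
    {
        // Wrapping each index modulo its buffer's element count lets a
        // smaller input (e.g. a scalar condition) repeat across the output.
        out[i] = arg0[i % arg0_count] ? arg1[i % arg1_count] : arg2[i % arg2_count];
    }
}

int main()
{
    std::vector<char> cond{1};                        // scalar condition, reused four times
    std::vector<float> t_vals{1.f, 2.f, 3.f, 4.f};    // "then" branch
    std::vector<float> f_vals{-1.f, -2.f, -3.f, -4.f}; // "else" branch
    std::vector<float> out(4);

    select_ref(cond.data(), t_vals.data(), f_vals.data(), out.data(),
               cond.size(), t_vals.size(), f_vals.size(), out.size());

    for (float v : out)
        std::cout << v << ' '; // prints: 1 2 3 4
    std::cout << '\n';
    return 0;
}

Running this prints 1 2 3 4; flipping cond to {0} prints -1 -2 -3 -4. The extra count parameters are what free the condition and the two value inputs from having to match the output length element for element, which is why the constant-folding pass now passes shape_size of each input alongside shape_size(out_shape).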