From 760355253b96cf34a97ceec6bc73a84d658ba727 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 8 Jul 2022 02:09:41 +0000 Subject: [PATCH 01/20] Add in changes for onnx Mod operator Initial operator for mod implimentation and test cases for integer and floating based types. Need to use fmod from stdlib for floating point types. half_float::half thankfully is specced to the use the existing std::fmod() call when looking at the half.hpp implimentation. fmod_flag should mirror the onnx fmod attribute. Right now using a floating point type without setting that on the user side to true will result in an exception. Ref ticket #1283 Double,float and half use their own typename specification to achieve this, otherwise we rely on the % operator to get the integer remainder while preserving sign of the dividend to the result. --- src/CMakeLists.txt | 1 + src/include/migraphx/op/mod.hpp | 128 ++++++++++++++++++++++++++++++++ test/ref_ops_test.cpp | 37 +++++++++ test/verify/test_mod.cpp | 42 +++++++++++ 4 files changed, 208 insertions(+) create mode 100644 src/include/migraphx/op/mod.hpp create mode 100644 test/verify/test_mod.cpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6d53ded56d4..c3fcb0ed3fb 100755 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -155,6 +155,7 @@ register_migraphx_ops( lstm max min + mod mul multibroadcast multinomial diff --git a/src/include/migraphx/op/mod.hpp b/src/include/migraphx/op/mod.hpp new file mode 100644 index 00000000000..d2a8e1d51ee --- /dev/null +++ b/src/include/migraphx/op/mod.hpp @@ -0,0 +1,128 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef MIGRAPHX_GUARD_OPERATORS_MUL_HPP +#define MIGRAPHX_GUARD_OPERATORS_MUL_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace migraphx { +inline namespace MIGRAPHX_INLINE_NS { +namespace op { + +template +T mod_op(T x, T y) +{ + return (x % y); +} + +template <> +float mod_op(float x, float y) +{ + return std::fmod(x, y); +} + +template <> +double mod_op(double x, double y) +{ + return std::fmod(x, y); +} + +template <> +half_float::half mod_op(half_float::half x, half_float::half y) +{ + return half_float::fmod(x, y); +} + +struct mod : binary +{ + bool fmod_flag; + + template + static auto reflect(Self& self, F f) + { + return pack(f(self.fmod_flag, "fmod_flag")); + } + + value attributes() const + { + auto a = base_attributes(); + a["fmod_flag"] = fmod_flag; + return a; + } + + shape compute_shape(std::vector inputs) const + { + check_shapes{inputs, (*this)}.has(2).same_type().same_dims(); + auto s0 = inputs.at(0); + auto s1 = inputs.at(1); + + if((s0.type() == shape::float_type || s0.type() == shape::double_type || + s0.type() == shape::half_type) && + (fmod_flag == false)) + { + MIGRAPHX_THROW("fmod must be true for floating data types"); + } + + if(s0 == s1 and s0.packed()) + { + return s0; + } + else if(s0.packed() != s1.packed()) + { + return s0.packed() ? s0 : s1; + } + else if(s0.broadcasted() != s1.broadcasted()) + { + return s0.broadcasted() ? 
s1.with_lens(s0.lens()) : s0.with_lens(s0.lens()); + } + else + { + return {s0.type(), s0.lens()}; + } + } + + std::string point_function() const { return "mod"; } + auto apply() const + { + return [&](auto x, auto y) { return mod_op(x, y); }; + } + + mod(bool fmod = false) : fmod_flag{fmod} {} +}; + +} // namespace op +} // namespace MIGRAPHX_INLINE_NS +} // namespace migraphx + +#endif diff --git a/test/ref_ops_test.cpp b/test/ref_ops_test.cpp index 366f8b38924..9971e60dbea 100644 --- a/test/ref_ops_test.cpp +++ b/test/ref_ops_test.cpp @@ -3030,6 +3030,43 @@ TEST_CASE(min_test) EXPECT(migraphx::verify_range(results_vector, gold)); } +TEST_CASE(mod_test) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + migraphx::shape s{migraphx::shape::int32_type, {3}}; + auto l0 = mm->add_literal(migraphx::literal{s, {-7, 8, 3}}); + auto l1 = mm->add_literal(migraphx::literal{s, {2, 4, 6}}); + auto l2 = mm->add_literal(migraphx::literal{s, {7, 5, 9}}); + auto curr_mod = mm->add_instruction(migraphx::make_op("mod"), l0, l1); + mm->add_instruction(migraphx::make_op("mod"), curr_mod, l2); + p.compile(migraphx::ref::target{}); + auto result = p.eval({}).back(); + std::vector results_vector(4); + result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); + std::vector gold{-1, 0, 3}; + EXPECT(migraphx::verify_range(results_vector, gold)); +} + +TEST_CASE(mod_floatingPoint_test) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + migraphx::shape s{migraphx::shape::float_type, {3}}; + auto l0 = mm->add_literal(migraphx::literal{s, {7.2f, 8.5f, 3.3f}}); + auto l1 = mm->add_literal(migraphx::literal{s, {2.0f, 4.0f, 6.0f}}); + auto l2 = mm->add_literal(migraphx::literal{s, {7.0f, 5.0f, 9.0f}}); + auto curr_mod = mm->add_instruction(migraphx::make_op("mod", {{"fmod_flag", true}}), l0, l1); + mm->add_instruction(migraphx::make_op("mod", {{"fmod_flag", true}}), curr_mod, l2); + + p.compile(migraphx::ref::target{}); + auto result = 
p.eval({}).back(); + std::vector results_vector(4); + result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); + std::vector gold{1.2f, 0.5f, 3.3f}; + EXPECT(migraphx::verify_range(results_vector, gold)); +} + TEST_CASE(mul_test) { migraphx::program p; diff --git a/test/verify/test_mod.cpp b/test/verify/test_mod.cpp new file mode 100644 index 00000000000..392ccc4af8f --- /dev/null +++ b/test/verify/test_mod.cpp @@ -0,0 +1,42 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "verify_program.hpp" +#include +#include +#include + +struct test_mod : verify_program +{ + migraphx::program create_program() const + { + migraphx::program p; + auto* mm = p.get_main_module(); + migraphx::shape s{migraphx::shape::float_type, {3}}; + auto x = mm->add_parameter("x", s); + auto y = mm->add_parameter("y", s); + mm->add_instruction(migraphx::make_op("mod", {{"fmod_flag", true}}), x, y); + return p; + } +}; From 8ff4b151fff25bcd03cc8ccf4bb63203801ea967 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Tue, 12 Jul 2022 16:42:21 +0000 Subject: [PATCH 02/20] Split mod operation into fmod & mod equivalents Since onnx's Mod operation changes behavior based on whether the fmod flag is set, functionality is now split to mirror python's fmod() functionality. For the integer mod case, I had to use a componsition of std::fmod() so that floating and integral types are all handled while also perserving sign to be identital to the python numpy::mod() case. --- src/CMakeLists.txt | 1 + src/include/migraphx/op/fmod.hpp | 73 ++++++++++++++++++++++++++++++++ src/include/migraphx/op/mod.hpp | 61 ++------------------------ src/onnx/parse_mod.cpp | 62 +++++++++++++++++++++++++++ test/ref_ops_test.cpp | 55 ++++++++++++++++++++---- test/verify/test_fmod.cpp | 42 ++++++++++++++++++ test/verify/test_mod.cpp | 2 +- 7 files changed, 228 insertions(+), 68 deletions(-) create mode 100644 src/include/migraphx/op/fmod.hpp create mode 100644 src/onnx/parse_mod.cpp create mode 100644 test/verify/test_fmod.cpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c3fcb0ed3fb..6110fca45b6 100755 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -133,6 +133,7 @@ register_migraphx_ops( exp flatten floor + fmod gather gathernd get_tuple_elem diff --git a/src/include/migraphx/op/fmod.hpp b/src/include/migraphx/op/fmod.hpp new file mode 100644 index 00000000000..c1938afca4d --- /dev/null +++ b/src/include/migraphx/op/fmod.hpp @@ -0,0 +1,73 @@ +/* + * The MIT 
License (MIT) + * + * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef MIGRAPHX_GUARD_OPERATORS_FMOD_HPP +#define MIGRAPHX_GUARD_OPERATORS_FMOD_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace migraphx { +inline namespace MIGRAPHX_INLINE_NS { +namespace op { + +struct fmod : binary +{ + bool fmod_flag; + + template + static auto reflect(Self& self, F f) + { + return pack(f(self.fmod_flag, "fmod_flag")); + } + + value attributes() const + { + auto a = base_attributes(); + a["fmod_flag"] = fmod_flag; + return a; + } + + std::string point_function() const { return "fmod(${0}, ${1})"; } + auto apply() const + { + return [&](auto x, auto y) { return std::fmod(x, y); }; + } + + fmod(bool fmod = true) : fmod_flag{fmod} {} +}; + +} // namespace op +} // namespace MIGRAPHX_INLINE_NS +} // namespace migraphx + +#endif diff --git a/src/include/migraphx/op/mod.hpp b/src/include/migraphx/op/mod.hpp index d2a8e1d51ee..9973dba8e4b 100644 --- a/src/include/migraphx/op/mod.hpp +++ b/src/include/migraphx/op/mod.hpp @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ -#ifndef MIGRAPHX_GUARD_OPERATORS_MUL_HPP -#define MIGRAPHX_GUARD_OPERATORS_MUL_HPP +#ifndef MIGRAPHX_GUARD_OPERATORS_MOD_HPP +#define MIGRAPHX_GUARD_OPERATORS_MOD_HPP #include #include @@ -40,30 +40,6 @@ namespace migraphx { inline namespace MIGRAPHX_INLINE_NS { namespace op { -template -T mod_op(T x, T y) -{ - return (x % y); -} - -template <> -float mod_op(float x, float y) -{ - return std::fmod(x, y); -} - -template <> -double mod_op(double x, double y) -{ - return std::fmod(x, y); -} - -template <> -half_float::half mod_op(half_float::half x, half_float::half y) -{ - return half_float::fmod(x, y); -} - struct mod : binary { bool fmod_flag; @@ -81,41 +57,10 @@ struct mod : binary return a; } - shape compute_shape(std::vector inputs) const - { - check_shapes{inputs, (*this)}.has(2).same_type().same_dims(); - auto s0 = inputs.at(0); - auto s1 = inputs.at(1); - - if((s0.type() == shape::float_type || s0.type() == shape::double_type || - s0.type() == shape::half_type) && - (fmod_flag == false)) - { - MIGRAPHX_THROW("fmod must be true for floating data types"); - } - - if(s0 == s1 and s0.packed()) - { - return s0; - } - else if(s0.packed() != s1.packed()) - { - return s0.packed() ? s0 : s1; - } - else if(s0.broadcasted() != s1.broadcasted()) - { - return s0.broadcasted() ? s1.with_lens(s0.lens()) : s0.with_lens(s0.lens()); - } - else - { - return {s0.type(), s0.lens()}; - } - } - std::string point_function() const { return "mod"; } auto apply() const { - return [&](auto x, auto y) { return mod_op(x, y); }; + return [&](auto x, auto y) { return std::fmod((std::fmod(x, y) + y), y); }; } mod(bool fmod = false) : fmod_flag{fmod} {} diff --git a/src/onnx/parse_mod.cpp b/src/onnx/parse_mod.cpp new file mode 100644 index 00000000000..38fcef17629 --- /dev/null +++ b/src/onnx/parse_mod.cpp @@ -0,0 +1,62 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include +#include +#include +#include + +namespace migraphx { +inline namespace MIGRAPHX_INLINE_NS { +namespace onnx { + +struct parse_mod : op_parser +{ + std::vector operators() const { return {{"Mod"}}; } + + instruction_ref parse(const op_desc& /*opd*/, + const onnx_parser& parser, + onnx_parser::node_info info, + std::vector args) const + { + int fmod_flag = 0; + + if(contains(info.attributes, "fmod")) + { + fmod_flag = parser.parse_value(info.attributes.at("fmod")).at(); + } + + if(fmod_flag == 1) + { + return info.add_common_op("fmod", args[0], args[1]); + } + else + { + return info.add_common_op("mod", args[0], args[1]); + } + } +}; + +} // namespace onnx +} // namespace MIGRAPHX_INLINE_NS +} // namespace migraphx diff --git a/test/ref_ops_test.cpp b/test/ref_ops_test.cpp index 9971e60dbea..e4ed3ee53da 100644 --- a/test/ref_ops_test.cpp +++ b/test/ref_ops_test.cpp @@ -3030,21 +3030,58 @@ TEST_CASE(min_test) EXPECT(migraphx::verify_range(results_vector, gold)); } -TEST_CASE(mod_test) +TEST_CASE(fmod_test) { migraphx::program p; auto* mm = p.get_main_module(); migraphx::shape s{migraphx::shape::int32_type, {3}}; - auto l0 = mm->add_literal(migraphx::literal{s, {-7, 8, 3}}); + auto l0 = mm->add_literal(migraphx::literal{s, {-7, 8, -3}}); auto l1 = mm->add_literal(migraphx::literal{s, {2, 4, 6}}); auto l2 = mm->add_literal(migraphx::literal{s, {7, 5, 9}}); + auto curr_mod = mm->add_instruction(migraphx::make_op("fmod"), l0, l1); + mm->add_instruction(migraphx::make_op("fmod"), curr_mod, l2); + p.compile(migraphx::ref::target{}); + auto result = p.eval({}).back(); + std::vector results_vector(4); + result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); + std::vector gold{-1, 0, -3}; + EXPECT(migraphx::verify_range(results_vector, gold)); +} + +TEST_CASE(fmod_floatingPoint_test) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + migraphx::shape s{migraphx::shape::float_type, {3}}; + auto l0 = 
mm->add_literal(migraphx::literal{s, {-7.2f, 8.5f, -3.3f}}); + auto l1 = mm->add_literal(migraphx::literal{s, {2.0f, 4.0f, 6.0f}}); + auto l2 = mm->add_literal(migraphx::literal{s, {7.0f, 5.0f, 9.0f}}); + auto curr_mod = mm->add_instruction(migraphx::make_op("fmod"), l0, l1); + mm->add_instruction(migraphx::make_op("fmod"), curr_mod, l2); + + p.compile(migraphx::ref::target{}); + auto result = p.eval({}).back(); + std::vector results_vector(4); + result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); + std::vector gold{-1.2f, 0.5f, -3.3f}; + EXPECT(migraphx::verify_range(results_vector, gold)); +} + +TEST_CASE(mod_test) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + migraphx::shape s{migraphx::shape::int32_type, {3}}; + auto l0 = mm->add_literal(migraphx::literal{s, {-3, 8, -7}}); + auto l1 = mm->add_literal(migraphx::literal{s, {3, 3, 3}}); + auto l2 = mm->add_literal(migraphx::literal{s, {10, 2, 9}}); auto curr_mod = mm->add_instruction(migraphx::make_op("mod"), l0, l1); mm->add_instruction(migraphx::make_op("mod"), curr_mod, l2); p.compile(migraphx::ref::target{}); auto result = p.eval({}).back(); std::vector results_vector(4); result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); - std::vector gold{-1, 0, 3}; + std::vector gold{0, 0, 2}; EXPECT(migraphx::verify_range(results_vector, gold)); } @@ -3053,17 +3090,17 @@ TEST_CASE(mod_floatingPoint_test) migraphx::program p; auto* mm = p.get_main_module(); migraphx::shape s{migraphx::shape::float_type, {3}}; - auto l0 = mm->add_literal(migraphx::literal{s, {7.2f, 8.5f, 3.3f}}); - auto l1 = mm->add_literal(migraphx::literal{s, {2.0f, 4.0f, 6.0f}}); - auto l2 = mm->add_literal(migraphx::literal{s, {7.0f, 5.0f, 9.0f}}); - auto curr_mod = mm->add_instruction(migraphx::make_op("mod", {{"fmod_flag", true}}), l0, l1); - mm->add_instruction(migraphx::make_op("mod", {{"fmod_flag", true}}), curr_mod, l2); + auto l0 = 
mm->add_literal(migraphx::literal{s, {-3.0f, 8.5f, -7.0f}}); + auto l1 = mm->add_literal(migraphx::literal{s, {2.0f, 3.0f, 3.0f}}); + auto l2 = mm->add_literal(migraphx::literal{s, {3.0f, 3.0f, 4.0f}}); + auto curr_mod = mm->add_instruction(migraphx::make_op("mod"), l0, l1); + mm->add_instruction(migraphx::make_op("mod"), curr_mod, l2); p.compile(migraphx::ref::target{}); auto result = p.eval({}).back(); std::vector results_vector(4); result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); }); - std::vector gold{1.2f, 0.5f, 3.3f}; + std::vector gold{1.0f, 2.5f, 2.0f}; EXPECT(migraphx::verify_range(results_vector, gold)); } diff --git a/test/verify/test_fmod.cpp b/test/verify/test_fmod.cpp new file mode 100644 index 00000000000..a822055159f --- /dev/null +++ b/test/verify/test_fmod.cpp @@ -0,0 +1,42 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "verify_program.hpp" +#include +#include +#include + +struct test_fmod : verify_program +{ + migraphx::program create_program() const + { + migraphx::program p; + auto* mm = p.get_main_module(); + migraphx::shape s{migraphx::shape::float_type, {3}}; + auto x = mm->add_parameter("x", s); + auto y = mm->add_parameter("y", s); + mm->add_instruction(migraphx::make_op("fmod"), x, y); + return p; + } +}; diff --git a/test/verify/test_mod.cpp b/test/verify/test_mod.cpp index 392ccc4af8f..1a2a0c9301b 100644 --- a/test/verify/test_mod.cpp +++ b/test/verify/test_mod.cpp @@ -36,7 +36,7 @@ struct test_mod : verify_program migraphx::shape s{migraphx::shape::float_type, {3}}; auto x = mm->add_parameter("x", s); auto y = mm->add_parameter("y", s); - mm->add_instruction(migraphx::make_op("mod", {{"fmod_flag", true}}), x, y); + mm->add_instruction(migraphx::make_op("mod"), x, y); return p; } }; From 8288e882b0a814394692435f181ff1bb7bf9373d Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Wed, 13 Jul 2022 13:31:37 +0000 Subject: [PATCH 03/20] fixup! 
Split mod operation into fmod & mod equivalents --- src/include/migraphx/op/fmod.hpp | 21 ++------------------- src/include/migraphx/op/mod.hpp | 19 +------------------ src/include/migraphx/operators.hpp | 2 ++ src/onnx/parse_mod.cpp | 19 ++++++++----------- src/targets/gpu/jit/pointwise.cpp | 2 ++ 5 files changed, 15 insertions(+), 48 deletions(-) diff --git a/src/include/migraphx/op/fmod.hpp b/src/include/migraphx/op/fmod.hpp index c1938afca4d..aeba0a9e79a 100644 --- a/src/include/migraphx/op/fmod.hpp +++ b/src/include/migraphx/op/fmod.hpp @@ -42,28 +42,11 @@ namespace op { struct fmod : binary { - bool fmod_flag; - - template - static auto reflect(Self& self, F f) - { - return pack(f(self.fmod_flag, "fmod_flag")); - } - - value attributes() const - { - auto a = base_attributes(); - a["fmod_flag"] = fmod_flag; - return a; - } - - std::string point_function() const { return "fmod(${0}, ${1})"; } + std::string point_function() const { return "fmod"; } auto apply() const { - return [&](auto x, auto y) { return std::fmod(x, y); }; + return [](auto x, auto y) { return std::fmod(x, y); }; } - - fmod(bool fmod = true) : fmod_flag{fmod} {} }; } // namespace op diff --git a/src/include/migraphx/op/mod.hpp b/src/include/migraphx/op/mod.hpp index 9973dba8e4b..9f3c3224227 100644 --- a/src/include/migraphx/op/mod.hpp +++ b/src/include/migraphx/op/mod.hpp @@ -42,28 +42,11 @@ namespace op { struct mod : binary { - bool fmod_flag; - - template - static auto reflect(Self& self, F f) - { - return pack(f(self.fmod_flag, "fmod_flag")); - } - - value attributes() const - { - auto a = base_attributes(); - a["fmod_flag"] = fmod_flag; - return a; - } - std::string point_function() const { return "mod"; } auto apply() const { - return [&](auto x, auto y) { return std::fmod((std::fmod(x, y) + y), y); }; + return [](auto x, auto y) { return std::fmod((std::fmod(x, y) + y), y); }; } - - mod(bool fmod = false) : fmod_flag{fmod} {} }; } // namespace op diff --git 
a/src/include/migraphx/operators.hpp b/src/include/migraphx/operators.hpp index 5b913aec276..fc4102c2927 100644 --- a/src/include/migraphx/operators.hpp +++ b/src/include/migraphx/operators.hpp @@ -57,6 +57,7 @@ #include #include #include +#include #include #include #include @@ -79,6 +80,7 @@ #include #include #include +#include #include #include #include diff --git a/src/onnx/parse_mod.cpp b/src/onnx/parse_mod.cpp index 38fcef17629..74480f1ff31 100644 --- a/src/onnx/parse_mod.cpp +++ b/src/onnx/parse_mod.cpp @@ -39,21 +39,18 @@ struct parse_mod : op_parser onnx_parser::node_info info, std::vector args) const { - int fmod_flag = 0; + if(args.size() < 2) + MIGRAPHX_THROW("mod operators should have 2 operands"); + std::string mod = "mod"; if(contains(info.attributes, "fmod")) { - fmod_flag = parser.parse_value(info.attributes.at("fmod")).at(); - } - - if(fmod_flag == 1) - { - return info.add_common_op("fmod", args[0], args[1]); - } - else - { - return info.add_common_op("mod", args[0], args[1]); + if(parser.parse_value(info.attributes.at("fmod")).at() == 1) + { + mod = "fmod"; + } } + return info.add_common_op(mod, args[0], args[1]); } }; diff --git a/src/targets/gpu/jit/pointwise.cpp b/src/targets/gpu/jit/pointwise.cpp index 04cf8745d78..d91ae9c4a95 100644 --- a/src/targets/gpu/jit/pointwise.cpp +++ b/src/targets/gpu/jit/pointwise.cpp @@ -138,6 +138,8 @@ struct pointwise_compiler : compiler g.add_point_op("less", "migraphx::abs(${0} < ${1})"); g.add_point_op("greater", "migraphx::abs(${0} > ${1})"); g.add_point_op("not", "migraphx::abs(not ${0})"); + g.add_point_op("mod", "migraphx::mod(${0}, ${1})"); + g.add_point_op("fmod", "migraphx::fmod(${0}, ${1})"); // Add explict conversions g.fresult([](const shape& s) { return "migraphx::convert<" + shape::cpp_type(s.type()) + ">"; From 23a82ba513b45233a6aaa10be652126debd67257 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Wed, 13 Jul 2022 13:34:49 +0000 Subject: [PATCH 04/20] Added Test related changes for mod 
operator Generate files for modulus operator from onnx. Used to verify parsing. --- test/onnx/gen_onnx.py | 27 ++++++++++++++++++++ test/onnx/mod_test.onnx | 16 ++++++++++++ test/onnx/mod_test_fmod.onnx | 17 +++++++++++++ test/onnx/verify_onnx.cpp | 49 ++++++++++++++++++++++++++++++++++++ test/py/onnx_backend_test.py | 2 ++ 5 files changed, 111 insertions(+) create mode 100644 test/onnx/mod_test.onnx create mode 100644 test/onnx/mod_test_fmod.onnx diff --git a/test/onnx/gen_onnx.py b/test/onnx/gen_onnx.py index 796adaeda09..326a013d537 100755 --- a/test/onnx/gen_onnx.py +++ b/test/onnx/gen_onnx.py @@ -3231,6 +3231,33 @@ def min_test(): return ([node], [a, b, c], [y]) +@onnx_test +def mod_test(): + a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2]) + b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2]) + y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2]) + + node = onnx.helper.make_node('Mod', inputs=['0', '1'], outputs=['2']) + + return ([node], [a, b], [y]) + + +@onnx_test +def mod_test_fmod(): + a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2]) + b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2]) + y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2]) + + node = onnx.helper.make_node( + 'Mod', + inputs=['0', '1'], + outputs=['2'], + fmod=1 #fmod flag = 1 + ) + + return ([node], [a, b], [y]) + + @onnx_test def multinomial_test(): sample_size = 10 diff --git a/test/onnx/mod_test.onnx b/test/onnx/mod_test.onnx new file mode 100644 index 00000000000..14e5ea92979 --- /dev/null +++ b/test/onnx/mod_test.onnx @@ -0,0 +1,16 @@ +mod_test:M + +0 +12"Modmod_testZ +0 + + +Z +1 + + +b +2 + + +B \ No newline at end of file diff --git a/test/onnx/mod_test_fmod.onnx b/test/onnx/mod_test_fmod.onnx new file mode 100644 index 00000000000..fbfbebdd42c --- /dev/null +++ b/test/onnx/mod_test_fmod.onnx @@ -0,0 +1,17 @@ + mod_test_fmod:_ + +0 +12"Mod* +fmod  mod_test_fmodZ +0 + + +Z +1 + + +b +2 + + +B \ No newline at 
end of file diff --git a/test/onnx/verify_onnx.cpp b/test/onnx/verify_onnx.cpp index 10529e0d41a..43cbaf54362 100644 --- a/test/onnx/verify_onnx.cpp +++ b/test/onnx/verify_onnx.cpp @@ -631,6 +631,55 @@ TEST_CASE(mean_integral_test) EXPECT(migraphx::verify_range(result_vector, gold)); } +TEST_CASE(mod_test) +{ + migraphx::program p = migraphx::parse_onnx("mod_test.onnx"); + p.compile(migraphx::ref::target{}); + + migraphx::shape s{migraphx::shape::float_type, {2, 2}}; + + std::vector data = { + 3.0, 2.0, -3.0, 2.0, 9.0, 5.0, -9.0, 5.0, 0.0, 10.0, -0.0, 5.0, 6.0, 9.0}; + + migraphx::parameter_map p_map; + p_map["x"] = migraphx::argument(s, data.data()); + + auto result = p.eval(p_map).back(); + std::vector result_vector; + result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); + + std::vector gold{1.0, 1.0, 4.0, 4.0, 0.0, 0.0, 6.0}; + EXPECT(migraphx::verify_range(result_vector, gold)); +} + +TEST_CASE(mod_test_fmod) +{ + migraphx::program p = migraphx::parse_onnx("mod_test_fmod.onnx"); + p.compile(migraphx::ref::target{}); + + migraphx::shape s{migraphx::shape::float_type, {2, 2}}; + + std::vector data = { + 3.0, 2.0, -3.0, 2.0, 9.0, 5.0, -9.0, 5.0, 0.0, 10.0, -0.0, 5.0, 6.0, 9.0}; + + migraphx::parameter_map p_map; + p_map["x"] = migraphx::argument(s, data.data()); + + auto result = p.eval(p_map).back(); + std::vector result_vector; + result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); + + for(auto i : result_vector) + { + std::cout << i; + std::cout << " "; + } + std::cout << std::endl; + + std::vector gold{1.0, -1.0, 4.0, -4.0, 0.0, 0.0, 6.0}; + EXPECT(migraphx::verify_range(result_vector, gold)); +} + TEST_CASE(nonzero_test) { migraphx::program p = migraphx::parse_onnx("nonzero_dynamic_test.onnx"); diff --git a/test/py/onnx_backend_test.py b/test/py/onnx_backend_test.py index 08f8c99ef4b..24663e66cbc 100755 --- a/test/py/onnx_backend_test.py +++ b/test/py/onnx_backend_test.py @@ -162,6 +162,8 
@@ def create_backend_test(testname=None, target_device=None): backend_test.include(r'.*test_MaxPool[1-9]d.*') backend_test.include(r'.*test_mean.*') backend_test.include(r'.*test_min.*') + backend_test.include(r'.*test_mod.*') + backend_test.include(r'.*test_fmod.*') backend_test.include(r'.*test_mul.*') backend_test.include(r'.*test_multinomial.*') backend_test.include(r'.*test_Multinomial.*') From 062a29daebe32692d832d5db22c9f22eab846095 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Thu, 14 Jul 2022 17:11:02 +0000 Subject: [PATCH 05/20] Add additional tests for mod operator parsing Verifies that we should get fmod or mod based on the fmod flag found in the parse in onnx operator. Added additional tests for data type, and half type. The half datatype currently uses std::fmod (defined in half.hpp) and incase things change down the road we'll have some sort of test coverage should std::fmod is not used anymore instead. --- test/onnx/gen_onnx.py | 69 ++++++++++++++-- test/onnx/mod_test.onnx | 31 ++++---- test/onnx/mod_test_different_dtypes.onnx | 19 +++++ test/onnx/mod_test_fmod.onnx | 31 ++++---- test/onnx/mod_test_fmod_different_dtypes.onnx | 20 +++++ test/onnx/mod_test_fmod_half.onnx | 23 ++++++ test/onnx/mod_test_half.onnx | 22 ++++++ test/onnx/onnx_test.cpp | 78 +++++++++++++++++++ 8 files changed, 259 insertions(+), 34 deletions(-) create mode 100644 test/onnx/mod_test_different_dtypes.onnx create mode 100644 test/onnx/mod_test_fmod_different_dtypes.onnx create mode 100644 test/onnx/mod_test_fmod_half.onnx create mode 100644 test/onnx/mod_test_half.onnx diff --git a/test/onnx/gen_onnx.py b/test/onnx/gen_onnx.py index 326a013d537..010f01bce63 100755 --- a/test/onnx/gen_onnx.py +++ b/test/onnx/gen_onnx.py @@ -2993,6 +2993,7 @@ def matmul_bmbm_test(): @onnx_test def matmul_bmv_test(): + m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7]) m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7]) y = 
helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 6]) @@ -3233,20 +3234,76 @@ def min_test(): @onnx_test def mod_test(): - a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2]) - b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2]) - y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2]) + a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3]) + b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 3, 3]) + y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3]) + + node = onnx.helper.make_node('Mod', inputs=['0', '1'], outputs=['2']) + + return ([node], [a, b], [y]) + + +@onnx_test +def mod_test_half(): + a = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [3, 3, 3]) + b = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [3, 3, 3]) + y = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [3, 3, 3]) node = onnx.helper.make_node('Mod', inputs=['0', '1'], outputs=['2']) return ([node], [a, b], [y]) +@onnx_test +def mod_test_different_dtypes(): + a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3]) + b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3]) + y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3]) + + node = onnx.helper.make_node( + 'Mod', + inputs=['0', '1'], + outputs=['2'], + ) + + return ([node], [a, b], [y]) + + @onnx_test def mod_test_fmod(): - a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2]) - b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2]) - y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2]) + a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3]) + b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 3, 3]) + y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3]) + + node = onnx.helper.make_node( + 'Mod', + inputs=['0', '1'], + outputs=['2'], + fmod=1 #fmod flag = 1 + ) + + return ([node], [a, b], [y]) + + +@onnx_test +def 
mod_test_fmod_half(): + a = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [3, 3, 3]) + b = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [3, 3, 3]) + y = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [3, 3, 3]) + + node = onnx.helper.make_node('Mod', + inputs=['0', '1'], + outputs=['2'], + fmod=1) + + return ([node], [a, b], [y]) + + +@onnx_test +def mod_test_fmod_different_dtypes(): + a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3]) + b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3]) + y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3]) node = onnx.helper.make_node( 'Mod', diff --git a/test/onnx/mod_test.onnx b/test/onnx/mod_test.onnx index 14e5ea92979..f813b91ea19 100644 --- a/test/onnx/mod_test.onnx +++ b/test/onnx/mod_test.onnx @@ -1,16 +1,19 @@ -mod_test:M +mod_test:e  0 -12"Modmod_testZ -0 - - -Z -1 - - -b -2 - - -B \ No newline at end of file +12"Modmod_testZ +0 + + + +Z +1 + + + +b +2 + + + +B \ No newline at end of file diff --git a/test/onnx/mod_test_different_dtypes.onnx b/test/onnx/mod_test_different_dtypes.onnx new file mode 100644 index 00000000000..2aa6e6db0b3 --- /dev/null +++ b/test/onnx/mod_test_different_dtypes.onnx @@ -0,0 +1,19 @@ +mod_test_different_dtypes:v + +0 +12"Modmod_test_different_dtypesZ +0 + + + +Z +1 + + + +b +2 + + + +B \ No newline at end of file diff --git a/test/onnx/mod_test_fmod.onnx b/test/onnx/mod_test_fmod.onnx index fbfbebdd42c..6313ffad7e3 100644 --- a/test/onnx/mod_test_fmod.onnx +++ b/test/onnx/mod_test_fmod.onnx @@ -1,17 +1,20 @@ - mod_test_fmod:_ + mod_test_fmod:w  0 12"Mod* -fmod  mod_test_fmodZ -0 - - -Z -1 - - -b -2 - - -B \ No newline at end of file +fmod  mod_test_fmodZ +0 + + + +Z +1 + + + +b +2 + + + +B \ No newline at end of file diff --git a/test/onnx/mod_test_fmod_different_dtypes.onnx b/test/onnx/mod_test_fmod_different_dtypes.onnx new file mode 100644 index 00000000000..76a13649a15 --- /dev/null +++ 
b/test/onnx/mod_test_fmod_different_dtypes.onnx @@ -0,0 +1,20 @@ +mod_test_fmod_different_dtypes:ˆ + +0 +12"Mod* +fmod mod_test_fmod_different_dtypesZ +0 + + + +Z +1 + + + +b +2 + + + +B \ No newline at end of file diff --git a/test/onnx/mod_test_fmod_half.onnx b/test/onnx/mod_test_fmod_half.onnx new file mode 100644 index 00000000000..30ef5c2f88f --- /dev/null +++ b/test/onnx/mod_test_fmod_half.onnx @@ -0,0 +1,23 @@ +mod_test_fmod_half:| + +0 +12"Mod* +fmod mod_test_fmod_halfZ +0 + + + + +Z +1 + + + + +b +2 + + + + +B \ No newline at end of file diff --git a/test/onnx/mod_test_half.onnx b/test/onnx/mod_test_half.onnx new file mode 100644 index 00000000000..68ca0657b37 --- /dev/null +++ b/test/onnx/mod_test_half.onnx @@ -0,0 +1,22 @@ + mod_test_half:j + +0 +12"Mod mod_test_halfZ +0 + + + + +Z +1 + + + + +b +2 + + + + +B \ No newline at end of file diff --git a/test/onnx/onnx_test.cpp b/test/onnx/onnx_test.cpp index 07d26af0a00..8fda6da941f 100644 --- a/test/onnx/onnx_test.cpp +++ b/test/onnx/onnx_test.cpp @@ -2950,6 +2950,84 @@ TEST_CASE(min_test) optimize_onnx("min_test.onnx"); } +TEST_CASE(mod_test) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); + auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); + mm->add_instruction(migraphx::make_op("mod"), input0, input1); + + auto prog = optimize_onnx("mod_test.onnx"); + + EXPECT(p == prog); +} + +TEST_CASE(mod_test_half) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {3, 3, 3}}); + auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::half_type, {3, 3, 3}}); + mm->add_instruction(migraphx::make_op("mod"), input0, input1); + + auto prog = optimize_onnx("mod_test_half.onnx"); + + EXPECT(p == prog); +} + +TEST_CASE(mod_test_different_dtypes) +{ + migraphx::program 
p; + auto* mm = p.get_main_module(); + auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); + auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}}); + add_common_op(*mm, migraphx::make_op("mod"), {input0, input1}); + + auto prog = optimize_onnx("mod_test_different_dtypes.onnx"); + + EXPECT(p == prog); +} + +TEST_CASE(mod_test_fmod) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); + auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); + mm->add_instruction(migraphx::make_op("fmod"), input0, input1); + + auto prog = optimize_onnx("mod_test_fmod.onnx"); + + EXPECT(p == prog); +} + +TEST_CASE(mod_test_fmod_half) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {3, 3, 3}}); + auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::half_type, {3, 3, 3}}); + mm->add_instruction(migraphx::make_op("fmod"), input0, input1); + + auto prog = optimize_onnx("mod_test_fmod_half.onnx"); + + EXPECT(p == prog); +} + +TEST_CASE(mod_test_fmod_different_dtypes) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); + auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}}); + add_common_op(*mm, migraphx::make_op("fmod"), {input0, input1}); + + auto prog = optimize_onnx("mod_test_fmod_different_dtypes.onnx"); + + EXPECT(p == prog); +} + TEST_CASE(multinomial_test) { migraphx::program p; From a3d267cac77cd75190b82915a0cfb28832a78965 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Thu, 14 Jul 2022 17:12:54 +0000 Subject: [PATCH 06/20] Add false commutative attribute for fmod/mod operators Put this in so that 
it's clear in migraphx that this binary operation is not commutative. Taking a look at other binary ops like min/max, mul, etc this is defined as true. My intention here is to be explicit if this is used for any checks and passes --- src/include/migraphx/op/fmod.hpp | 6 ++++++ src/include/migraphx/op/mod.hpp | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/src/include/migraphx/op/fmod.hpp b/src/include/migraphx/op/fmod.hpp index aeba0a9e79a..17069107472 100644 --- a/src/include/migraphx/op/fmod.hpp +++ b/src/include/migraphx/op/fmod.hpp @@ -42,6 +42,12 @@ namespace op { struct fmod : binary { + value attributes() const + { + auto a = base_attributes(); + a["commutative"] = false; + return a; + } std::string point_function() const { return "fmod"; } auto apply() const { diff --git a/src/include/migraphx/op/mod.hpp b/src/include/migraphx/op/mod.hpp index 9f3c3224227..98d72204b48 100644 --- a/src/include/migraphx/op/mod.hpp +++ b/src/include/migraphx/op/mod.hpp @@ -42,6 +42,12 @@ namespace op { struct mod : binary { + value attributes() const + { + auto a = base_attributes(); + a["commutative"] = false; + return a; + } std::string point_function() const { return "mod"; } auto apply() const { From 0bf1090ce47df0651fcc4b1e16c20d23bd12fa5a Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Thu, 14 Jul 2022 22:15:17 +0000 Subject: [PATCH 07/20] Fix mod_test in verify_onnx Needed to recalculate this and make sure what I'm getting is valid on output --- test/onnx/verify_onnx.cpp | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/test/onnx/verify_onnx.cpp b/test/onnx/verify_onnx.cpp index 43cbaf54362..8095de57e61 100644 --- a/test/onnx/verify_onnx.cpp +++ b/test/onnx/verify_onnx.cpp @@ -657,26 +657,27 @@ TEST_CASE(mod_test_fmod) migraphx::program p = migraphx::parse_onnx("mod_test_fmod.onnx"); p.compile(migraphx::ref::target{}); - migraphx::shape s{migraphx::shape::float_type, {2, 2}}; + migraphx::shape 
s{migraphx::shape::float_type, {3, 3, 3}}; - std::vector data = { - 3.0, 2.0, -3.0, 2.0, 9.0, 5.0, -9.0, 5.0, 0.0, 10.0, -0.0, 5.0, 6.0, 9.0}; + std::vector a = {1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, + 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.0, 26.0, 27.0}; + + std::vector b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, + 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4}; migraphx::parameter_map p_map; - p_map["x"] = migraphx::argument(s, data.data()); + p_map["0"] = migraphx::argument(s, a.data()); + p_map["1"] = migraphx::argument(s, b.data()); auto result = p.eval(p_map).back(); std::vector result_vector; result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); - for(auto i : result_vector) - { - std::cout << i; - std::cout << " "; - } - std::cout << std::endl; + std::vector gold{1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, + 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 1.0, 3.0, 5.0, + 7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.0, 1.0, 3.0}; - std::vector gold{1.0, -1.0, 4.0, -4.0, 0.0, 0.0, 6.0}; EXPECT(migraphx::verify_range(result_vector, gold)); } From b242b7da557912890ebc49f610ec253043d62a7d Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 15 Jul 2022 00:38:25 +0000 Subject: [PATCH 08/20] Add test case in verify_onnx for mod_test and fix migraphx::mod Fixed mod operator in migraphx to handle negative values correctly. Since onnx produces mod similar to the python equivalent of mod() this test was used to verify that negatives are treated as positive results, rather than a remainder. 
--- src/include/migraphx/op/mod.hpp | 2 +- test/onnx/verify_onnx.cpp | 17 ++++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/include/migraphx/op/mod.hpp b/src/include/migraphx/op/mod.hpp index 98d72204b48..67011fb4928 100644 --- a/src/include/migraphx/op/mod.hpp +++ b/src/include/migraphx/op/mod.hpp @@ -51,7 +51,7 @@ struct mod : binary std::string point_function() const { return "mod"; } auto apply() const { - return [](auto x, auto y) { return std::fmod((std::fmod(x, y) + y), y); }; + return [](auto x, auto y) { return std::fmod((std::abs(std::fmod(x, y)) + y), y); }; } }; diff --git a/test/onnx/verify_onnx.cpp b/test/onnx/verify_onnx.cpp index 8095de57e61..e644ae4baa1 100644 --- a/test/onnx/verify_onnx.cpp +++ b/test/onnx/verify_onnx.cpp @@ -636,19 +636,26 @@ TEST_CASE(mod_test) migraphx::program p = migraphx::parse_onnx("mod_test.onnx"); p.compile(migraphx::ref::target{}); - migraphx::shape s{migraphx::shape::float_type, {2, 2}}; + migraphx::shape s{migraphx::shape::float_type, {3, 3, 3}}; - std::vector data = { - 3.0, 2.0, -3.0, 2.0, 9.0, 5.0, -9.0, 5.0, 0.0, 10.0, -0.0, 5.0, 6.0, 9.0}; + std::vector a = {1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, + 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.0, 26.0, 27.0}; + + std::vector b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, + 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4}; migraphx::parameter_map p_map; - p_map["x"] = migraphx::argument(s, data.data()); + p_map["0"] = migraphx::argument(s, a.data()); + p_map["1"] = migraphx::argument(s, b.data()); auto result = p.eval(p_map).back(); std::vector result_vector; result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); - std::vector gold{1.0, 1.0, 4.0, 4.0, 0.0, 0.0, 6.0}; + std::vector gold{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 1.0, 3.0, 5.0, + 7.0, 9.0, 1.0, 4.0, 7.0, 3.0, 1.0, 1.0, 
3.0}; EXPECT(migraphx::verify_range(result_vector, gold)); } From ca630d4d3f71a582e773fd6a3825c46d1f244135 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 15 Jul 2022 01:13:44 +0000 Subject: [PATCH 09/20] Add additional test in verify_onnx for different datatypes using parsed in mod protobuf Verify the result from the protobuf is correct when using mixed datatypes. The goal here is to verify we cast up correctly from int32 and our results are sane. --- test/onnx/verify_onnx.cpp | 60 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/test/onnx/verify_onnx.cpp b/test/onnx/verify_onnx.cpp index e644ae4baa1..e9c401ed78f 100644 --- a/test/onnx/verify_onnx.cpp +++ b/test/onnx/verify_onnx.cpp @@ -659,6 +659,36 @@ TEST_CASE(mod_test) EXPECT(migraphx::verify_range(result_vector, gold)); } +TEST_CASE(mod_test_different_types) +{ + migraphx::program p = migraphx::parse_onnx("mod_test_different_dtypes.onnx"); + p.compile(migraphx::ref::target{}); + + migraphx::shape s_float{migraphx::shape::float_type, {3, 3, 3}}; + migraphx::shape s_int{migraphx::shape::int32_type, {3, 3, 3}}; + + std::vector a = {1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, + 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.0, 26.0, 27.0}; + + std::vector b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, + 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4}; + + migraphx::parameter_map p_map; + p_map["0"] = migraphx::argument(s_float, a.data()); + p_map["1"] = migraphx::argument(s_int, b.data()); + + auto result = p.eval(p_map).back(); + std::vector result_vector; + result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); + + std::vector gold{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 1.0, 3.0, 5.0, + 7.0, 9.0, 1.0, 4.0, 7.0, 3.0, 1.0, 1.0, 3.0}; + + EXPECT(migraphx::verify_range(result_vector, gold)); +} + TEST_CASE(mod_test_fmod) { 
migraphx::program p = migraphx::parse_onnx("mod_test_fmod.onnx"); @@ -688,6 +718,36 @@ TEST_CASE(mod_test_fmod) EXPECT(migraphx::verify_range(result_vector, gold)); } +TEST_CASE(mod_test_fmod_different_types) +{ + migraphx::program p = migraphx::parse_onnx("mod_test_fmod_different_dtypes.onnx"); + p.compile(migraphx::ref::target{}); + + migraphx::shape s_float{migraphx::shape::float_type, {3, 3, 3}}; + migraphx::shape s_int{migraphx::shape::int32_type, {3, 3, 3}}; + + std::vector a = {1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, + 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 16.0, 17.0, 18.0, + 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.0, 26.0, 27.0}; + + std::vector b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, + 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4}; + + migraphx::parameter_map p_map; + p_map["0"] = migraphx::argument(s_float, a.data()); + p_map["1"] = migraphx::argument(s_int, b.data()); + + auto result = p.eval(p_map).back(); + std::vector result_vector; + result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); + + std::vector gold{1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, + 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 1.0, 3.0, 5.0, + 7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.0, 1.0, 3.0}; + + EXPECT(migraphx::verify_range(result_vector, gold)); +} + TEST_CASE(nonzero_test) { migraphx::program p = migraphx::parse_onnx("nonzero_dynamic_test.onnx"); From 541958414e88ec2ba4a3b50d9bf78f24c0fa6d91 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 15 Jul 2022 12:33:29 +0000 Subject: [PATCH 10/20] fixup! 
Add additional test in verify_onnx for different datatypes using parsed in mod protobuf --- src/include/migraphx/op/mod.hpp | 2 +- test/onnx/verify_onnx.cpp | 34 +++++++++++++++++---------------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/include/migraphx/op/mod.hpp b/src/include/migraphx/op/mod.hpp index 67011fb4928..a4d6e9c27d2 100644 --- a/src/include/migraphx/op/mod.hpp +++ b/src/include/migraphx/op/mod.hpp @@ -51,7 +51,7 @@ struct mod : binary std::string point_function() const { return "mod"; } auto apply() const { - return [](auto x, auto y) { return std::fmod((std::abs(std::fmod(x, y)) + y), y); }; + return [](auto x, auto y) { return std::fmod((std::remainder(x, y)) + y, y); }; } }; diff --git a/test/onnx/verify_onnx.cpp b/test/onnx/verify_onnx.cpp index e9c401ed78f..34cfb588ae2 100644 --- a/test/onnx/verify_onnx.cpp +++ b/test/onnx/verify_onnx.cpp @@ -638,12 +638,13 @@ TEST_CASE(mod_test) migraphx::shape s{migraphx::shape::float_type, {3, 3, 3}}; - std::vector a = {1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, - 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.0, 26.0, 27.0}; + std::vector a = {-4.0, 7.0, 5.0, 4.0, -7.0, 8.0, -4.0, 7.0, 5.0, + 4.0, -7.0, 8.0, -4.0, 7.0, 5.0, 4.0, -7.0, 8.0, + -4.0, 7.0, 5.0, 4.0, -7.0, 8.0, -4.0, 7.0, 5.0}; - std::vector b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, - 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4}; + std::vector b = {2.0, -3.0, 8.0, -2.0, 3.0, 5.0, 2.0, -3.0, 8.0, + -2.0, 3.0, 5.0, 2.0, -3.0, 8.0, -2.0, 3.0, 5.0, + 2.0, -3.0, 8.0, -2.0, 3.0, 5.0, 2.0, -3.0, 8.0}; migraphx::parameter_map p_map; p_map["0"] = migraphx::argument(s, a.data()); @@ -653,9 +654,10 @@ TEST_CASE(mod_test) std::vector result_vector; result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); - std::vector gold{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, - 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 1.0, 3.0, 5.0, - 7.0, 
9.0, 1.0, 4.0, 7.0, 3.0, 1.0, 1.0, 3.0}; + std::vector gold = {0.0, -2.0, 5.0, 0.0, 2.0, 3.0, 0.0, -2.0, 5.0, + 0.0, 2.0, 3.0, 0.0, -2.0, 5.0, 0.0, 2.0, 3.0, + 0.0, -2.0, 5.0, 0.0, 2.0, 3.0, 0.0, -2.0, 5.0}; + EXPECT(migraphx::verify_range(result_vector, gold)); } @@ -667,12 +669,12 @@ TEST_CASE(mod_test_different_types) migraphx::shape s_float{migraphx::shape::float_type, {3, 3, 3}}; migraphx::shape s_int{migraphx::shape::int32_type, {3, 3, 3}}; - std::vector a = {1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, - 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.0, 26.0, 27.0}; + std::vector a = {-4.0, 7.0, 5.0, 4.0, -7.0, 8.0, -4.0, 7.0, 5.0, + 4.0, -7.0, 8.0, -4.0, 7.0, 5.0, 4.0, -7.0, 8.0, + -4.0, 7.0, 5.0, 4.0, -7.0, 8.0, -4.0, 7.0, 5.0}; - std::vector b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, - 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4}; + std::vector b = {2, -3, 8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, + 8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, 8}; migraphx::parameter_map p_map; p_map["0"] = migraphx::argument(s_float, a.data()); @@ -682,9 +684,9 @@ TEST_CASE(mod_test_different_types) std::vector result_vector; result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); - std::vector gold{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, - 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 1.0, 3.0, 5.0, - 7.0, 9.0, 1.0, 4.0, 7.0, 3.0, 1.0, 1.0, 3.0}; + std::vector gold = {0.0, -2.0, 5.0, 0.0, 2.0, 3.0, 0.0, -2.0, 5.0, + 0.0, 2.0, 3.0, 0.0, -2.0, 5.0, 0.0, 2.0, 3.0, + 0.0, -2.0, 5.0, 0.0, 2.0, 3.0, 0.0, -2.0, 5.0}; EXPECT(migraphx::verify_range(result_vector, gold)); } From 7864e9e739c04b550cec72145ed8de01f006727e Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Sat, 16 Jul 2022 00:55:22 +0000 Subject: [PATCH 11/20] Remove gpu test for now without GPU implimentation --- test/py/onnx_backend_test.py | 2 -- test/verify/test_fmod.cpp | 42 ------------------------------------ 
test/verify/test_mod.cpp | 42 ------------------------------------ 3 files changed, 86 deletions(-) delete mode 100644 test/verify/test_fmod.cpp delete mode 100644 test/verify/test_mod.cpp diff --git a/test/py/onnx_backend_test.py b/test/py/onnx_backend_test.py index 24663e66cbc..08f8c99ef4b 100755 --- a/test/py/onnx_backend_test.py +++ b/test/py/onnx_backend_test.py @@ -162,8 +162,6 @@ def create_backend_test(testname=None, target_device=None): backend_test.include(r'.*test_MaxPool[1-9]d.*') backend_test.include(r'.*test_mean.*') backend_test.include(r'.*test_min.*') - backend_test.include(r'.*test_mod.*') - backend_test.include(r'.*test_fmod.*') backend_test.include(r'.*test_mul.*') backend_test.include(r'.*test_multinomial.*') backend_test.include(r'.*test_Multinomial.*') diff --git a/test/verify/test_fmod.cpp b/test/verify/test_fmod.cpp deleted file mode 100644 index a822055159f..00000000000 --- a/test/verify/test_fmod.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * The MIT License (MIT) - * - * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "verify_program.hpp" -#include -#include -#include - -struct test_fmod : verify_program -{ - migraphx::program create_program() const - { - migraphx::program p; - auto* mm = p.get_main_module(); - migraphx::shape s{migraphx::shape::float_type, {3}}; - auto x = mm->add_parameter("x", s); - auto y = mm->add_parameter("y", s); - mm->add_instruction(migraphx::make_op("fmod"), x, y); - return p; - } -}; diff --git a/test/verify/test_mod.cpp b/test/verify/test_mod.cpp deleted file mode 100644 index 1a2a0c9301b..00000000000 --- a/test/verify/test_mod.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * The MIT License (MIT) - * - * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#include "verify_program.hpp" -#include -#include -#include - -struct test_mod : verify_program -{ - migraphx::program create_program() const - { - migraphx::program p; - auto* mm = p.get_main_module(); - migraphx::shape s{migraphx::shape::float_type, {3}}; - auto x = mm->add_parameter("x", s); - auto y = mm->add_parameter("y", s); - mm->add_instruction(migraphx::make_op("mod"), x, y); - return p; - } -}; From 1fa6802e892c9cb888cb894256cc95a4deeec21c Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Mon, 18 Jul 2022 19:25:20 +0000 Subject: [PATCH 12/20] fixup! Added Test related changes for mod operator --- src/onnx/parse_mod.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/onnx/parse_mod.cpp b/src/onnx/parse_mod.cpp index 74480f1ff31..6eaaec459fa 100644 --- a/src/onnx/parse_mod.cpp +++ b/src/onnx/parse_mod.cpp @@ -39,7 +39,7 @@ struct parse_mod : op_parser onnx_parser::node_info info, std::vector args) const { - if(args.size() < 2) + if(args.size() != 2) MIGRAPHX_THROW("mod operators should have 2 operands"); std::string mod = "mod"; From 95e58d6bd4fa4b5fd8eeb74d94f3180578e3a9d0 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Mon, 18 Jul 2022 21:44:58 +0000 Subject: [PATCH 13/20] Add name parameter to fmod and mod and remove extra check for dims Remove extra check since binary handles input checking in check_shapes --- src/include/migraphx/op/fmod.hpp | 1 + src/include/migraphx/op/mod.hpp | 1 + src/onnx/parse_mod.cpp | 3 --- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/include/migraphx/op/fmod.hpp b/src/include/migraphx/op/fmod.hpp index 17069107472..cb8440ebebe 100644 --- a/src/include/migraphx/op/fmod.hpp +++ 
b/src/include/migraphx/op/fmod.hpp @@ -42,6 +42,7 @@ namespace op { struct fmod : binary { + std::string name() const { return "fmod"; } value attributes() const { auto a = base_attributes(); diff --git a/src/include/migraphx/op/mod.hpp b/src/include/migraphx/op/mod.hpp index a4d6e9c27d2..0802658905e 100644 --- a/src/include/migraphx/op/mod.hpp +++ b/src/include/migraphx/op/mod.hpp @@ -42,6 +42,7 @@ namespace op { struct mod : binary { + std::string name() const { return "mod"; } value attributes() const { auto a = base_attributes(); diff --git a/src/onnx/parse_mod.cpp b/src/onnx/parse_mod.cpp index 6eaaec459fa..53c1e50f2aa 100644 --- a/src/onnx/parse_mod.cpp +++ b/src/onnx/parse_mod.cpp @@ -39,9 +39,6 @@ struct parse_mod : op_parser onnx_parser::node_info info, std::vector args) const { - if(args.size() != 2) - MIGRAPHX_THROW("mod operators should have 2 operands"); - std::string mod = "mod"; if(contains(info.attributes, "fmod")) { From 140ca438af3a19a7f9625d474117642115346b7d Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 22 Jul 2022 15:52:59 +0000 Subject: [PATCH 14/20] Add extra check to not parse float inputs without fmod=1 Need this to be handle as float input with fmod=0 is actually invalid with the Onnx operator and should be flagged as an error on parse. 
--- .../include/migraphx/onnx/onnx_parser.hpp | 2 ++ src/onnx/onnx_parser.cpp | 20 +++++++++++++++++++ src/onnx/parse_mod.cpp | 8 ++++++++ 3 files changed, 30 insertions(+) diff --git a/src/onnx/include/migraphx/onnx/onnx_parser.hpp b/src/onnx/include/migraphx/onnx/onnx_parser.hpp index d8340db16e8..04b05e7c2e9 100644 --- a/src/onnx/include/migraphx/onnx/onnx_parser.hpp +++ b/src/onnx/include/migraphx/onnx/onnx_parser.hpp @@ -118,6 +118,8 @@ struct onnx_parser }; shape::type_t get_type(int dtype); +bool is_type_float(int dtype); +bool is_type_float(shape::type_t dtype); } // namespace onnx } // namespace MIGRAPHX_INLINE_NS diff --git a/src/onnx/onnx_parser.cpp b/src/onnx/onnx_parser.cpp index 4ed93675575..324c1bf8ada 100644 --- a/src/onnx/onnx_parser.cpp +++ b/src/onnx/onnx_parser.cpp @@ -487,6 +487,26 @@ shape::type_t get_type(int dtype) } } +bool is_type_float(int dtype) +{ + bool r = false; + if(dtype == 1 || dtype == 10 || dtype == 11) + { + r = true; + } + return r; +} + +bool is_type_float(shape::type_t d_type) +{ + bool r = false; + if(dtype == shape::float_type || dtype == shape::double_type || dtype == shape::half_type) + { + r = true; + } + return r; +} + } // namespace onnx } // namespace MIGRAPHX_INLINE_NS } // namespace migraphx diff --git a/src/onnx/parse_mod.cpp b/src/onnx/parse_mod.cpp index 53c1e50f2aa..016c23bb14d 100644 --- a/src/onnx/parse_mod.cpp +++ b/src/onnx/parse_mod.cpp @@ -40,6 +40,14 @@ struct parse_mod : op_parser std::vector args) const { std::string mod = "mod"; + if(parser.is_type_float(arg[0]) || parser.is_type_float(arg[1])) + { + if(!contains(info.attributes, "fmod")) + { + MIGRAPHX_THROW("Mod operator with float args and fmod=0 invalid"); + } + } + if(contains(info.attributes, "fmod")) { if(parser.parse_value(info.attributes.at("fmod")).at() == 1) From f867ae63b89f76bec3d721f5ca519ce98fc50e4f Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 22 Jul 2022 16:47:16 +0000 Subject: [PATCH 15/20] Require float inputs to use 
fmod=1 Modified test cases and generated tests to reflect and validate this. --- src/onnx/onnx_parser.cpp | 2 +- src/onnx/parse_mod.cpp | 4 +-- test/onnx/gen_onnx.py | 10 +++--- test/onnx/mod_test.onnx | 6 ++-- test/onnx/mod_test_different_dtypes.onnx | 4 +-- test/onnx/onnx_test.cpp | 26 ++++++++-------- test/onnx/verify_onnx.cpp | 39 +++++++++++------------- 7 files changed, 44 insertions(+), 47 deletions(-) diff --git a/src/onnx/onnx_parser.cpp b/src/onnx/onnx_parser.cpp index 324c1bf8ada..1f9d7971bf0 100644 --- a/src/onnx/onnx_parser.cpp +++ b/src/onnx/onnx_parser.cpp @@ -497,7 +497,7 @@ bool is_type_float(int dtype) return r; } -bool is_type_float(shape::type_t d_type) +bool is_type_float(shape::type_t dtype) { bool r = false; if(dtype == shape::float_type || dtype == shape::double_type || dtype == shape::half_type) diff --git a/src/onnx/parse_mod.cpp b/src/onnx/parse_mod.cpp index 016c23bb14d..52a2d9f4fb6 100644 --- a/src/onnx/parse_mod.cpp +++ b/src/onnx/parse_mod.cpp @@ -40,9 +40,9 @@ struct parse_mod : op_parser std::vector args) const { std::string mod = "mod"; - if(parser.is_type_float(arg[0]) || parser.is_type_float(arg[1])) + if(is_type_float(args[0]->get_shape().type()) || is_type_float(args[1]->get_shape().type())) { - if(!contains(info.attributes, "fmod")) + if(contains(info.attributes, "fmod") == false) { MIGRAPHX_THROW("Mod operator with float args and fmod=0 invalid"); } diff --git a/test/onnx/gen_onnx.py b/test/onnx/gen_onnx.py index 010f01bce63..00d89138cbe 100755 --- a/test/onnx/gen_onnx.py +++ b/test/onnx/gen_onnx.py @@ -3234,9 +3234,9 @@ def min_test(): @onnx_test def mod_test(): - a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3]) - b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 3, 3]) - y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3]) + a = helper.make_tensor_value_info('0', TensorProto.INT32, [3, 3, 3]) + b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3]) + y = 
helper.make_tensor_value_info('2', TensorProto.INT32, [3, 3, 3]) node = onnx.helper.make_node('Mod', inputs=['0', '1'], outputs=['2']) @@ -3256,9 +3256,9 @@ def mod_test_half(): @onnx_test def mod_test_different_dtypes(): - a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 3, 3]) + a = helper.make_tensor_value_info('0', TensorProto.INT16, [3, 3, 3]) b = helper.make_tensor_value_info('1', TensorProto.INT32, [3, 3, 3]) - y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 3, 3]) + y = helper.make_tensor_value_info('2', TensorProto.INT32, [3, 3, 3]) node = onnx.helper.make_node( 'Mod', diff --git a/test/onnx/mod_test.onnx b/test/onnx/mod_test.onnx index f813b91ea19..54b5b7530b3 100644 --- a/test/onnx/mod_test.onnx +++ b/test/onnx/mod_test.onnx @@ -3,17 +3,17 @@ 0 12"Modmod_testZ 0 - +   Z 1 - +   b 2 - +   B \ No newline at end of file diff --git a/test/onnx/mod_test_different_dtypes.onnx b/test/onnx/mod_test_different_dtypes.onnx index 2aa6e6db0b3..9985ee90a67 100644 --- a/test/onnx/mod_test_different_dtypes.onnx +++ b/test/onnx/mod_test_different_dtypes.onnx @@ -3,7 +3,7 @@ 0 12"Modmod_test_different_dtypesZ 0 - +   Z @@ -13,7 +13,7 @@  b 2 - +   B \ No newline at end of file diff --git a/test/onnx/onnx_test.cpp b/test/onnx/onnx_test.cpp index 8fda6da941f..0024d429a26 100644 --- a/test/onnx/onnx_test.cpp +++ b/test/onnx/onnx_test.cpp @@ -2954,8 +2954,8 @@ TEST_CASE(mod_test) { migraphx::program p; auto* mm = p.get_main_module(); - auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); - auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); + auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}}); + auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}}); mm->add_instruction(migraphx::make_op("mod"), input0, input1); auto prog = optimize_onnx("mod_test.onnx"); @@ -2965,22 +2965,24 @@ 
TEST_CASE(mod_test) TEST_CASE(mod_test_half) { - migraphx::program p; - auto* mm = p.get_main_module(); - auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {3, 3, 3}}); - auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::half_type, {3, 3, 3}}); - mm->add_instruction(migraphx::make_op("mod"), input0, input1); - - auto prog = optimize_onnx("mod_test_half.onnx"); - - EXPECT(p == prog); + bool result = false; + try + { + auto prog = optimize_onnx("mod_test_half.onnx"); + } + catch(std::exception& e) + { + (void)e; + result = true; + } + EXPECT(result); } TEST_CASE(mod_test_different_dtypes) { migraphx::program p; auto* mm = p.get_main_module(); - auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 3, 3}}); + auto input0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::int16_type, {3, 3, 3}}); auto input1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int32_type, {3, 3, 3}}); add_common_op(*mm, migraphx::make_op("mod"), {input0, input1}); diff --git a/test/onnx/verify_onnx.cpp b/test/onnx/verify_onnx.cpp index 34cfb588ae2..12d0dc59ecd 100644 --- a/test/onnx/verify_onnx.cpp +++ b/test/onnx/verify_onnx.cpp @@ -636,27 +636,24 @@ TEST_CASE(mod_test) migraphx::program p = migraphx::parse_onnx("mod_test.onnx"); p.compile(migraphx::ref::target{}); - migraphx::shape s{migraphx::shape::float_type, {3, 3, 3}}; + migraphx::shape s{migraphx::shape::int32_type, {3, 3, 3}}; - std::vector a = {-4.0, 7.0, 5.0, 4.0, -7.0, 8.0, -4.0, 7.0, 5.0, - 4.0, -7.0, 8.0, -4.0, 7.0, 5.0, 4.0, -7.0, 8.0, - -4.0, 7.0, 5.0, 4.0, -7.0, 8.0, -4.0, 7.0, 5.0}; + std::vector a = {-4, 7, 5, 4, -7, 8, -4, 7, 5, 4, -7, 8, -4, 7, + 5, 4, -7, 8, -4, 7, 5, 4, -7, 8, -4, 7, 5}; - std::vector b = {2.0, -3.0, 8.0, -2.0, 3.0, 5.0, 2.0, -3.0, 8.0, - -2.0, 3.0, 5.0, 2.0, -3.0, 8.0, -2.0, 3.0, 5.0, - 2.0, -3.0, 8.0, -2.0, 3.0, 5.0, 2.0, -3.0, 8.0}; + std::vector b = {2, -3, 8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, + 
8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, 8}; migraphx::parameter_map p_map; p_map["0"] = migraphx::argument(s, a.data()); p_map["1"] = migraphx::argument(s, b.data()); auto result = p.eval(p_map).back(); - std::vector result_vector; + std::vector result_vector; result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); - std::vector gold = {0.0, -2.0, 5.0, 0.0, 2.0, 3.0, 0.0, -2.0, 5.0, - 0.0, 2.0, 3.0, 0.0, -2.0, 5.0, 0.0, 2.0, 3.0, - 0.0, -2.0, 5.0, 0.0, 2.0, 3.0, 0.0, -2.0, 5.0}; + std::vector gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, + 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5}; EXPECT(migraphx::verify_range(result_vector, gold)); } @@ -666,27 +663,25 @@ TEST_CASE(mod_test_different_types) migraphx::program p = migraphx::parse_onnx("mod_test_different_dtypes.onnx"); p.compile(migraphx::ref::target{}); - migraphx::shape s_float{migraphx::shape::float_type, {3, 3, 3}}; - migraphx::shape s_int{migraphx::shape::int32_type, {3, 3, 3}}; + migraphx::shape s_int16{migraphx::shape::int16_type, {3, 3, 3}}; + migraphx::shape s_int32{migraphx::shape::int32_type, {3, 3, 3}}; - std::vector a = {-4.0, 7.0, 5.0, 4.0, -7.0, 8.0, -4.0, 7.0, 5.0, - 4.0, -7.0, 8.0, -4.0, 7.0, 5.0, 4.0, -7.0, 8.0, - -4.0, 7.0, 5.0, 4.0, -7.0, 8.0, -4.0, 7.0, 5.0}; + std::vector a = {-4, 7, 5, 4, -7, 8, -4, 7, 5, 4, -7, 8, -4, 7, + 5, 4, -7, 8, -4, 7, 5, 4, -7, 8, -4, 7, 5}; std::vector b = {2, -3, 8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, 8, -2, 3, 5, 2, -3, 8}; migraphx::parameter_map p_map; - p_map["0"] = migraphx::argument(s_float, a.data()); - p_map["1"] = migraphx::argument(s_int, b.data()); + p_map["0"] = migraphx::argument(s_int16, a.data()); + p_map["1"] = migraphx::argument(s_int32, b.data()); auto result = p.eval(p_map).back(); - std::vector result_vector; + std::vector result_vector; result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); - std::vector gold = {0.0, -2.0, 5.0, 0.0, 2.0, 3.0, 0.0, -2.0, 
5.0, - 0.0, 2.0, 3.0, 0.0, -2.0, 5.0, 0.0, 2.0, 3.0, - 0.0, -2.0, 5.0, 0.0, 2.0, 3.0, 0.0, -2.0, 5.0}; + std::vector gold = {0, -2, 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, + 5, 0, 2, 3, 0, -2, 5, 0, 2, 3, 0, -2, 5}; EXPECT(migraphx::verify_range(result_vector, gold)); } From 7a9e143dbff4294d62315b5444ec053b12654021 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 22 Jul 2022 17:26:43 +0000 Subject: [PATCH 16/20] Fix issue with clang-tidy --- src/onnx/parse_mod.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/onnx/parse_mod.cpp b/src/onnx/parse_mod.cpp index 52a2d9f4fb6..1f21ff4a0bd 100644 --- a/src/onnx/parse_mod.cpp +++ b/src/onnx/parse_mod.cpp @@ -42,7 +42,7 @@ struct parse_mod : op_parser std::string mod = "mod"; if(is_type_float(args[0]->get_shape().type()) || is_type_float(args[1]->get_shape().type())) { - if(contains(info.attributes, "fmod") == false) + if(!contains(info.attributes, "fmod")) { MIGRAPHX_THROW("Mod operator with float args and fmod=0 invalid"); } From 484cd4fb3a5ab95250e51fcd331a88dc0fc826d1 Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 22 Jul 2022 18:21:30 +0000 Subject: [PATCH 17/20] Add more fractional values for floating point mod tests --- test/onnx/verify_onnx.cpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/test/onnx/verify_onnx.cpp b/test/onnx/verify_onnx.cpp index 12d0dc59ecd..bbbb031f2e2 100644 --- a/test/onnx/verify_onnx.cpp +++ b/test/onnx/verify_onnx.cpp @@ -693,9 +693,9 @@ TEST_CASE(mod_test_fmod) migraphx::shape s{migraphx::shape::float_type, {3, 3, 3}}; - std::vector a = {1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, - 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.0, 26.0, 27.0}; + std::vector a = {1.2, -2.2, 3.3, 4.1, -5.4, 6.7, 7.8, -8.4, 9.9, + 10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 16.6, 17.9, 18.2, + 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.2, 26.3, 27.1}; std::vector b = {30, 
29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4}; @@ -708,9 +708,9 @@ TEST_CASE(mod_test_fmod) std::vector result_vector; result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); - std::vector gold{1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, - 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 1.0, 3.0, 5.0, - 7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.0, 1.0, 3.0}; + std::vector gold{1.2, -2.2, 3.3, 4.1, -5.4, 6.7, 7.8, -8.4, 9.9, + 10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2, + 7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1}; EXPECT(migraphx::verify_range(result_vector, gold)); } @@ -723,9 +723,9 @@ TEST_CASE(mod_test_fmod_different_types) migraphx::shape s_float{migraphx::shape::float_type, {3, 3, 3}}; migraphx::shape s_int{migraphx::shape::int32_type, {3, 3, 3}}; - std::vector a = {1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, - 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.0, 26.0, 27.0}; + std::vector a = {1.2, -2.2, 3.3, 4.1, -5.4, 6.7, 7.8, -8.4, 9.9, + 10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 16.6, 17.9, 18.2, + 19.0, 20.0, 21.0, -22.0, 23.0, -24.0, 25.2, 26.3, 27.1}; std::vector b = {30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4}; @@ -738,9 +738,9 @@ TEST_CASE(mod_test_fmod_different_types) std::vector result_vector; result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); }); - std::vector gold{1.0, -2.0, 3.0, 4.0, -5.0, 6.0, 7.0, -8.0, 9.0, - 10.0, 11.0, 12.0, 13.0, -14.0, 15.0, 1.0, 3.0, 5.0, - 7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.0, 1.0, 3.0}; + std::vector gold{1.2, -2.2, 3.3, 4.1, -5.4, 6.7, 7.8, -8.4, 9.9, + 10.7, 11.2, 12.3, 13.9, -14.2, 15.8, 1.6, 3.9, 5.2, + 7.0, 9.0, 1.0, -4.0, 7.0, -3.0, 1.2, 1.3, 3.1}; EXPECT(migraphx::verify_range(result_vector, gold)); } From a659a508066ac73b1733cc8bc352661f458fd9da Mon Sep 17 00:00:00 2001 From: Ted Themistokleous 
<107195283+TedThemistokleous@users.noreply.github.com> Date: Fri, 22 Jul 2022 15:03:49 -0400 Subject: [PATCH 18/20] Update test/onnx/onnx_test.cpp Co-authored-by: Umang Yadav <29876643+umangyadav@users.noreply.github.com> --- test/onnx/onnx_test.cpp | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/test/onnx/onnx_test.cpp b/test/onnx/onnx_test.cpp index 6fb6a4b7522..d787684d7d9 100644 --- a/test/onnx/onnx_test.cpp +++ b/test/onnx/onnx_test.cpp @@ -2969,17 +2969,7 @@ TEST_CASE(mod_test) TEST_CASE(mod_test_half) { - bool result = false; - try - { - auto prog = optimize_onnx("mod_test_half.onnx"); - } - catch(std::exception& e) - { - (void)e; - result = true; - } - EXPECT(result); + EXPECT(test::throws([&] { migraphx::parse_onnx("mod_test_half.onnx"); })); } TEST_CASE(mod_test_different_dtypes) From 583c5b467eeac87e1a0b9ebcbf08207c1a0b0a8b Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 22 Jul 2022 19:10:18 +0000 Subject: [PATCH 19/20] Cleanup and PR comments --- src/onnx/onnx_parser.cpp | 10 ---------- test/onnx/gen_onnx.py | 1 - test/ref_ops_test.cpp | 2 +- 3 files changed, 1 insertion(+), 12 deletions(-) diff --git a/src/onnx/onnx_parser.cpp b/src/onnx/onnx_parser.cpp index 397a07cf637..c37b39f432a 100644 --- a/src/onnx/onnx_parser.cpp +++ b/src/onnx/onnx_parser.cpp @@ -514,16 +514,6 @@ shape::type_t get_type(int dtype) } } -bool is_type_float(int dtype) -{ - bool r = false; - if(dtype == 1 || dtype == 10 || dtype == 11) - { - r = true; - } - return r; -} - bool is_type_float(shape::type_t dtype) { bool r = false; diff --git a/test/onnx/gen_onnx.py b/test/onnx/gen_onnx.py index 5d55c526253..fba4ce7d0c3 100755 --- a/test/onnx/gen_onnx.py +++ b/test/onnx/gen_onnx.py @@ -2993,7 +2993,6 @@ def matmul_bmbm_test(): @onnx_test def matmul_bmv_test(): - m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7]) m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7]) y = helper.make_tensor_value_info('y', 
TensorProto.FLOAT, [3, 6]) diff --git a/test/ref_ops_test.cpp b/test/ref_ops_test.cpp index e4ed3ee53da..97e133e20eb 100644 --- a/test/ref_ops_test.cpp +++ b/test/ref_ops_test.cpp @@ -3085,7 +3085,7 @@ TEST_CASE(mod_test) EXPECT(migraphx::verify_range(results_vector, gold)); } -TEST_CASE(mod_floatingPoint_test) +TEST_CASE(mod_float_test) { migraphx::program p; auto* mm = p.get_main_module(); From 5db9d457e197f1f22f15b3e306820d2625cb206d Mon Sep 17 00:00:00 2001 From: Ted Themistokleous Date: Fri, 22 Jul 2022 19:26:06 +0000 Subject: [PATCH 20/20] cleanup --- src/onnx/include/migraphx/onnx/onnx_parser.hpp | 1 - test/ref_ops_test.cpp | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/onnx/include/migraphx/onnx/onnx_parser.hpp b/src/onnx/include/migraphx/onnx/onnx_parser.hpp index 72c7d329d02..878cce2b286 100644 --- a/src/onnx/include/migraphx/onnx/onnx_parser.hpp +++ b/src/onnx/include/migraphx/onnx/onnx_parser.hpp @@ -119,7 +119,6 @@ struct onnx_parser }; shape::type_t get_type(int dtype); -bool is_type_float(int dtype); bool is_type_float(shape::type_t dtype); } // namespace onnx diff --git a/test/ref_ops_test.cpp b/test/ref_ops_test.cpp index 97e133e20eb..677cb4a3ab5 100644 --- a/test/ref_ops_test.cpp +++ b/test/ref_ops_test.cpp @@ -3048,7 +3048,7 @@ TEST_CASE(fmod_test) EXPECT(migraphx::verify_range(results_vector, gold)); } -TEST_CASE(fmod_floatingPoint_test) +TEST_CASE(fmod_float_test) { migraphx::program p; auto* mm = p.get_main_module();