Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

update clip for opset 11 #1661

Merged
merged 4 commits into from
Aug 22, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions onnxruntime/core/providers/cpu/cpu_execution_provider.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
namespace onnxruntime {

// Forward declarations of op kernels
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, Clip);
class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, 10, Clip);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, Elu);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, HardSigmoid);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, LeakyRelu);
Expand Down Expand Up @@ -298,9 +298,12 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain,
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 10, double, RoiAlign);
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 10, ReverseSequence);

// opset 11
class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, Clip);

void RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
static const BuildKernelCreateInfoFn function_table[] = {
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, Clip)>,
BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, 10, Clip)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, Elu)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, HardSigmoid)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 6, LeakyRelu)>,
Expand Down Expand Up @@ -579,6 +582,9 @@ void RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 10, float, RoiAlign)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 10, double, RoiAlign)>,
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 10, ReverseSequence)>,

//opset 11
BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, Clip)>,
};

for (auto& function_table_entry : function_table) {
Expand Down
9 changes: 8 additions & 1 deletion onnxruntime/core/providers/cpu/math/clip.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,16 @@

namespace onnxruntime {

// Clip for opsets 6-10: min/max are required float attributes on the node.
ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
    Clip,
    6,
    10,
    KernelDefBuilder().MayInplace(0, 0).TypeConstraint("T", DataTypeImpl::GetTensorType<float>()),
    Clip_6<float>);

// Clip for opset 11+: min/max moved from attributes to optional tensor inputs.
ONNX_CPU_OPERATOR_KERNEL(
    Clip,
    11,
    KernelDefBuilder().MayInplace(0, 0).TypeConstraint("T", DataTypeImpl::GetTensorType<float>()),
    Clip<float>);

Expand Down
36 changes: 34 additions & 2 deletions onnxruntime/core/providers/cpu/math/clip.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,9 @@
namespace onnxruntime {

template <typename T>
class Clip final : public OpKernel {
class Clip_6 final : public OpKernel {
public:
Clip(const OpKernelInfo& info) : OpKernel(info) {
Clip_6(const OpKernelInfo& info) : OpKernel(info) {
ORT_ENFORCE(info.GetAttr<T>("max", &max_).IsOK());
ORT_ENFORCE(info.GetAttr<T>("min", &min_).IsOK());
}
Expand All @@ -32,4 +32,36 @@ class Clip final : public OpKernel {
T min_;
};

// Clip kernel for opset 11+.
// Unlike opsets 6-10 (see Clip_6), the bounds arrive as optional scalar
// tensor inputs rather than attributes; a missing bound defaults to
// +/-infinity, i.e. no clipping on that side.
template <typename T>
class Clip final : public OpKernel {
 public:
  Clip(const OpKernelInfo& info) : OpKernel(info) {
  }

  Status Compute(OpKernelContext* ctx) const override {
    const auto* X = ctx->Input<Tensor>(0);
    const auto* min = ctx->Input<Tensor>(1);  // optional input
    const auto* max = ctx->Input<Tensor>(2);  // optional input
    Tensor* Y = ctx->Output(0, X->Shape());

    // Defaults when the optional bounds are absent: clip against nothing.
    T lower_bound = -std::numeric_limits<T>::infinity();
    T upper_bound = std::numeric_limits<T>::infinity();
    if (min != nullptr) {
      // ONNX spec requires a rank-0 tensor here; rank-1 shape {1} is
      // deliberately rejected (strict-scalar decision per spec discussion).
      ORT_ENFORCE(min->Shape().NumDimensions() == 0, "min should be a scalar.");
      lower_bound = *(min->template Data<T>());
    }
    if (max != nullptr) {
      ORT_ENFORCE(max->Shape().NumDimensions() == 0, "max should be a scalar.");
      upper_bound = *(max->template Data<T>());
    }

    // Elementwise clamp via Eigen: max(lower) then min(upper).
    EigenVectorMap<T>(Y->template MutableData<T>(), Y->Shape().Size()) =
        ConstEigenVectorMap<T>(X->template Data<T>(), X->Shape().Size())
            .cwiseMax(lower_bound)
            .cwiseMin(upper_bound);

    return Status::OK();
  }
};

} // namespace onnxruntime
14 changes: 7 additions & 7 deletions onnxruntime/test/onnx/main.cc
Original file line number Diff line number Diff line change
Expand Up @@ -404,19 +404,19 @@ int real_main(int argc, char* argv[], Ort::Env& env) {
{"cumsum_1d_reverse_exclusive", "not implemented yet"},
{"cumsum_1d_reverse", "not implemented yet"},
{"cumsum_1d_exclusive", "not implemented yet"},
{"cumsum_1d", "not implemented yet"},
{"clip_splitbounds", "not implemented yet for opset 11"},
{"clip_outbounds", "not implemented yet for opset 11"},
{"clip_example", "not implemented yet for opset 11"},
{"clip_default_min", "not implemented yet for opset 11"},
{"clip_default_max", "not implemented yet for opset 11"},
{"clip", "not implemented yet for opset 11"},
{"cumsum_1d", "not implemented yet"},
};

#ifdef USE_NGRAPH
broken_tests.insert({"dequantizelinear", "ambiguity in scalar dimensions [] vs [1]", {"onnx150"}});
broken_tests.insert({"qlinearconv", "ambiguity in scalar dimensions [] vs [1]"});
broken_tests.insert({"quantizelinear", "ambiguity in scalar dimensions [] vs [1]", {"onnx150"}});
broken_tests.insert({"clip_splitbounds", "not implemented yet for opset 11"});
broken_tests.insert({"clip_outbounds", "not implemented yet for opset 11"});
broken_tests.insert({"clip_example", "not implemented yet for opset 11"});
broken_tests.insert({"clip_default_min", "not implemented yet for opset 11"});
broken_tests.insert({"clip_default_max", "not implemented yet for opset 11"});
broken_tests.insert({"clip", "not implemented yet for opset 11"});
#endif

#ifdef USE_MKLDNN
Expand Down
40 changes: 38 additions & 2 deletions onnxruntime/test/providers/cpu/math/clip_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
namespace onnxruntime {
namespace test {

TEST(MathOpTest, Clip) {
OpTester test("Clip");
TEST(MathOpTest, Clip_6) {
OpTester test("Clip", 6);

test.AddAttribute("min", -10.0f);
test.AddAttribute("max", 10.0f);
Expand All @@ -25,5 +25,41 @@ TEST(MathOpTest, Clip) {
test.Run();
}

// Opset 11 Clip with neither min nor max supplied: the op must pass the
// input through unchanged.
TEST(MathOpTest, Clip_Default) {
  OpTester test("Clip", 11);

  const std::vector<int64_t> dims{3, 3};
  const std::vector<float> data{11.0f, 4.4f, 432.3f,
                                -1.3f, 3.5f, 64.0f,
                                -5.4f, 9.3f, 82.4f};
  test.AddInput<float>("X", dims, data);
  test.AddOutput<float>("Y", dims, data);

  // nGraph does not support Clip opset 11 yet.
  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kNGraphExecutionProvider});
}

// Opset 11 Clip with explicit rank-0 (scalar) min/max tensor inputs:
// values outside [-5, 5] are clamped to the nearer bound.
TEST(MathOpTest, Clip) {
  OpTester test("Clip", 11);

  const std::vector<int64_t> dims{3, 3};
  test.AddInput<float>("X", dims,
                       {-1.0f, 0.0f, 1.0f,
                        -6.0f, 0.0f, 6.0f,
                        -5.4f, 2.0f, 6.0f});
  test.AddInput<float>("min", {}, {-5.0f});
  test.AddInput<float>("max", {}, {5.0f});
  test.AddOutput<float>("Y", dims,
                        {-1.0f, 0.0f, 1.0f,
                         -5.0f, 0.0f, 5.0f,
                         -5.0f, 2.0f, 5.0f});

  // nGraph does not support Clip opset 11 yet.
  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kNGraphExecutionProvider});
}

} // namespace test
} // namespace onnxruntime
3 changes: 2 additions & 1 deletion onnxruntime/test/python/onnx_backend_test_series.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,6 @@ def create_backend_test(testname=None):
'^test_dynamicquantizelinear_max_adjusted_expanded*',
'^test_dynamicquantizelinear_min_adjusted*',
'^test_dynamicquantizelinear_min_adjusted_expanded*',
'^test_clip*',
'^test_depthtospace*',
'^test_gather_elements*',
'^test_scatter_elements*',
Expand All @@ -127,6 +126,8 @@ def create_backend_test(testname=None):
# Example of how to disable tests for a specific provider.
# if c2.supports_device('NGRAPH'):
# current_failing_tests = current_failing_tests + ('|^test_operator_repeat_dim_overflow_cpu.*',)
if c2.supports_device('NGRAPH'):
current_failing_tests = current_failing_tests + ('|^test_clip*',)

filters = current_failing_tests + \
tests_with_pre_opset7_dependencies_filters() + \
Expand Down