* impl - FFI for np_where_op (apache#17817)
* impl - FFI for np_may_share_memory

* impl - FFI benchmark

Co-authored-by: Ubuntu <[email protected]>
2 people authored and MoisesHer committed Apr 10, 2020
1 parent ce6e5de commit 6ac9c76
Showing 5 changed files with 161 additions and 12 deletions.
2 changes: 2 additions & 0 deletions benchmark/python/ffi/benchmark_ffi.py
@@ -55,6 +55,8 @@ def prepare_workloads():
    OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2'])
    OpArgMngr.add_workload("add", pool['2x2'], pool['2x2'])
    OpArgMngr.add_workload("random.uniform", low=0, high=1, size=1)
+    OpArgMngr.add_workload("where", pool['2x3'], pool['2x3'], pool['2x1'])
+    OpArgMngr.add_workload("may_share_memory", pool['2x3'][:0], pool['2x3'][:1])


def benchmark_helper(f, *args, **kwargs):
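For context, a minimal timing sketch of what the two new workloads exercise. It is not part of the commit: the shapes mirror the pool keys above, and npx.set_np()/npx.waitall() are used only to keep the snippet self-contained. Because MXNet executes operators asynchronously, timing tiny inputs like these mostly measures the Python front-end and FFI dispatch cost, which is what this benchmark targets.

import timeit

import mxnet.numpy as np
from mxnet import npx

npx.set_np()  # enable NumPy-compatible semantics

cond = np.ones((2, 3))
x = np.ones((2, 3))
y = np.ones((2, 1))   # broadcastable against (2, 3)

n = 10000
t_where = timeit.timeit(lambda: np.where(cond, x, y), number=n)
t_share = timeit.timeit(lambda: np.may_share_memory(x[:0], x[:1]), number=n)
npx.waitall()  # drain the async engine before reporting

print('where:            %.2f us/call' % (1e6 * t_where / n))
print('may_share_memory: %.2f us/call' % (1e6 * t_share / n))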
15 changes: 3 additions & 12 deletions python/mxnet/ndarray/numpy/_op.py
@@ -6855,7 +6855,7 @@ def shares_memory(a, b, max_work=None):
    - Does not support `max_work`, it is a dummy argument
    - Actually it is same as `may_share_memory` in MXNet DeepNumPy
    """
-    return _npi.share_memory(a, b).item()
+    return _api_internal.share_memory(a, b).item()


@set_module('mxnet.ndarray.numpy')
@@ -6896,7 +6896,7 @@ def may_share_memory(a, b, max_work=None):
    - Does not support `max_work`, it is a dummy argument
    - Actually it is same as `shares_memory` in MXNet DeepNumPy
    """
-    return _npi.share_memory(a, b).item()
+    return _api_internal.share_memory(a, b).item()


@set_module('mxnet.ndarray.numpy')
@@ -7482,16 +7482,7 @@ def where(condition, x=None, y=None): # pylint: disable=too-many-return-statements
            else:
                return y
        else:
-            if isinstance(x, numeric_types) and isinstance(y, numeric_types):
-                return _npi.where_scalar2(condition, float(x), float(y), out=None)
-            elif isinstance(x, NDArray) and isinstance(y, NDArray):
-                return _npi.where(condition, x, y, out=None)
-            elif isinstance(y, NDArray):
-                return _npi.where_lscalar(condition, y, float(x), out=None)
-            elif isinstance(x, NDArray):
-                return _npi.where_rscalar(condition, x, float(y), out=None)
-            else:
-                raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))
+            return _api_internal.where(condition, x, y)


@set_module('mxnet.ndarray.numpy')
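As a quick behavioral check (an illustrative sketch, not part of the diff): the user-facing where() semantics are unchanged; the type dispatch that the removed Python branches used to perform now happens inside the single _api_internal.where call.

import mxnet.numpy as np

cond = np.array([[1, 0, 1], [0, 1, 0]])
a = np.arange(6).reshape(2, 3)

out = np.where(cond, a, -1.0)   # NDArray x, scalar y (formerly the _npi.where_rscalar branch)
print(out)                      # values [[0, -1, 2], [-1, 4, -1]]; dtype/printing may differ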
43 changes: 43 additions & 0 deletions src/api/operator/numpy/np_memory_op.cc
@@ -0,0 +1,43 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/*!
* \file np_memory_op.cc
* \brief Implementation of the API of functions in src/operator/numpy/np_memory_op.cc
*/
#include <mxnet/api_registry.h>
#include <mxnet/runtime/packed_func.h>
#include "../utils.h"

namespace mxnet {

MXNET_REGISTER_API("_npi.share_memory")
.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
  using namespace runtime;
  const nnvm::Op* op = Op::Get("_npi_share_memory");
  nnvm::NodeAttrs attrs;
  attrs.op = op;
  int num_inputs = 2;
  int num_outputs = 0;
  NDArray* inputs[] = {args[0].operator mxnet::NDArray*(), args[1].operator mxnet::NDArray*()};
  auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, nullptr);
  *ret = reinterpret_cast<mxnet::NDArray*>(ndoutputs[0]);
});

} // namespace mxnet
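A short usage sketch (illustrative, not part of the diff): both Python wrappers edited above route to this single _npi_share_memory registration, so shares_memory and may_share_memory behave identically in MXNet.

import mxnet.numpy as np

a = np.ones((4, 3))
print(np.shares_memory(a[:2], a[1:3]))     # overlapping views of the same buffer
print(np.may_share_memory(a[:0], a[:1]))   # the empty-slice case used in the benchmark above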
101 changes: 101 additions & 0 deletions src/api/operator/numpy/np_where_op.cc
@@ -0,0 +1,101 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/*!
* \file np_where_op.cc
* \brief Implementation of the API of functions in src/operator/numpy/np_where_op.cc
*/
#include <mxnet/api_registry.h>
#include <mxnet/runtime/packed_func.h>
#include "../utils.h"
#include "../../../operator/numpy/np_where_op-inl.h"

namespace mxnet {

inline static bool isScalar(const runtime::MXNetArgValue& arg) {
  return arg.type_code() == kDLInt ||
         arg.type_code() == kDLUInt ||
         arg.type_code() == kDLFloat;
}

inline static void _npi_where(runtime::MXNetArgs args,
                              runtime::MXNetRetValue* ret) {
  using namespace runtime;
  const nnvm::Op* op = Op::Get("_npi_where");
  nnvm::NodeAttrs attrs;
  attrs.op = op;
  int num_inputs = 3;
  int num_outputs = 0;
  NDArray* inputs[] = {args[0].operator mxnet::NDArray*(),
                       args[1].operator mxnet::NDArray*(),
                       args[2].operator mxnet::NDArray*()};
  auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, nullptr);
  *ret = reinterpret_cast<mxnet::NDArray*>(ndoutputs[0]);
}

inline static void _npi_where_scalar1(runtime::MXNetArgs args,
                                      runtime::MXNetRetValue* ret,
                                      bool isl) {
  using namespace runtime;
  nnvm::NodeAttrs attrs;
  const nnvm::Op* op = isl ? Op::Get("_npi_where_lscalar") : Op::Get("_npi_where_rscalar");
  op::NumpyWhereScalarParam param;
  param.scalar = isl ? args[1].operator double() : args[2].operator double();
  attrs.op = op;
  attrs.parsed = param;
  SetAttrDict<op::NumpyWhereScalarParam>(&attrs);
  int num_inputs = 2;
  int num_outputs = 0;
  NDArray* inputs[] =
    {args[0].operator mxnet::NDArray*(),
     isl ? args[2].operator mxnet::NDArray*() : args[1].operator mxnet::NDArray*()};
  auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, nullptr);
  *ret = reinterpret_cast<mxnet::NDArray*>(ndoutputs[0]);
}

inline static void _npi_where_scalar2(runtime::MXNetArgs args,
                                      runtime::MXNetRetValue* ret) {
  using namespace runtime;
  const nnvm::Op* op = Op::Get("_npi_where_scalar2");
  op::NumpyWhereScalar2Param param;
  nnvm::NodeAttrs attrs;
  param.x = args[1].operator double();
  param.y = args[2].operator double();
  attrs.op = op;
  attrs.parsed = param;
  SetAttrDict<op::NumpyWhereScalar2Param>(&attrs);
  int num_inputs = 1;
  int num_outputs = 0;
  NDArray* inputs[] = {args[0].operator mxnet::NDArray*()};
  auto ndoutputs = Invoke(op, &attrs, num_inputs, inputs, &num_outputs, nullptr);
  *ret = reinterpret_cast<mxnet::NDArray*>(ndoutputs[0]);
}

MXNET_REGISTER_API("_npi.where")
.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
  if (isScalar(args[1]) && isScalar(args[2])) {
    _npi_where_scalar2(args, ret);
  } else if (!isScalar(args[1]) && !isScalar(args[2])) {
    _npi_where(args, ret);
  } else {
    _npi_where_scalar1(args, ret, isScalar(args[1]));
  }
});

} // namespace mxnet
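To make the dispatch above concrete, an illustrative mapping (not part of the diff; op names are the ones used in the C++ code above) from Python-level calls to the backend operator each branch selects:

import mxnet.numpy as np

cond = np.array([[1, 0], [0, 1]])
a = np.ones((2, 2))

np.where(cond, a, a)       # NDArray, NDArray -> _npi_where
np.where(cond, 2.0, 3.0)   # scalar,  scalar  -> _npi_where_scalar2
np.where(cond, 2.0, a)     # scalar,  NDArray -> _npi_where_lscalar
np.where(cond, a, 3.0)     # NDArray, scalar  -> _npi_where_rscalar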
12 changes: 12 additions & 0 deletions src/operator/numpy/np_where_op-inl.h
@@ -49,6 +49,11 @@ struct NumpyWhereScalarParam : public dmlc::Parameter<NumpyWhereScalarParam> {
    .set_default(0.0)
    .describe("The scalar value of x/y.");
  }
+  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+    std::ostringstream scalar_s;
+    scalar_s << scalar;
+    (*dict)["scalar"] = scalar_s.str();
+  }
};

struct NumpyWhereScalar2Param : public dmlc::Parameter<NumpyWhereScalar2Param> {
@@ -61,6 +66,13 @@ struct NumpyWhereScalar2Param : public dmlc::Parameter<NumpyWhereScalar2Param> {
    .set_default(0.0)
    .describe("The scalar value of y.");
  }
+  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
+    std::ostringstream x_s, y_s;
+    x_s << x;
+    y_s << y;
+    (*dict)["x"] = x_s.str();
+    (*dict)["y"] = y_s.str();
+  }
};

template<int ndim>
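Roughly, the new SetAttrDict methods re-serialize the parsed scalar parameters into the operator's string attribute dictionary, presumably so that a node created through the FFI carries the same string attrs as one created from keyword strings. A Python analogy (hypothetical helpers, not MXNet API):

def scalar_param_attrs(scalar):
    # mirrors NumpyWhereScalarParam::SetAttrDict above
    return {"scalar": str(scalar)}

def scalar2_param_attrs(x, y):
    # mirrors NumpyWhereScalar2Param::SetAttrDict above
    return {"x": str(x), "y": str(y)}

print(scalar_param_attrs(2.0))        # {'scalar': '2.0'}
print(scalar2_param_attrs(1.0, 0.0))  # {'x': '1.0', 'y': '0.0'}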
