Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[v1.7.x] Backport #17177 to 1.7.x (Fix incorrect calculation results when the C locale is set to a locale that uses commas as the decimal separator) #18147

Merged
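C++'s std::stof and std::stod delegate to strtof/strtod, which honor the process-wide C locale (LC_NUMERIC). If a host application or a linked library switches the locale to one that uses a comma as the decimal separator, parsing a string such as "0.5" silently stops at the '.' and returns 0, corrupting learning rates and other hyperparameters passed around as strings. This backport replaces those calls with the locale-independent dmlc::stof/dmlc::stod from <dmlc/strtonum.h>, which always treat '.' as the decimal separator.

A minimal sketch of the failure mode (the locale name "de_DE.UTF-8" is an assumption; which locales are installed varies by platform):

```cpp
#include <clocale>
#include <iostream>
#include <string>

int main() {
  // Under the default "C" locale, '.' is the decimal separator.
  std::cout << std::stof("0.5") << "\n";  // prints 0.5

  // A host application switches LC_NUMERIC to a comma-decimal locale.
  if (std::setlocale(LC_NUMERIC, "de_DE.UTF-8") != nullptr) {
    // std::stof now stops parsing at the '.' and silently returns 0.
    std::cout << std::stof("0.5") << "\n";  // prints 0
  }
  return 0;
}
```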
3 changes: 2 additions & 1 deletion cpp-package/include/mxnet-cpp/optimizer.h
@@ -27,6 +27,7 @@
#ifndef MXNET_CPP_OPTIMIZER_H_
#define MXNET_CPP_OPTIMIZER_H_

+#include <dmlc/strtonum.h>
#include <map>
#include <vector>
#include <string>
@@ -84,7 +85,7 @@ class Optimizer {
Optimizer *SetLRScheduler(std::unique_ptr<LRScheduler> lrScheduler) {
CHECK(lrScheduler);
lrScheduler_ = std::move(lrScheduler);
-lrScheduler_->SetLR(std::stof(params_["lr"]));
+lrScheduler_->SetLR(dmlc::stof(params_["lr"]));
return this;
}
/*!
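For reference, <dmlc/strtonum.h> exposes dmlc::stof/dmlc::stod with interfaces mirroring their std:: counterparts, so each change below is a drop-in swap. A hedged usage sketch (ParseLR is a hypothetical helper for illustration, assuming dmlc-core's headers are on the include path):

```cpp
#include <dmlc/strtonum.h>
#include <string>

// Hypothetical helper: parse an optimizer hyperparameter string
// independently of the global C locale.
float ParseLR(const std::string& lr) {
  return dmlc::stof(lr);  // always treats '.' as the decimal separator
}
```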
25 changes: 13 additions & 12 deletions cpp-package/include/mxnet-cpp/optimizer.hpp
@@ -26,6 +26,7 @@
#ifndef MXNET_CPP_OPTIMIZER_HPP_
#define MXNET_CPP_OPTIMIZER_HPP_

+#include <dmlc/strtonum.h>
#include <algorithm>
#include <utility>
#include <numeric>
@@ -116,11 +117,11 @@ inline float Optimizer::GetLR_(int index) {
if (nullptr != lrScheduler_) {
return lrScheduler_->GetLR(num_update_);
}
-return std::stof(params_["lr"]);
+return dmlc::stof(params_["lr"]);
}

inline float Optimizer::GetWD_(int index) {
-float wd = std::stof(params_["wd"]);
+float wd = dmlc::stof(params_["wd"]);
return wd;
}

@@ -362,9 +363,9 @@ inline void AdamOptimizer::Update(int index, NDArray weight, NDArray grad) {
auto values = GetParamValues_();
CHECK_EQ(keys.size(), values.size());

-float lr = std::stof(params_["lr"]);
-float b1 = std::stof(params_["beta1"]);
-float b2 = std::stof(params_["beta2"]);
+float lr = dmlc::stof(params_["lr"]);
+float b1 = dmlc::stof(params_["beta1"]);
+float b2 = dmlc::stof(params_["beta2"]);
float t = count_[index];
float coef1 = 1.0f - std::pow(b1, t);
float coef2 = 1.0f - std::pow(b2, t);
@@ -407,15 +408,15 @@ inline void AdaGradOptimizer::Update(int index, NDArray weight, NDArray grad) {
CreateState_(index, weight);
}

-float eps = std::stof(params_["eps"]);
+float eps = dmlc::stof(params_["eps"]);
float lr = GetLR_(index);
float wd = GetWD_(index);
UpdateCount_(index);
if (params_.count("rescale_grad") > 0) {
-grad *= std::stof(params_["rescale_grad"]);
+grad *= dmlc::stof(params_["rescale_grad"]);
}
if (params_.count("clip_gradient") > 0) {
-_clip(grad, std::stof(params_["clip_gradient"]));
+_clip(grad, dmlc::stof(params_["clip_gradient"]));
}
auto& history = *history_[index];
history += grad * grad;
@@ -448,16 +449,16 @@ inline void AdaDeltaOptimizer::Update(int index, NDArray weight, NDArray grad) {
CreateState_(index, weight);
}

-float rho = std::stof(params_["rho"]);
-float epsilon = std::stof(params_["epsilon"]);
+float rho = dmlc::stof(params_["rho"]);
+float epsilon = dmlc::stof(params_["epsilon"]);
float wd = GetWD_(index);
UpdateCount_(index);

if (params_.count("rescale_grad") > 0) {
-grad *= std::stof(params_["rescale_grad"]);
+grad *= dmlc::stof(params_["rescale_grad"]);
}
if (params_.count("clip_gradient") > 0) {
-_clip(grad, std::stof(params_["clip_gradient"]));
+_clip(grad, dmlc::stof(params_["clip_gradient"]));
}

auto& acc_g = *acc_g_[index];
2 changes: 1 addition & 1 deletion docs/static_site/src/pages/api/faq/new_op.md
@@ -204,7 +204,7 @@ Simple arguments can be parsed like
NNVM_REGISTER_OP(scalar_op)
.set_attr_parser(
[](NodeAttrs* attrs) {
-attrs->parsed = std::stod(attrs->dict["scalar"]);
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]);
})
```

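For context, the value stored into attrs->parsed above is type-erased; compute functions read it back with the usual NNVM accessor. A hedged sketch of the consuming side (GetScalar is a hypothetical helper; the accessor pattern matches how MXNet's scalar ops read attrs.parsed in the files touched by this diff):

```cpp
#include <nnvm/node.h>

// Hypothetical helper: recover the scalar stored by the attr parser.
double GetScalar(const nnvm::NodeAttrs& attrs) {
  return nnvm::get<double>(attrs.parsed);
}
```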
3 changes: 2 additions & 1 deletion plugin/torch/torch_function.h
@@ -28,6 +28,7 @@
#include "./torch_base.h"
#include <mxnet/base.h>
#include <mxnet/ndarray.h>
+#include <dmlc/strtonum.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
@@ -69,7 +70,7 @@ void TorchRunOp(std::vector<NDArray> arr_in,
lua_pushinteger(L, std::stoi(val));
break;
case 'f':
-lua_pushnumber(L, std::stof(val));
+lua_pushnumber(L, dmlc::stof(val));
break;
case 's':
lua_pushstring(L, val.c_str());
3 changes: 2 additions & 1 deletion src/nnvm/legacy_op_util.cc
@@ -23,6 +23,7 @@
* \brief Utility to adapt OpProperty to the new NNVM registery
*/
#include <dmlc/base.h>
+#include <dmlc/strtonum.h>
#include <mxnet/base.h>
#include <mxnet/operator.h>
#include <mxnet/op_attr_types.h>
@@ -511,7 +512,7 @@ void RegisterLegacyNDFunc() {
const std::string& name = reg->arguments[i+reg->num_use_vars].name;
auto s = dict.find(name);
CHECK(s != dict.end()) << "Missing scalar param " << name;
-scalars.push_back(std::stof(s->second));
+scalars.push_back(dmlc::stof(s->second));
dict.erase(s);
}

3 changes: 2 additions & 1 deletion src/operator/contrib/gradient_multiplier_op.cc
@@ -23,6 +23,7 @@
* \brief
* \author Istvan Fehervari
*/
+#include <dmlc/strtonum.h>
#include "../tensor/elemwise_unary_op.h"
#include "../tensor/elemwise_binary_scalar_op.h"

@@ -77,7 +78,7 @@ multiplies the gradient from the subsequent level by a scalar factor lambda and
the preceding layer.
)code" ADD_FILELINE)
.set_attr_parser([](NodeAttrs* attrs) {
-attrs->parsed = std::stod(attrs->dict["scalar"]);
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]);
})
.set_attr<FInferStorageType>("FInferStorageType", ElemwiseStorageType<1, 1, false, true, true>)
.set_attr<FCompute>("FCompute<cpu>", UnaryOp::IdentityCompute<cpu>)
3 changes: 2 additions & 1 deletion src/operator/numpy/np_boolean_mask_assign.cc
@@ -22,6 +22,7 @@
* \brief CPU implementation of Boolean Mask Assign
*/

+#include <dmlc/strtonum.h>
#include "../../common/utils.h"
#include "../contrib/boolean_mask-inl.h"

@@ -272,7 +273,7 @@ void NumpyBooleanAssignForwardCPU(const nnvm::NodeAttrs& attrs,
MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, {
Kernel<BooleanAssignCPUKernel<true>, cpu>::Launch(
s, valid_num, data.dptr<DType>(), prefix_sum.data(), prefix_sum.size(),
-leading, middle, trailing, static_cast<DType>(std::stod(attrs.dict.at("value"))));
+leading, middle, trailing, static_cast<DType>(dmlc::stod(attrs.dict.at("value"))));
});
}
}
3 changes: 2 additions & 1 deletion src/operator/numpy/np_boolean_mask_assign.cu
@@ -23,6 +23,7 @@
*/

#include <cub/cub.cuh>
+#include <dmlc/strtonum.h>
#include "../../common/utils.h"
#include "../contrib/boolean_mask-inl.h"

@@ -252,7 +253,7 @@ void NumpyBooleanAssignForwardGPU(const nnvm::NodeAttrs& attrs,
}
} else {
CHECK(attrs.dict.find("value") != attrs.dict.end()) << "value is not provided";
-double value = std::stod(attrs.dict.at("value"));
+double value = dmlc::stod(attrs.dict.at("value"));
MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, {
Kernel<BooleanAssignGPUKernel<true>, gpu>::Launch(
s, leading * valid_num * trailing, data.dptr<DType>(), prefix_sum, mask_size + 1,
3 changes: 2 additions & 1 deletion src/operator/numpy/np_elemwise_broadcast_logic_op.cc
@@ -30,6 +30,7 @@
#include "../tvmop/op_module.h"
#endif // MXNET_USE_TVM_OP

+#include <dmlc/strtonum.h>
#include "../tensor/elemwise_binary_broadcast_op.h"
#include "../tensor/elemwise_binary_scalar_op.h"

@@ -225,7 +226,7 @@ struct TVMBinaryBroadcastScalarCompute {
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser([](NodeAttrs* attrs) { \
-attrs->parsed = std::stod(attrs->dict["scalar"]); \
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]); \
}) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
3 changes: 2 additions & 1 deletion src/operator/numpy/np_elemwise_broadcast_op.cc
@@ -23,6 +23,7 @@
* \brief CPU Implementation of basic functions for elementwise numpy binary broadcast operator.
*/

+#include <dmlc/strtonum.h>
#include "./np_elemwise_broadcast_op.h"

namespace mxnet {
@@ -33,7 +34,7 @@ namespace op {
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser([](NodeAttrs* attrs) { \
-attrs->parsed = std::stod(attrs->dict["scalar"]); \
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]); \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType) \
17 changes: 9 additions & 8 deletions src/operator/numpy/np_elemwise_broadcast_op_extended.cc
@@ -23,6 +23,7 @@
* \brief CPU Implementation of extended functions for elementwise numpy binary broadcast operator.
*/

+#include <dmlc/strtonum.h>
#include "../../common/utils.h"
#include "./np_elemwise_broadcast_op.h"

@@ -34,7 +35,7 @@ namespace op {
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser([](NodeAttrs* attrs) { \
-attrs->parsed = std::stod(attrs->dict["scalar"]); \
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]); \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType) \
@@ -87,7 +88,7 @@ NNVM_REGISTER_OP(_npi_lcm_scalar)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser([](NodeAttrs* attrs) {
-attrs->parsed = std::stod(attrs->dict["scalar"]);
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]);
})
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseIntType<1, 1>)
@@ -175,7 +176,7 @@ NNVM_REGISTER_OP(_npi_bitwise_xor_scalar)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser([](NodeAttrs* attrs) {
-attrs->parsed = std::stod(attrs->dict["scalar"]);
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]);
})
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseIntType<1, 1>)
@@ -192,7 +193,7 @@ NNVM_REGISTER_OP(_npi_bitwise_or_scalar)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser([](NodeAttrs* attrs) {
-attrs->parsed = std::stod(attrs->dict["scalar"]);
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]);
})
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseIntType<1, 1>)
@@ -275,13 +276,13 @@ MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_rarctan2_scalar)

MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_arctan2_scalar)
.add_argument("scalar", "float", "scalar value")
-.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
+.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = dmlc::stod(attrs->dict["scalar"]); })
.set_attr<FCompute>("FCompute<cpu>",
BinaryScalarOp::Backward<cpu, mshadow_op::arctan2_grad>);

MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_rarctan2_scalar)
.add_argument("scalar", "float", "scalar value")
-.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
+.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = dmlc::stod(attrs->dict["scalar"]); })
.set_attr<FCompute>("FCompute<cpu>",
BinaryScalarOp::Backward<cpu, mshadow_op::arctan2_rgrad>);

@@ -363,12 +364,12 @@ NNVM_REGISTER_OP(_backward_npi_ldexp)

MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_ldexp_scalar)
.add_argument("scalar", "float", "scalar value")
-.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
+.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = dmlc::stod(attrs->dict["scalar"]); })
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::ldexp_grad>);

MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_rldexp_scalar)
.add_argument("scalar", "float", "scalar value")
-.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
+.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = dmlc::stod(attrs->dict["scalar"]); })
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::rldexp_grad>);

} // namespace op
5 changes: 3 additions & 2 deletions src/operator/numpy/np_true_divide.cc
@@ -23,6 +23,7 @@
* \brief CPU Implementation of true_divide operator.
*/

+#include <dmlc/strtonum.h>
#include "./np_true_divide-inl.h"

namespace mxnet {
@@ -88,7 +89,7 @@ NNVM_REGISTER_OP(_npi_true_divide_scalar)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser([](NodeAttrs* attrs) {
-attrs->parsed = std::stod(attrs->dict["scalar"]);
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]);
})
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<nnvm::FInferType>("FInferType", TrueDivideType<1>)
@@ -111,7 +112,7 @@ NNVM_REGISTER_OP(_npi_rtrue_divide_scalar)
.set_num_inputs(1)
.set_num_outputs(1)
.set_attr_parser([](NodeAttrs* attrs) {
-attrs->parsed = std::stod(attrs->dict["scalar"]);
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]);
})
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>)
.set_attr<nnvm::FInferType>("FInferType", TrueDivideType<1>)
5 changes: 3 additions & 2 deletions src/operator/subgraph/mkldnn/mkldnn_post_quantize_align_scale_property.h
@@ -21,6 +21,7 @@
#define MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_POST_QUANTIZE_ALIGN_SCALE_PROPERTY_H_
#if MXNET_USE_MKLDNN == 1

+#include <dmlc/strtonum.h>
#include <string>
#include <vector>
#include "../common.h"
@@ -146,8 +147,8 @@ class SgMKLDNNPostQuantizeAlignScaleProperty : public SubgraphProperty {
float min_calib = 0.0f;
float max_calib = 0.0f;
for (size_t i = 0; i < subgraph_nodes.size(); ++i) {
-auto this_min_calib = std::stof(subgraph_nodes[i]->attrs.dict["min_calib_range"]);
-auto this_max_calib = std::stof(subgraph_nodes[i]->attrs.dict["max_calib_range"]);
+auto this_min_calib = dmlc::stof(subgraph_nodes[i]->attrs.dict["min_calib_range"]);
+auto this_max_calib = dmlc::stof(subgraph_nodes[i]->attrs.dict["max_calib_range"]);
if (min_calib > this_min_calib) min_calib = this_min_calib;
if (max_calib < this_max_calib) max_calib = this_max_calib;
}
3 changes: 2 additions & 1 deletion src/operator/tensor/elemwise_binary_scalar_op.h
@@ -26,6 +26,7 @@
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_

#include <mxnet/operator_util.h>
+#include <dmlc/strtonum.h>
#include <vector>
#include <utility>
#include "../mshadow_op.h"
@@ -400,7 +401,7 @@ class BinaryScalarOp : public UnaryOp {
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser([](NodeAttrs* attrs) { \
-attrs->parsed = std::stod(attrs->dict["scalar"]); \
+attrs->parsed = dmlc::stod(attrs->dict["scalar"]); \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
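A quick end-to-end check of the fix is to force a comma-decimal locale and confirm that parsing is unaffected; a hedged sketch of such a regression check (the locale name is an assumption, and the check is skipped when it is unavailable):

```cpp
#include <cassert>
#include <clocale>
#include <dmlc/strtonum.h>

int main() {
  // Force a comma-decimal locale if one is installed; skip otherwise.
  if (std::setlocale(LC_NUMERIC, "de_DE.UTF-8") != nullptr) {
    // dmlc::stof must still treat '.' as the decimal separator.
    assert(dmlc::stof("0.5") == 0.5f);
  }
  return 0;
}
```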