Commit

[clang-tidy] enable bugprone-narrowing-conversions check (#57861)
gouzil authored Oct 11, 2023
1 parent 98696c0 commit e973253
Showing 20 changed files with 108 additions and 75 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -20,7 +20,7 @@ bugprone-integer-division,
bugprone-misplaced-widening-cast,
-bugprone-move-forwarding-reference,
-bugprone-multiple-statement-macro,
-bugprone-narrowing-conversions,
bugprone-narrowing-conversions,
-bugprone-not-null-terminated-result,
-bugprone-parent-virtual-call,
-bugprone-posix-return,
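
For context (not part of the diff): bugprone-narrowing-conversions reports implicit conversions that may lose data or change sign, such as storing a 64-bit size_t into a 32-bit int. The hunks below use two remedies: make the narrowing explicit with static_cast, or mark an intentional narrowing with a trailing // NOLINT. A minimal sketch of the flagged pattern and the cast-based fix, with illustrative names:

    #include <cstdint>
    #include <vector>

    // Illustrative only: the kind of code the newly enabled check flags.
    int NumDims(const std::vector<int64_t>& shape) {
      // Flagged: shape.size() is a size_t; assigning it to int is an implicit
      // narrowing conversion.
      //   int ndim = shape.size();
      // Accepted: the narrowing is spelled out, so the check stays silent.
      int ndim = static_cast<int>(shape.size());
      return ndim;
    }
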
(changes in another file; file path not shown in this view)
@@ -34,7 +34,7 @@ CrossEntropyWithSoftmaxSPMDRule::InferForward(
input_specs_size));

auto x_shape = input_specs[0].shape();
int x_ndim = x_shape.size();
int x_ndim = static_cast<int>(x_shape.size());
auto x_dist_attr_src = input_specs[0].dist_attr();
std::vector<int64_t> x_dims_mapping_src = x_dist_attr_src.dims_mapping();

@@ -176,8 +176,8 @@ CrossEntropyWithSoftmaxSPMDRule::InferBackward(
const std::vector<DistTensorSpec>& output_specs,
const paddle::framework::AttributeMap& attrs) {
// step0: verify input args based on cross_entropy_with_softmax logic
int64_t ninputs = input_specs.size();
int64_t noutputs = output_specs.size();
int64_t ninputs = static_cast<int64_t>(input_specs.size());
int64_t noutputs = static_cast<int64_t>(output_specs.size());
PADDLE_ENFORCE_EQ(
ninputs,
2,
@@ -194,7 +194,7 @@ CrossEntropyWithSoftmaxSPMDRule::InferBackward(

// step1: build Einsum Notation
std::vector<int64_t> x_shape = input_specs[0].shape();
int64_t x_ndim = x_shape.size();
int64_t x_ndim = static_cast<int64_t>(x_shape.size());
std::vector<int64_t> label_shape = input_specs[1].shape();

int axis = ExtractAttr<int>("axis", attrs);
@@ -205,7 +205,7 @@ CrossEntropyWithSoftmaxSPMDRule::InferBackward(

// normalize axis
if (axis < 0) {
axis = x_ndim + axis;
axis = static_cast<int>(x_ndim + axis);
}

std::string alphabet =
2 changes: 1 addition & 1 deletion paddle/fluid/eager/custom_operator/custom_operator_node.cc
@@ -154,7 +154,7 @@ static void ConstructFwdAndBwdMap(
<< "'s No." << j << " attrs: " << attrs_names[j]
<< " related to No." << i
<< " grad_attrs: " << grad_attrs_names[i];
in_out_map[op_type][1][4][j] = i;
in_out_map[op_type][1][4][j] = i; // NOLINT
}
}
}
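
The custom_operator_node.cc hunk above uses the second remedy: when the narrowing is intentional and a cast would only add noise, a trailing // NOLINT suppresses the diagnostic on that line alone. A hedged sketch of the same idea (the container and index names here are made up):

    #include <cstddef>
    #include <vector>

    // Sketch: grad_slot_for_attr maps attribute index -> grad attribute slot.
    void RecordMapping(std::vector<int>& grad_slot_for_attr,
                       size_t attr_idx, size_t grad_idx) {
      // The indices are known to be small here, so the size_t -> int
      // narrowing is deliberate; NOLINT silences the check for this line only.
      grad_slot_for_attr[attr_idx] = grad_idx;  // NOLINT
    }
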
(changes in another file; file path not shown in this view)
@@ -693,7 +693,7 @@ void BuildOpFuncList(const platform::Place& place,
// op is not an OperatorWithKernel, so directly run OperatorBase::Run()

std::vector<std::shared_ptr<OperatorBase>> following_ops(
ops.begin() + i + 1, ops.end());
ops.begin() + static_cast<int>(i) + 1, ops.end());
HandleOperatorBase(place,
ops[i],
&op_func_node,
3 changes: 2 additions & 1 deletion paddle/fluid/framework/new_executor/new_ir_interpreter.cc
@@ -236,7 +236,8 @@ void NewIRInterpreter::reset_scope(Scope* new_scope) {
scope_ = new_scope;
for (size_t i = 0; i < value_exe_info_->GetVarList().size(); i++) {
const auto& var_name = value_exe_info_->GetNameById(static_cast<int>(i));
value_exe_info_->ResetVarList(i, new_scope->FindVar(var_name));
value_exe_info_->ResetVarList(static_cast<int>(i),
new_scope->FindVar(var_name));
}
// The index should be assured valid, because the InterpreterCore may not be
// fully built, but was still cached and used. For example, see unit test
20 changes: 13 additions & 7 deletions paddle/fluid/inference/utils/io_utils.cc
@@ -240,35 +240,41 @@ void DeserializeShapeRangeInfo(
continue;
} else {
std::vector<int32_t> tmp(info.min_shape_size());
for (size_t k = 0; k < tmp.size(); ++k) tmp[k] = info.min_shape(k);
for (size_t k = 0; k < tmp.size(); ++k)
tmp[k] = info.min_shape(static_cast<int>(k));
min_shape->insert(std::make_pair(name, tmp));

tmp.resize(info.max_shape_size());
for (size_t k = 0; k < tmp.size(); ++k) tmp[k] = info.max_shape(k);
for (size_t k = 0; k < tmp.size(); ++k)
tmp[k] = info.max_shape(static_cast<int>(k));
max_shape->insert(std::make_pair(name, tmp));

tmp.resize(info.opt_shape_size());
for (size_t k = 0; k < tmp.size(); ++k) tmp[k] = info.opt_shape(k);
for (size_t k = 0; k < tmp.size(); ++k)
tmp[k] = info.opt_shape(static_cast<int>(k));
opt_shape->insert(std::make_pair(name, tmp));
}
}
for (int i = 0; i < shape_range_infos.shape_range_info_size(); ++i) {
auto info = shape_range_infos.shape_range_info(i);
auto info = shape_range_infos.shape_range_info(static_cast<int>(i));
auto name = info.name();
if (min_value->count(name) || max_value->count(name) ||
opt_value->count(name)) {
continue;
} else {
std::vector<int32_t> tmp(info.min_value_size());
for (size_t k = 0; k < tmp.size(); ++k) tmp[k] = info.min_value(k);
for (size_t k = 0; k < tmp.size(); ++k)
tmp[k] = info.min_value(static_cast<int>(k));
min_value->insert(std::make_pair(name, tmp));

tmp.resize(info.max_value_size());
for (size_t k = 0; k < tmp.size(); ++k) tmp[k] = info.max_value(k);
for (size_t k = 0; k < tmp.size(); ++k)
tmp[k] = info.max_value(static_cast<int>(k));
max_value->insert(std::make_pair(name, tmp));

tmp.resize(info.opt_value_size());
for (size_t k = 0; k < tmp.size(); ++k) tmp[k] = info.opt_value(k);
for (size_t k = 0; k < tmp.size(); ++k)
tmp[k] = info.opt_value(static_cast<int>(k));
opt_value->insert(std::make_pair(name, tmp));
}
}
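
The io_utils.cc loops above index protobuf repeated fields, whose generated accessors take an int, while the loop counters are size_t; with the check on, every call site now casts the index. A rough sketch of that shape, using a hand-written stand-in for the generated message type:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in for a protobuf-generated message: repeated fields expose
    // size() as int and are indexed with int.
    struct ShapeRangeInfo {
      std::vector<int32_t> min_shape_values;
      int min_shape_size() const { return static_cast<int>(min_shape_values.size()); }
      int32_t min_shape(int k) const { return min_shape_values[k]; }
    };

    std::vector<int32_t> CopyMinShape(const ShapeRangeInfo& info) {
      std::vector<int32_t> tmp(info.min_shape_size());
      for (size_t k = 0; k < tmp.size(); ++k)
        tmp[k] = info.min_shape(static_cast<int>(k));  // explicit size_t -> int
      return tmp;
    }
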
30 changes: 17 additions & 13 deletions paddle/fluid/inference/utils/table_printer.cc
@@ -101,7 +101,8 @@ void TablePrinter::InsertRow(const std::vector<std::string>& row) {
if (line.length() > max_width) max_width = line.length();
}

if (max_width > widths_[i]) widths_[i] = static_cast<float>(max_width);
if (static_cast<float>(max_width) > widths_[i])
widths_[i] = static_cast<float>(max_width);

size_t num_lines = table_row[i].size();
if (num_lines > max_height) max_height = num_lines;
@@ -159,47 +160,50 @@ void TablePrinter::CalcLayout() {
// If the number of rows required for this record is larger than 1, we
// will break that line and put it in multiple lines
if (num_rows > 1) {
data_[i][j].erase(data_[i][j].begin() + line_index);
data_[i][j].erase(data_[i][j].begin() + line_index); // NOLINT
for (size_t k = 0; k < num_rows; ++k) {
size_t start =
std::min(static_cast<size_t>(k * shares_[j]), line.length());
size_t end = std::min(static_cast<size_t>((k + 1) * shares_[j]),
line.length());
data_[i][j].insert(data_[i][j].begin() + line_index + k,
std::min(static_cast<size_t>(k * shares_[j]), // NOLINT
line.length());
size_t end =
std::min(static_cast<size_t>((k + 1) * shares_[j]), // NOLINT
line.length());
data_[i][j].insert(data_[i][j].begin() + line_index + k, // NOLINT
line.substr(start, end - start));
}

// update line_index
line_index += num_rows - 1;
}

if (heights_[i] < (num_rows - 1 + data_[i][j].size()))
heights_[i] += num_rows - 1;
if (heights_[i] < static_cast<float>(num_rows - 1 + data_[i][j].size()))
heights_[i] += static_cast<float>(num_rows - 1);
}
}
}
}

void TablePrinter::AddRowDivider(std::stringstream& ss) {
ss << "+";
for (auto share : shares_) {
for (size_t j = 0; j < share + 2; ++j) ss << "-";
for (float share : shares_) {
for (float j = 0; j < share + 2; ++j) ss << "-";
ss << "+";
}
ss << "\n";
}

void TablePrinter::AddRow(std::stringstream& ss, size_t row_idx) {
auto row = data_[row_idx];
size_t max_height = heights_[row_idx];
size_t max_height = static_cast<size_t>(heights_[row_idx]);

for (size_t h = 0; h < max_height; ++h) {
ss << "|" << std::left;
for (size_t i = 0; i < row.size(); ++i) {
if (h < row[i].size()) {
ss << " " << std::setw(shares_[i]) << row[i][h] << " |";
ss << " " << std::setw(static_cast<int>(shares_[i])) << row[i][h]
<< " |";
} else {
ss << " " << std::setw(shares_[i]) << " "
ss << " " << std::setw(static_cast<int>(shares_[i])) << " "
<< " |";
}
}
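
TablePrinter stores column widths and row heights as float, so mixing them with size_t string lengths or passing them to std::setw (which takes int) now needs explicit casts in both directions. A small sketch of that pattern, under the same float-width assumption:

    #include <iomanip>
    #include <sstream>
    #include <string>
    #include <vector>

    // Sketch: widths are kept as float, as in TablePrinter.
    void PadCell(std::stringstream& ss, std::vector<float>& widths,
                 size_t col, const std::string& text) {
      size_t max_width = text.length();
      if (static_cast<float>(max_width) > widths[col])  // size_t vs float: cast
        widths[col] = static_cast<float>(max_width);
      ss << std::setw(static_cast<int>(widths[col])) << text;  // setw takes int
    }
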
19 changes: 11 additions & 8 deletions paddle/fluid/ir_adaptor/translator/program_translator.cc
@@ -58,18 +58,21 @@ const std::unordered_set<std::string> ProgramTranslator::unsupported_ops = {
static std::vector<uint64_t> GetCondOpIds(const BlockDesc& src_block,
uint64_t first_id) {
std::vector<uint64_t> op_list = {first_id};
if (src_block.Op(first_id + 1)->Type() == "logical_not") {
if (src_block.Op(static_cast<int>(first_id + 1))->Type() == "logical_not") {
op_list.emplace_back(first_id + 1);
}
if (src_block.Op(first_id + 2)->Type() == "conditional_block") {
if (src_block.Op(static_cast<int>(first_id + 2))->Type() ==
"conditional_block") {
op_list.emplace_back(first_id + 2);
}
if (src_block.Op(first_id + 3)->Type() == "cast") {
if (src_block.Op(static_cast<int>(first_id + 3))->Type() == "cast") {
op_list.emplace_back(first_id + 3);
}
size_t output_size = src_block.Op(first_id)->Output("Out").size();
size_t output_size =
src_block.Op(static_cast<int>(first_id))->Output("Out").size();
for (size_t i = 0; i < output_size; i++) {
if (src_block.Op(first_id + 4 + i)->Type() == "select_input") {
if (src_block.Op(static_cast<int>(first_id + 4 + i))->Type() ==
"select_input") {
op_list.emplace_back(first_id + 4 + i);
}
}
@@ -80,7 +83,7 @@ ConditionBlockCombination::ConditionBlockCombination(
const ::paddle::framework::BlockDesc& src_block,
const std::vector<uint64_t>& op_ids) {
for (auto op_id : op_ids) {
op_list_.emplace_back(src_block.Op(op_id));
op_list_.emplace_back(src_block.Op(static_cast<int>(op_id)));
}
PADDLE_ENFORCE(Verify(op_list_),
platform::errors::NotFound(
@@ -224,7 +227,7 @@ void ProgramTranslator::TranslateBlock(const BlockDesc& src_block,
if (translate_completed.count(op_id) && translate_completed.at(op_id)) {
continue;
}
auto op = src_block.Op(op_id);
auto op = src_block.Op(static_cast<int>(op_id));
VLOG(8) << "=============>start to translate a op: " << op->Type();

PADDLE_ENFORCE_EQ(unsupported_ops.count(op->Type()),
@@ -256,7 +259,7 @@ void ProgramTranslator::TranslateBlock(const BlockDesc& src_block,
src_block.Op(id)->Type() == "assign",
"The operator at the end of the sub block needs to be assign");
yeild_inputs.emplace_back(
param_map_[src_block.Op(id)->Input("X")[0]].value);
param_map_[src_block.Op(static_cast<int>(id))->Input("X")[0]].value);
}
pir::AttributeMap attribute_map;
auto yeild_info = ctx_->GetRegisteredOpInfo(pir::YieldOp::name());
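
In program_translator.cc the translator tracks op ids as uint64_t, but BlockDesc::Op takes an int index, so each lookup now casts the id. A sketch with simplified stand-ins for those framework types (not the real declarations):

    #include <cstdint>
    #include <vector>

    struct OpDesc {};

    // Stand-in: the framework's BlockDesc::Op takes an int index.
    struct BlockDesc {
      std::vector<OpDesc> ops;
      OpDesc* Op(int idx) { return &ops[idx]; }
    };

    OpDesc* LookUpOp(BlockDesc& block, uint64_t op_id) {
      // uint64_t -> int is a narrowing (and sign-changing) conversion;
      // the cast makes it explicit for the check.
      return block.Op(static_cast<int>(op_id));
    }
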
2 changes: 1 addition & 1 deletion paddle/fluid/operators/controlflow/pylayer_op_helper.cc
@@ -47,7 +47,7 @@ static void FindAllPyLayerOpAndPyLayerGradOp(
for (size_t i = 1; i < program.Size(); ++i) {
auto &block = program.Block(i);
for (size_t j = 0; j < block.OpSize(); ++j) {
auto *op = block.Op(j);
auto *op = block.Op(static_cast<int>(j));
if (op->Type() == "pylayer") {
fwd_ops->emplace_back(op);
} else if (op->Type() == "pylayer_grad") {
4 changes: 2 additions & 2 deletions paddle/fluid/pir/dialect/operator/ir/op_attribute.cc
@@ -50,10 +50,10 @@ phi::Scalar ScalarAttribute::data() {

IntArrayAttribute IntArrayAttribute::Parse(pir::IrParser &parser) { // NOLINT
Token buket_token = parser.ConsumeToken();
std::vector<int32_t> vec{};
std::vector<int> vec{};
while (parser.PeekToken().val_ != "]") {
Token val_token = parser.ConsumeToken();
vec.push_back(atoll(val_token.val_.c_str()));
vec.push_back(atoi(val_token.val_.c_str()));
if (parser.PeekToken().val_ == "]") break;
parser.ConsumeToken();
}
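
The op_attribute.cc change above sidesteps a narrowing rather than casting it away: atoll returns long long, which would narrow when pushed into a vector of int, while atoi already yields int. A short sketch of the distinction (token handling simplified):

    #include <cstdlib>
    #include <string>
    #include <vector>

    std::vector<int> ParseInts(const std::vector<std::string>& tokens) {
      std::vector<int> vec;
      for (const std::string& tok : tokens) {
        // atoll(tok.c_str()) returns long long and would be flagged when
        // stored into an int element; atoi returns int, so no cast is needed.
        vec.push_back(std::atoi(tok.c_str()));
      }
      return vec;
    }
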
2 changes: 1 addition & 1 deletion paddle/fluid/pir/dialect/operator/ir/op_dialect.cc
@@ -126,7 +126,7 @@ pir::Type OperatorDialect::ParseType(pir::IrParser &parser) { // NOLINT
break;
}
parser.ConsumeToken();
parser.lexer->Unget(peek_token_val.size() - 1);
parser.lexer->Unget(static_cast<int>(peek_token_val.size() - 1));
if (parser.PeekToken().token_type_ != DIGIT) {
break;
}
12 changes: 8 additions & 4 deletions paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc
@@ -782,7 +782,8 @@ void HandleForSpecialOp(
vec_inputs.emplace_back();
continue;
}
auto new_in = GetNewInput(cur_in, *map_value_pair, i, op_item->name());
auto new_in = GetNewInput(
cur_in, *map_value_pair, static_cast<int>(i), op_item->name());
vec_inputs.push_back(new_in);
vec_inner_types.push_back(new_in.type());
}
@@ -801,7 +802,8 @@ void HandleForSpecialOp(
vec_inputs.emplace_back();
continue;
}
auto new_in = GetNewInput(cur_in, *map_value_pair, i, op_item->name());
auto new_in = GetNewInput(
cur_in, *map_value_pair, static_cast<int>(i), op_item->name());
vec_inputs.push_back(new_in);

if (new_in.type().isa<pir::VectorType>()) {
@@ -826,7 +828,8 @@ void HandleForSpecialOp(
vec_inputs.emplace_back();
continue;
}
auto new_in = GetNewInput(cur_in, *map_value_pair, i, op_item->name());
auto new_in = GetNewInput(
cur_in, *map_value_pair, static_cast<int>(i), op_item->name());
vec_inputs.push_back(new_in);

if (new_in.type().isa<pir::VectorType>()) {
@@ -850,7 +853,8 @@ void HandleForSpecialOp(
vec_inputs.emplace_back();
continue;
}
auto new_in = GetNewInput(cur_in, *map_value_pair, i, op_item->name());
auto new_in = GetNewInput(
cur_in, *map_value_pair, static_cast<int>(i), op_item->name());
vec_inputs.push_back(new_in);
}
}
24 changes: 16 additions & 8 deletions paddle/fluid/pybind/auto_parallel_py.cc
@@ -645,16 +645,20 @@ static void parse_attrs(PyObject *obj,
phi::distributed::InferSpmdContext *ctx,
const size_t arg_pos) {
if (PyBool_Check(first_item)) {
auto attrs = CastPyArg2Booleans(obj, infer_spmd_string, arg_pos);
auto attrs = CastPyArg2Booleans(
obj, infer_spmd_string, static_cast<ssize_t>(arg_pos));
ctx->EmplaceBackAttr(attrs);
} else if (PyCheckInteger(first_item)) {
auto attrs = CastPyArg2Ints(obj, infer_spmd_string, arg_pos);
auto attrs =
CastPyArg2Ints(obj, infer_spmd_string, static_cast<ssize_t>(arg_pos));
ctx->EmplaceBackAttr(attrs);
} else if (PyLong_Check(first_item)) {
auto attrs = CastPyArg2Longs(obj, infer_spmd_string, arg_pos);
auto attrs =
CastPyArg2Longs(obj, infer_spmd_string, static_cast<ssize_t>(arg_pos));
ctx->EmplaceBackAttr(attrs);
} else if (PyFloat_Check(first_item)) {
auto attrs = CastPyArg2Floats(obj, infer_spmd_string, arg_pos);
auto attrs =
CastPyArg2Floats(obj, infer_spmd_string, static_cast<ssize_t>(arg_pos));
ctx->EmplaceBackAttr(attrs);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
@@ -671,16 +675,20 @@ static void parse_attr(PyObject *obj,
phi::distributed::InferSpmdContext *ctx,
const size_t arg_pos) {
if (PyBool_Check(obj)) {
auto attr = CastPyArg2Boolean(obj, infer_spmd_string, arg_pos);
auto attr = CastPyArg2Boolean(
obj, infer_spmd_string, static_cast<ssize_t>(arg_pos));
ctx->EmplaceBackAttr(attr);
} else if (PyCheckInteger(obj)) {
auto attr = CastPyArg2Int(obj, infer_spmd_string, arg_pos);
auto attr =
CastPyArg2Int(obj, infer_spmd_string, static_cast<ssize_t>(arg_pos));
ctx->EmplaceBackAttr(attr);
} else if (PyLong_Check(obj)) {
auto attr = CastPyArg2Long(obj, infer_spmd_string, arg_pos);
auto attr =
CastPyArg2Long(obj, infer_spmd_string, static_cast<ssize_t>(arg_pos));
ctx->EmplaceBackAttr(attr);
} else if (PyFloat_Check(obj)) {
auto attr = CastPyArg2Float(obj, infer_spmd_string, arg_pos);
auto attr =
CastPyArg2Float(obj, infer_spmd_string, static_cast<ssize_t>(arg_pos));
ctx->EmplaceBackAttr(attr);
} else { // TODO(ljz) support other types
PADDLE_THROW(platform::errors::InvalidArgument(
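
The pybind helpers in auto_parallel_py.cc appear to take the argument position as ssize_t, while the callers receive it as size_t, hence the static_cast<ssize_t> at each call. A hedged sketch under that assumption (the helper below is a stub, not the real CastPyArg2Boolean):

    #include <cstddef>
    #include <sys/types.h>  // ssize_t (POSIX); assumed by this sketch

    // Stub standing in for a helper that takes the argument position as ssize_t.
    bool CastArgAtPos(void* obj, const char* api_name, ssize_t arg_pos) {
      return obj != nullptr && api_name != nullptr && arg_pos >= 0;
    }

    void ParseAttrSketch(void* obj, size_t arg_pos) {
      // size_t (unsigned) -> ssize_t (signed) changes signedness, which the
      // check reports, so the conversion is made explicit at the call site.
      CastArgAtPos(obj, "infer_spmd", static_cast<ssize_t>(arg_pos));
    }
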
(changes in another file; file path not shown in this view)
@@ -38,7 +38,7 @@ ProcessMesh GetSubProcessMesh(const ProcessMesh& mesh, int64_t axis) {
for (int64_t i = 0; i < shape_of_axis; ++i) {
coord[axis] = i;
int64_t rank = coord.back();
for (int64_t j = coord.size() - 2; j >= 0; --j) {
for (int64_t j = static_cast<int64_t>(coord.size() - 2); j >= 0; --j) {
rank += coord[j] * mesh.dim_size(j + 1);
}
process_ids.emplace_back(rank);
@@ -58,7 +58,8 @@ int64_t FindFirstDiffShardAxis(const TensorDistAttr& in_dist_attr,
const auto& out_dims_mapping = out_dist_attr.dims_mapping();
int64_t axis = -1;

for (int64_t i = in_dims_mapping.size() - 1; i >= 0; --i) {
for (int64_t i = static_cast<int64_t>(in_dims_mapping.size() - 1); i >= 0;
--i) {
if (in_dims_mapping[i] != out_dims_mapping[i]) {
axis = i;
break;
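
The reverse loops in the hunks above need a signed index: the counter has to step below zero to terminate, so the size_t from size() is converted to int64_t up front, and with the check enabled that conversion must be spelled out. A minimal sketch of the same loop shape (assumes both vectors have equal length; names are illustrative):

    #include <cstdint>
    #include <vector>

    // Returns the last axis where the two mappings differ, or -1 if none.
    int64_t LastDiffAxis(const std::vector<int64_t>& in_mapping,
                         const std::vector<int64_t>& out_mapping) {
      int64_t axis = -1;
      // Signed index so that i can reach -1 and end the loop; the size_t ->
      // int64_t conversion is explicit to satisfy the check.
      for (int64_t i = static_cast<int64_t>(in_mapping.size()) - 1; i >= 0; --i) {
        if (in_mapping[i] != out_mapping[i]) {
          axis = i;
          break;
        }
      }
      return axis;
    }
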
(changes in another file; file path not shown in this view)
@@ -53,7 +53,7 @@ void SToSReshardFunction::Eval(phi::DeviceContext* dev_ctx,
const auto& in_process_ids = in_process_mesh.process_ids();
auto dtype = in.dtype();
const auto& logical_ddim = in.dims();
int64_t nranks = in_process_ids.size();
int64_t nranks = static_cast<int64_t>(in_process_ids.size());
int in_split_axis =
GetSplitAxisWithDimsMapping(in.dist_attr().dims_mapping()).begin()->first;
int out_split_axis =
(remaining changed files not shown in this view)
