
Commit

[clang-tidy] No.34,36 enable performance-noexcept-move-constructor,modernize-use-transparent-functors (PaddlePaddle#56261)

* fix

* fix

* CI

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* CI

* fix

* CI
enkilee authored and BeingGod committed Sep 9, 2023
1 parent 40feaa9 commit fc54b40
Showing 28 changed files with 84 additions and 77 deletions.
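
Both checks enabled here shape the edits below: performance-noexcept-move-constructor requires move constructors and move-assignment operators to be declared noexcept, and modernize-use-transparent-functors prefers the diamond (transparent) form of standard functor objects when the operand type can be deduced. A minimal sketch of the functor pattern, using hypothetical names rather than code from this diff:

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

int64_t Numel(const std::vector<int64_t>& dims) {
  // modernize-use-transparent-functors: std::multiplies<int64_t>() becomes
  // std::multiplies<>(); call sites that deliberately keep the typed functor
  // are silenced with // NOLINT in this commit.
  return std::accumulate(
      dims.begin(), dims.end(), static_cast<int64_t>(1), std::multiplies<>());
}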
4 changes: 2 additions & 2 deletions .clang-tidy
@@ -186,7 +186,7 @@ modernize-use-equals-default,
-modernize-use-noexcept,
modernize-use-nullptr,
modernize-use-override,
-modernize-use-transparent-functors,
modernize-use-transparent-functors,
-modernize-use-uncaught-exceptions,
performance-faster-string-find,
-performance-for-range-copy,
@@ -197,7 +197,7 @@ performance-inefficient-string-concatenation,
-performance-move-const-arg,
-performance-move-constructor-init,
-performance-no-automatic-move,
-performance-noexcept-move-constructor,
performance-noexcept-move-constructor,
-performance-trivially-destructible,
-performance-type-promotion-in-math-fn,
-performance-unnecessary-copy-initialization,
@@ -78,7 +78,8 @@ DistModelDataBuf& DistModelDataBuf::operator=(const DistModelDataBuf& other) {
return *this;
}

DistModelDataBuf& DistModelDataBuf::operator=(DistModelDataBuf&& other) {
DistModelDataBuf& DistModelDataBuf::operator=(
DistModelDataBuf&& other) noexcept {
data_ = other.data_;
memory_owned_ = other.memory_owned_;
length_ = other.length_;
@@ -88,7 +89,7 @@ DistModelDataBuf& DistModelDataBuf::operator=(DistModelDataBuf&& other) {
return *this;
}

DistModelDataBuf::DistModelDataBuf(DistModelDataBuf&& other)
DistModelDataBuf::DistModelDataBuf(DistModelDataBuf&& other) noexcept
: data_(other.data_),
length_(other.length_),
memory_owned_(other.memory_owned_) {
@@ -61,8 +61,8 @@ class DistModelDataBuf {
void Resize(size_t length);

DistModelDataBuf& operator=(const DistModelDataBuf& other);
DistModelDataBuf& operator=(DistModelDataBuf&& other);
DistModelDataBuf(DistModelDataBuf&& other);
DistModelDataBuf& operator=(DistModelDataBuf&& other) noexcept;
DistModelDataBuf(DistModelDataBuf&& other) noexcept;
DistModelDataBuf(const DistModelDataBuf& other);

private:
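
The DistModelDataBuf changes above are representative of why the noexcept check matters: std::vector and other standard containers only move their elements during reallocation when the element's move constructor is noexcept, and otherwise fall back to copying. A small self-contained illustration with a hypothetical type, not taken from this diff:

#include <iostream>
#include <vector>

struct Tracked {
  Tracked() = default;
  Tracked(const Tracked&) { std::cout << "copy\n"; }
  // Remove noexcept here and the reserve() below prints "copy" instead.
  Tracked(Tracked&&) noexcept { std::cout << "move\n"; }
};

int main() {
  std::vector<Tracked> v(1);
  v.reserve(v.capacity() + 1);  // forces reallocation; prints "move"
  return 0;
}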
@@ -70,7 +70,7 @@ static int64_t GetMemorySize(
std::accumulate(dims.begin(),
dims.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>());
std::multiplies<int64_t>()); // NOLINT
}

// Split all variables in the graph into phi::DenseTensor and
@@ -121,7 +121,7 @@ int64_t MemoryReusePass::GetMemorySize(const details::VarHandle &var) const {
return std::accumulate(shapes.begin(),
shapes.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) *
std::multiplies<>()) *
sizeof_dtype;
}

@@ -354,8 +354,9 @@ void ConvBiasFusePass::FuseConvBias(ir::Graph* graph,
"must have same shape, but they are different: %s, %s.",
conv_bias_tensor->dims(),
eltwise_bias_tensor->dims()));
*conv_bias_tensor = tensor_apply_eltwise(
*conv_bias_tensor, *eltwise_bias_tensor, std::plus<float>());
*conv_bias_tensor = tensor_apply_eltwise(*conv_bias_tensor,
*eltwise_bias_tensor,
std::plus<float>()); // NOLINT

conv->Op()->SetOutput("Output",
std::vector<std::string>({eltwise_out->Name()}));
@@ -141,14 +141,15 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const {
if ((reshape1_shape[i] == unk_dim_idx) && (i != 0)) {
// there is no sufficient info
if (!all_positive) return;
reshape1_shape[i] = std::accumulate(x_shape1.begin(),
x_shape1.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) /
std::accumulate(reshape1_shape.begin(),
reshape1_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>());
reshape1_shape[i] =
std::accumulate(x_shape1.begin(),
x_shape1.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) / // NOLINT
std::accumulate(reshape1_shape.begin(),
reshape1_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>()); // NOLINT
break;
}
}
@@ -160,14 +161,15 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const {
if ((reshape2_shape[i] == unk_dim_idx) && (i != 0)) {
// there is no sufficient info
if (!all_positive) return;
reshape2_shape[i] = std::accumulate(x_shape2.begin(),
x_shape2.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) /
std::accumulate(reshape2_shape.begin(),
reshape2_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>());
reshape2_shape[i] =
std::accumulate(x_shape2.begin(),
x_shape2.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) / // NOLINT
std::accumulate(reshape2_shape.begin(),
reshape2_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>()); // NOLINT
break;
}
}
34 changes: 18 additions & 16 deletions paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
@@ -141,14 +141,15 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
if ((reshape1_shape[i] == unk_dim_idx) && (i != 0)) {
// there is no sufficient info
if (!all_positive) return;
reshape1_shape[i] = std::accumulate(x_shape1.begin(),
x_shape1.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) /
std::accumulate(reshape1_shape.begin(),
reshape1_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>());
reshape1_shape[i] =
std::accumulate(x_shape1.begin(),
x_shape1.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) / // NOLINT
std::accumulate(reshape1_shape.begin(),
reshape1_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>()); // NOLINT
break;
}
}
@@ -160,14 +161,15 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
if ((reshape2_shape[i] == unk_dim_idx) && (i != 0)) {
// there is no sufficient info
if (!all_positive) return;
reshape2_shape[i] = std::accumulate(x_shape2.begin(),
x_shape2.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) /
std::accumulate(reshape2_shape.begin(),
reshape2_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>());
reshape2_shape[i] =
std::accumulate(x_shape2.begin(),
x_shape2.end(),
static_cast<int64_t>(1),
std::multiplies<int64_t>()) / // NOLINT
std::accumulate(reshape2_shape.begin(),
reshape2_shape.end(),
static_cast<int64_t>(-1),
std::multiplies<int64_t>()); // NOLINT
break;
}
}
@@ -97,7 +97,7 @@ void MemoryOptimizePass::CollectLifeCycle(
auto var_bytes = std::accumulate(in_shape.begin(),
in_shape.end(),
(int64_t)1,
std::multiplies<int64_t>());
std::multiplies<>());
persis_byte +=
paddle::framework::SizeOfType(node->Var()->GetDataType()) *
var_bytes;
@@ -183,8 +183,8 @@ void MemoryOptimizePass::CollectVarMemorySize(
if (v < 0) v = fake_batch_size;
}

int size = std::accumulate(
shape.begin(), shape.end(), 1, std::multiplies<int>());
int size =
std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>());
(*space_table)[node->Var()->Name()] =
size * paddle::framework::SizeOfType(node->Var()->GetDataType());
}
4 changes: 2 additions & 2 deletions paddle/fluid/inference/api/api.cc
@@ -40,7 +40,7 @@ int PaddleDtypeSize(PaddleDType dtype) {
}
}

PaddleBuf::PaddleBuf(PaddleBuf &&other)
PaddleBuf::PaddleBuf(PaddleBuf &&other) noexcept
: data_(other.data_),
length_(other.length_),
memory_owned_(other.memory_owned_) {
@@ -74,7 +74,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
return *this;
}

PaddleBuf &PaddleBuf::operator=(PaddleBuf &&other) {
PaddleBuf &PaddleBuf::operator=(PaddleBuf &&other) noexcept {
// only the buffer with external memory can be copied
data_ = other.data_;
length_ = other.length_;
4 changes: 2 additions & 2 deletions paddle/fluid/inference/api/paddle_api.h
@@ -137,9 +137,9 @@ class PD_INFER_DECL PaddleBuf {

~PaddleBuf() { Free(); }
PaddleBuf& operator=(const PaddleBuf&);
PaddleBuf& operator=(PaddleBuf&&);
PaddleBuf& operator=(PaddleBuf&&) noexcept;
PaddleBuf() = default;
PaddleBuf(PaddleBuf&& other);
PaddleBuf(PaddleBuf&& other) noexcept;

private:
void Free();
9 changes: 5 additions & 4 deletions paddle/fluid/operators/fused/resnet_unit_op.cc
@@ -21,10 +21,11 @@ namespace operators {
// Shape of bitmask
static framework::DDim GetBitmaskDims(std::vector<int> out_shape) {
int c = out_shape.back();
int64_t nhw =
std::accumulate(
out_shape.begin(), out_shape.end(), 1, std::multiplies<int>()) /
c;
int64_t nhw = std::accumulate(out_shape.begin(),
out_shape.end(),
1,
std::multiplies<int>()) / // NOLINT
c;
int32_t c_int32_elems = ((c + 63) & ~63) / 32;
int32_t nhw_int32_elems = ((nhw + 31) & ~31);
std::vector<int> bitmask_shape = {nhw_int32_elems, c_int32_elems, 1};
4 changes: 2 additions & 2 deletions paddle/ir/core/op_base.cc
@@ -18,13 +18,13 @@ InterfaceValue::~InterfaceValue() {
if (model_) free(model_);
}

InterfaceValue::InterfaceValue(InterfaceValue&& val) {
InterfaceValue::InterfaceValue(InterfaceValue&& val) noexcept {
type_id_ = val.type_id_;
model_ = val.model_;
val.model_ = nullptr;
}

InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) {
InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) noexcept {
swap(std::move(val));
return *this;
}
4 changes: 2 additions & 2 deletions paddle/ir/core/op_base.h
@@ -43,9 +43,9 @@ class IR_API InterfaceValue {
InterfaceValue() = default;
explicit InterfaceValue(TypeId type_id) : type_id_(type_id) {}
InterfaceValue(const InterfaceValue &) = delete;
InterfaceValue(InterfaceValue &&);
InterfaceValue(InterfaceValue &&) noexcept;
InterfaceValue &operator=(const InterfaceValue &) = delete;
InterfaceValue &operator=(InterfaceValue &&);
InterfaceValue &operator=(InterfaceValue &&) noexcept;
~InterfaceValue();
void swap(InterfaceValue &&val) {
using std::swap;
4 changes: 2 additions & 2 deletions paddle/phi/api/include/tensor.h
@@ -96,7 +96,7 @@ class PADDLE_API Tensor final {
/**
* @brief Construct a new Tensor object by move
*/
Tensor(Tensor&&) = default;
Tensor(Tensor&&) noexcept = default;

/**
* @brief Construct a new Tensor object by a TensorBase pointer
@@ -522,7 +522,7 @@ class PADDLE_API Tensor final {
* @param x
* @return Tensor&
*/
Tensor& operator=(Tensor&& x) &;
Tensor& operator=(Tensor&& x) & noexcept;

/**
* @brief Tensor operants
2 changes: 1 addition & 1 deletion paddle/phi/api/lib/tensor.cc
@@ -399,7 +399,7 @@ void Tensor::reset() {

Tensor &Tensor::operator=(const Tensor &x) & = default;

Tensor &Tensor::operator=(Tensor &&x) & {
Tensor &Tensor::operator=(Tensor &&x) &noexcept {
impl_ = std::move(x.impl_);
autograd_meta_ = std::move(x.autograd_meta_);
name_ = std::move(x.name_);
4 changes: 2 additions & 2 deletions paddle/phi/backends/cpu/cpu_context.cc
@@ -65,9 +65,9 @@ CPUContext::CPUContext(const Place& place)

CPUContext::~CPUContext() = default;

CPUContext::CPUContext(CPUContext&&) = default;
CPUContext::CPUContext(CPUContext&&) = default; // NOLINT

CPUContext& CPUContext::operator=(CPUContext&&) = default;
CPUContext& CPUContext::operator=(CPUContext&&) = default; // NOLINT

Eigen::DefaultDevice* CPUContext::eigen_device() const {
return impl_->GetEigenDevice();
4 changes: 2 additions & 2 deletions paddle/phi/backends/gpu/gpu_context.cc
@@ -814,9 +814,9 @@ struct GPUContext::Impl {

thread_local AttributeMap GPUContext::Impl::dnn_attrs_ = {};

GPUContext::GPUContext(GPUContext&&) = default;
GPUContext::GPUContext(GPUContext&&) = default; // NOLINT

GPUContext& GPUContext::operator=(GPUContext&&) = default;
GPUContext& GPUContext::operator=(GPUContext&&) = default; // NOLINT

GPUContext::GPUContext(const GPUPlace& place, bool init, int stream_priority)
: DeviceContext(), impl_(std::make_unique<Impl>(place)) {
2 changes: 1 addition & 1 deletion paddle/phi/core/dense_tensor.cc
@@ -79,7 +79,7 @@ DenseTensor& DenseTensor::operator=(const DenseTensor& other) {
return *this;
}

DenseTensor& DenseTensor::operator=(DenseTensor&& other) {
DenseTensor& DenseTensor::operator=(DenseTensor&& other) noexcept {
meta_ = std::move(other.meta_);
std::swap(holder_, other.holder_);
storage_properties_ = std::move(other.storage_properties_);
2 changes: 1 addition & 1 deletion paddle/phi/core/dense_tensor.h
@@ -69,7 +69,7 @@ class DenseTensor : public TensorBase,
/// \brief DenseTensor shallow copy assignment.
DenseTensor& operator=(const DenseTensor& other);

DenseTensor& operator=(DenseTensor&& other);
DenseTensor& operator=(DenseTensor&& other) noexcept;

DenseTensor();

6 changes: 3 additions & 3 deletions paddle/phi/core/device_context.cc
@@ -312,12 +312,12 @@ DeviceContext::DeviceContext(const DeviceContext& other) {
#endif
}

DeviceContext::DeviceContext(DeviceContext&& other) {
DeviceContext::DeviceContext(DeviceContext&& other) noexcept {
impl_ = std::move(other.impl_);
}

DeviceContext& DeviceContext::operator=(DeviceContext&& other) = default;

DeviceContext& DeviceContext::operator=(DeviceContext&& other) noexcept =
default;
DeviceContext::~DeviceContext() = default;

void DeviceContext::SetAllocator(const Allocator* allocator) {
4 changes: 2 additions & 2 deletions paddle/phi/core/device_context.h
@@ -48,12 +48,12 @@ class PADDLE_API DeviceContext {
/**
* @brief Move construct.
*/
DeviceContext(DeviceContext&&);
DeviceContext(DeviceContext&&) noexcept;

/**
* @brief Move assign operator.
*/
DeviceContext& operator=(DeviceContext&&);
DeviceContext& operator=(DeviceContext&&) noexcept;

/**
* @brief Default destruct.
2 changes: 1 addition & 1 deletion paddle/phi/core/sparse_coo_tensor.cc
@@ -21,7 +21,7 @@ SparseCooTensor::SparseCooTensor() {
this->SetMember(non_zero_indices, non_zero_elements, {1}, true);
}

SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) { // NOLINT
SparseCooTensor::SparseCooTensor(SparseCooTensor&& other) noexcept {
this->non_zero_elements_ = other.non_zero_elements_;
this->non_zero_indices_ = other.non_zero_indices_;
this->coalesced_ = other.coalesced_;
2 changes: 1 addition & 1 deletion paddle/phi/core/sparse_coo_tensor.h
@@ -55,7 +55,7 @@ class SparseCooTensor : public TensorBase,
SparseCooTensor(const SparseCooTensor& other);

/// \brief move constructor
SparseCooTensor(SparseCooTensor&& other);
SparseCooTensor(SparseCooTensor&& other) noexcept;

/// \brief SparseCooTensor shallow copy assignment.
SparseCooTensor& operator=(const SparseCooTensor& other);
2 changes: 1 addition & 1 deletion paddle/phi/core/string_tensor.cc
@@ -49,7 +49,7 @@ StringTensor& StringTensor::operator=(const StringTensor& other) {
return *this;
}

StringTensor& StringTensor::operator=(StringTensor&& other) { // NOLINT
StringTensor& StringTensor::operator=(StringTensor&& other) noexcept {
meta_ = std::move(other.meta_);
std::swap(holder_, other.holder_);
return *this;
(The remaining changed files in this commit are not shown in this view.)