
Commit c596127

[CPU] Apply 'readability-implicit-bool-conversion' clang-tidy remarks

1 parent 60b09ad


46 files changed: +111 -103 lines
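The rewrites below follow a few recurring patterns. As orientation, here is a minimal before/after sketch of each pattern; the names are hypothetical, not taken from this commit:

    #include <cstddef>
    #include <map>

    // Hypothetical names; each pair mirrors a rewrite pattern seen in the hunks below.
    void rewrite_patterns(int* raw_ptr, unsigned type_mask, bool with_groups,
                          const std::map<int, int>& prim_args) {
        // Pointer -> bool: spell out the null check.
        bool allocated = raw_ptr != nullptr;         // was: bool allocated = raw_ptr;

        // Integer/bitmask -> bool: compare against zero.
        bool is_blocked = (type_mask & 0x1u) != 0u;  // was: bool is_blocked = type_mask & 0x1u;

        // map::count() used as a condition: compare against zero.
        if (prim_args.count(0) != 0u) {              // was: if (prim_args.count(0)) {
        }

        // bool used in arithmetic: make the promotion explicit.
        std::size_t dim = static_cast<std::size_t>(with_groups) + 2;  // was: with_groups + 2

        (void)allocated;
        (void)is_blocked;
        (void)dim;
    }

These rewrites are behavior-preserving; only the source-level intent is spelled out.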

src/plugins/intel_cpu/src/.clang-tidy (+5 -2)

@@ -32,7 +32,6 @@
 # -google-explicit-constructor,
 # -google-readability-casting,
 # -modernize-avoid-c-arrays,
-# -readability-implicit-bool-conversion,
 # -readability-magic-numbers, cppcoreguidelines-avoid-magic-numbers
 # -readability-function-cognitive-complexity. Reasonable way to enforce splitting complex code into simple functions
 # Remove warning disablement after CI pipeline migrates to C++17 from C++20 for:

@@ -47,6 +46,7 @@ Checks: >
 modernize-*,
 cppcoreguidelines-prefer-member-initializer,
 readability-else-after-return,
+readability-implicit-bool-conversion,
 -bugprone-easily-swappable-parameters,
 -bugprone-exception-escape,
 -bugprone-implicit-widening-of-multiplication-result,

@@ -66,7 +66,6 @@ Checks: >
 -modernize-use-std-numbers,
 -modernize-use-trailing-return-type,
 -readability-identifier-length,
--readability-implicit-bool-conversion,
 -readability-magic-numbers,
 -cppcoreguidelines-avoid-magic-numbers,
 -readability-uppercase-literal-suffix,

@@ -91,6 +90,10 @@ CheckOptions:
   value: google
 - key: modernize-use-auto.MinTypeNameLength
   value: "3"
+- key: readability-implicit-bool-conversion.AllowIntegerConditions
+  value: true
+- key: readability-implicit-bool-conversion.AllowPointerConditions
+  value: true
 ### To be considered to enable:
 # # Unifies the usage of the statements
 # - key: readability-braces-around-statements.ShortStatementLines
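A note on the two new options, based on the documented behavior of the check rather than on anything in this commit: with AllowIntegerConditions and AllowPointerConditions set to true, integers and pointers may still be used directly as condition expressions, while implicit bool conversions elsewhere remain diagnosed. A hypothetical sketch:

    #include <cstddef>

    // Hypothetical example, not from the OpenVINO sources.
    bool sketch(const int* ptr, std::size_t count) {
        if (ptr) {             // allowed by AllowPointerConditions: pointer as condition
            while (count) {    // allowed by AllowIntegerConditions: integer as condition
                --count;
            }
        }
        // Outside a condition the check still asks for the explicit form:
        bool has_data = ptr != nullptr;  // 'bool has_data = ptr;' would be flagged
        return has_data;
    }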

src/plugins/intel_cpu/src/cpu_memory.cpp (+1 -1)

@@ -115,7 +115,7 @@ Memory::Memory(dnnl::engine eng, MemoryDescPtr desc, MemoryBlockPtr block)
     if (m_pMemDesc->getPrecision() == element::string) {
         OPENVINO_THROW("[CPU] Memory object can't be created for string data.");
     }
-    bool memAllocated = m_blockHandle->getRawPtr();
+    bool memAllocated = m_blockHandle->getRawPtr() != nullptr;

     create(m_pMemDesc, nullptr, !memAllocated);
 }

src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp (+1 -1)

@@ -37,7 +37,7 @@ struct regs_to_spill {
     static std::vector<Xbyak::Reg> get(const std::set<snippets::Reg>& live_regs) {
         std::vector<Xbyak::Reg> regs_to_spill;
         auto push_if_live = [&live_regs, &regs_to_spill](Xbyak::Reg&& reg) {
-            if (live_regs.empty() || live_regs.count(Xbyak2SnippetsReg(reg))) {
+            if (live_regs.empty() || (live_regs.count(Xbyak2SnippetsReg(reg)) != 0u)) {
                 regs_to_spill.emplace_back(reg);
             }
         };

src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp (+3 -3)

@@ -79,9 +79,9 @@ static bool is_segfault_detector_emitter(const intel_cpu::jit_emitter* emitter)
     // default active for typical tensor memory access emitters
     bool ret = false;
     ret = is_load_emitter(emitter) || is_store_emitter(emitter) ||
-          dynamic_cast<const intel_cpu::jit_brgemm_emitter*>(emitter) ||
-          dynamic_cast<const intel_cpu::jit_brgemm_copy_b_emitter*>(emitter) ||
-          dynamic_cast<const intel_cpu::jit_kernel_emitter*>(emitter);
+          (dynamic_cast<const intel_cpu::jit_brgemm_emitter*>(emitter) != nullptr) ||
+          (dynamic_cast<const intel_cpu::jit_brgemm_copy_b_emitter*>(emitter) != nullptr) ||
+          (dynamic_cast<const intel_cpu::jit_kernel_emitter*>(emitter) != nullptr);
     return ret;
     // use below code to active all emitters for extend usage
     // return !dynamic_cast<const jit_nop_emitter*>(emitter);

src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp (+1 -1)

@@ -202,7 +202,7 @@ void jit_kernel_static_emitter::init_data_pointers(const std::vector<Xbyak::Reg6
         h->mov(data_ptr_regs[num_params + i], h->ptr[reg_runtime_params + GET_OFF(buffer_scratchpad_ptr)]);
     }
     size_t i = 0;
-    for (; i < num_params - last_iter_explicitly; i++) {
+    for (; i < num_params - static_cast<size_t>(last_iter_explicitly); i++) {
         if (i < num_inputs) {
             h->mov(data_ptr_regs[i], h->ptr[reg_runtime_params + GET_OFF(src_ptrs) + i * sizeof(void*)]);
         } else {

src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_amx.cpp (+1 -1)

@@ -232,7 +232,7 @@ void BrgemmAMXKernelExecutor::configure_tiles_if_needed(amx_tile_config_t* confi
                                                         dnnl_dim_t N,
                                                         dnnl_dim_t K) {
     auto compatible = [&](amx_tile_config_t* rhs) {
-        return rhs && rhs->M == M && rhs->N == N && rhs->K == K;
+        return (rhs != nullptr) && rhs->M == M && rhs->N == N && rhs->K == K;
     };
     if (config && !compatible(config)) {
         config->M = M;

src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_base.cpp (+2 -2)

@@ -163,8 +163,8 @@ void BrgemmBaseKernelExecutor::execute_brgemm_kernel(
     brgemm_p.ptr_D = dst;
     brgemm_p.ptr_buf = scratch;
     brgemm_p.ptr_bias = nullptr;
-    brgemm_p.do_post_ops = with_comp;
-    brgemm_p.do_apply_comp = with_comp;
+    brgemm_p.do_post_ops = static_cast<size_t>(with_comp);
+    brgemm_p.do_apply_comp = static_cast<size_t>(with_comp);
     brgemm_p.skip_accm = 0;
     brgemm_p.BS = 1;  // default value
     OV_CPU_JIT_EMITTER_ASSERT(kernel, "has nullptr Brgemm kernel");

src/plugins/intel_cpu/src/graph_optimizer.cpp (+1 -1)

@@ -2451,7 +2451,7 @@ void GraphOptimizer::FusePerformedAsScaleShiftAndFakeQuantize(Graph& graph) {

     const auto isSubnormal = [](const float value) {
         const auto* u32data = reinterpret_cast<const uint32_t*>(&value);
-        return (*u32data) && (((*u32data) & (0xFF << 23)) == 0);
+        return ((*u32data) != 0u) && (((*u32data) & (0xFF << 23)) == 0);
     };

     for (size_t i = 0; i < newInputScale.size(); i++) {

src/plugins/intel_cpu/src/node.cpp (+5 -5)

@@ -108,11 +108,11 @@ Node::Node(const std::shared_ptr<ov::Node>& op, GraphContext::CPtr ctx, const Sh
     }

     const auto& rtInfo = op->get_rt_info();
-    if (rtInfo.count("originalLayersNames")) {
+    if (rtInfo.count("originalLayersNames") != 0u) {
         originalLayers = getRTInfoValue(rtInfo, "originalLayersNames");
     }

-    if (rtInfo.count("parallelDomain")) {
+    if (rtInfo.count("parallelDomain") != 0u) {
         parallelDomain = getRTInfoValue(rtInfo, "parallelDomain");
     }

@@ -844,7 +844,7 @@ bool Node::outputShapeDataDependency() const {
     auto port_mask = shapeInference->get_port_mask();
     if (EMPTY_PORT_MASK != port_mask) {
         for (size_t i = 0; i < getParentEdges().size(); ++i) {
-            if ((port_mask & (1 << i)) && !getParentEdgeAt(i)->getParent()->isConstant()) {
+            if (((port_mask & (1 << i)) != 0u) && !getParentEdgeAt(i)->getParent()->isConstant()) {
                 return true;
             }
         }

@@ -1218,10 +1218,10 @@ void Node::toNumaNodeImpl(int numaNodeID) {
     }

     // mbind constant prim args to numa nodes
-    if (primArgs.count(DNNL_ARG_WEIGHTS)) {
+    if (primArgs.count(DNNL_ARG_WEIGHTS) != 0u) {
         mbind_move(primArgs[DNNL_ARG_WEIGHTS], numaNodeID);
     }
-    if (primArgs.count(DNNL_ARG_BIAS)) {
+    if (primArgs.count(DNNL_ARG_BIAS) != 0u) {
         mbind_move(primArgs[DNNL_ARG_BIAS], numaNodeID);
     }

src/plugins/intel_cpu/src/nodes/bin_conv.cpp (+2 -2)

@@ -1103,8 +1103,8 @@ void BinaryConvolution::createPrimitive() {
     jcp.ow = dstDims[3];

     bool with_groups = group > 1;
-    jcp.kh = weiDims[with_groups + 2];
-    jcp.kw = weiDims[with_groups + 3];
+    jcp.kh = weiDims[static_cast<int>(with_groups) + 2];
+    jcp.kw = weiDims[static_cast<int>(with_groups) + 3];

     jcp.t_pad = paddingL[0];
     jcp.b_pad = paddingR[0];

src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp (+2 -1)

@@ -86,7 +86,8 @@ struct CausalMaskPreprocess::ExecutorCausalMaskPreprocess : public CausalMaskPre
             bool cmask_eq0 = (j <= row);
             bool amask_eq0 = (pamask[j] == 0);
             bool padding_mask = (cmask_eq0 && amask_eq0);
-            pdst[j] = (padding_mask | (!cmask_eq0)) ? min_dtype : static_cast<T>(0);
+            pdst[j] =
+                (static_cast<int>(padding_mask) | static_cast<int>(!cmask_eq0)) ? min_dtype : static_cast<T>(0);
         }
         for (; j < kvLen; j++) {
             bool cmask_eq0 = (j <= row);

src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp (+4 -4)

@@ -869,19 +869,19 @@ struct ConvertFrom4BitPrecision<std::tuple<src_t, dst_t>> {
         auto dst = static_cast<dst_t*>(ctx.dstPtr);
         if (ctx.inType == ov::element::nf4) {
             parallel_for(ctx.size, [&](size_t i) {
-                dst[i] = static_cast<dst_t>(ConvertNF4::dequantize(get_u4(src[i / 2], i % 2)));
+                dst[i] = static_cast<dst_t>(ConvertNF4::dequantize(get_u4(src[i / 2], (i % 2) != 0u)));
             });
         } else if (ctx.inType == ov::element::u4) {
             parallel_for(ctx.size, [&](size_t i) {
-                dst[i] = static_cast<dst_t>(get_u4(src[i / 2], i % 2));
+                dst[i] = static_cast<dst_t>(get_u4(src[i / 2], (i % 2) != 0u));
             });
         } else if (ctx.inType == ov::element::i4) {
             parallel_for(ctx.size, [&](size_t i) {
-                dst[i] = static_cast<dst_t>(get_i4(src[i / 2], i % 2));
+                dst[i] = static_cast<dst_t>(get_i4(src[i / 2], (i % 2) != 0u));
             });
         } else if (ctx.inType == ov::element::f4e2m1) {
             parallel_for(ctx.size, [&](size_t i) {
-                dst[i] = static_cast<dst_t>(float4_e2m1::from_bits(get_u4(src[i / 2], i % 2)));
+                dst[i] = static_cast<dst_t>(float4_e2m1::from_bits(get_u4(src[i / 2], (i % 2) != 0u)));
             });
         } else {
             OPENVINO_THROW("cpu_convert doesn't support input data type: ", ctx.inType, ". Not implemented.");

src/plugins/intel_cpu/src/nodes/conv.cpp (+1 -1)

@@ -406,7 +406,7 @@ const std::vector<impl_desc_type>& Convolution::getDefaultImplPriority() {
     static const std::vector<impl_desc_type> priorities_wo_brgemm = [&] {
         std::vector<impl_desc_type> result;
         std::copy_if(priorities.begin(), priorities.end(), std::back_inserter(result), [](impl_desc_type type) {
-            return !(type & impl_desc_type::brgconv);
+            return (type & impl_desc_type::brgconv) == 0;
         });
         return result;
     }();

src/plugins/intel_cpu/src/nodes/convert.cpp (+1 -1)

@@ -77,7 +77,7 @@ void Convert::getSupportedDescriptors() {
 }

 bool Convert::isSupportedDesc(const MemoryDesc& desc) {
-    bool isSupported = desc.getType() & MemoryDescType::Blocked;
+    bool isSupported = (desc.getType() & MemoryDescType::Blocked) != 0;
     if (desc.getType() == MemoryDescType::DnnlBlocked) {
         isSupported &= desc.as<const DnnlMemoryDesc>()->hasEmptyExtraData();
     }

src/plugins/intel_cpu/src/nodes/deconv.cpp (+5 -4)

@@ -244,7 +244,7 @@ Deconvolution::Deconvolution(const std::shared_ptr<ov::Node>& op, const GraphCon
         autoPad = one_of(groupConvBackprop->get_auto_pad(), ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER);
     }
     for (size_t i = 0; i < deconvAttrs.dilation.size(); i++) {
-        deconvAttrs.kernel.push_back(weightDims[withGroups + 2 + i]);
+        deconvAttrs.kernel.push_back(weightDims[static_cast<int>(withGroups) + 2 + i]);
     }
 #if defined(OV_CPU_WITH_ACL)
     deconvAttrs.aclFastMath = context->getConfig().aclFastMath;

@@ -303,7 +303,7 @@ void Deconvolution::createDnnlCompatibleWeights() {
     } else {
         order = {1, 0};
     }
-    for (size_t i = 2 + withGroups; i < blockedDims.size(); i++) {
+    for (size_t i = 2 + static_cast<int>(withGroups); i < blockedDims.size(); i++) {
         order.push_back(i);
     }

@@ -611,7 +611,8 @@ void Deconvolution::getSupportedDescriptors() {
     // OV ConvBackWardData defines weight shape as [Conv_OC, Conv_IC, ....].
     // ONEDNN Deconv define weight shape as [Deconv_OC, Deconv_IC,...],
    // Deconv_OC = Conv_IC , Deconv_IC = Conv_OC
-    std::swap(dnnlCompatibleWeiDims[withGroups + 0], dnnlCompatibleWeiDims[withGroups + 1]);
+    std::swap(dnnlCompatibleWeiDims[static_cast<int>(withGroups) + 0],
+              dnnlCompatibleWeiDims[static_cast<int>(withGroups) + 1]);
     setPostOps(*attr, outShape.getStaticDims());

     if (isInt8) {

@@ -861,7 +862,7 @@ const std::vector<impl_desc_type>& Deconvolution::getDefaultImplPriority() {
     static const std::vector<impl_desc_type> priorities_wo_brgemm = [&] {
         std::vector<impl_desc_type> result;
         std::copy_if(priorities.begin(), priorities.end(), std::back_inserter(result), [](impl_desc_type type) {
-            return !(type & impl_desc_type::brgconv);
+            return (type & impl_desc_type::brgconv) == 0;
         });
         return result;
     }();

src/plugins/intel_cpu/src/nodes/eltwise.cpp (+2 -2)

@@ -156,8 +156,8 @@ const std::map<const ov::DiscreteTypeInfo, Eltwise::Initializer>& Eltwise::getIn
     {ov::op::v10::IsInf::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
         node.algorithm = Algorithm::EltwiseIsInf;
         const auto& attributes = ov::as_type_ptr<ov::op::v10::IsInf>(op)->get_attributes();
-        node.alpha = attributes.detect_negative;
-        node.beta = attributes.detect_positive;
+        node.alpha = static_cast<float>(attributes.detect_negative);
+        node.beta = static_cast<float>(attributes.detect_positive);
     }},
     {ov::op::v10::IsNaN::get_type_info_static(), [](const std::shared_ptr<ov::Node>& op, Eltwise& node) {
         node.algorithm = Algorithm::EltwiseIsNaN;

src/plugins/intel_cpu/src/nodes/executors/common/common_utils.hpp (+1 -1)

@@ -16,7 +16,7 @@
 namespace ov::intel_cpu {

 OV_CPU_MAYBE_UNUSED_FUNCTION static std::vector<float> getDeQuantizedScales(const MemoryArgs& memory) {
-    if (!memory.count(ARG_DST_DEQ_SCALE)) {
+    if (memory.count(ARG_DST_DEQ_SCALE) == 0u) {
         return {};
     }

src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp (+2 -2)

@@ -78,13 +78,13 @@ class DnnlFCExecutor : public Executor {
         m_scratchPadMemory = m_context->getScratchPad()->createScratchPadMem(newPrimMemDesc);
         m_primArgs[DNNL_ARG_SCRATCHPAD] = m_scratchPadMemory->getPrimitive();

-        if (m_primArgs.count(DNNL_ARG_WEIGHTS)) {
+        if (m_primArgs.count(DNNL_ARG_WEIGHTS) != 0u) {
             if (!mbind_move(m_primArgs[DNNL_ARG_WEIGHTS], numaNodeID)) {
                 DEBUG_LOG("[FullyConnected] move DNNL_ARG_WEIGHTS to node ", numaNodeID, " failed");
             }
         }

-        if (m_primArgs.count(DNNL_ARG_BIAS)) {
+        if (m_primArgs.count(DNNL_ARG_BIAS) != 0u) {
             if (!mbind_move(m_primArgs[DNNL_ARG_BIAS], numaNodeID)) {
                 DEBUG_LOG("[FullyConnected] move DNNL_ARG_BIAS to node ", numaNodeID, " failed");
             }

src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp (+2 -2)

@@ -222,7 +222,7 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs,
     DnnlPostOpsComposer
         dnnlpoc(postOps, context->getEngine(), dims, dims.size() - 1, isINT8, 1 << 0, memory, outputDataType);

-    if (memory.count(ARG_WEI | ARG_ATTR_SCALES)) {
+    if (memory.count(ARG_WEI | ARG_ATTR_SCALES) != 0u) {
         auto dstPrc = memory.at(ARG_WEI | ARG_ATTR_SCALES)->getPrecision();
         if (dstPrc != f8e8m0 || useDynamicQuantization) {
             dstPrc = ov::element::f32;

@@ -233,7 +233,7 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs,
             dstPrc);
     }

-    if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS)) {
+    if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS) != 0u) {
         auto dstPrc = useDynamicQuantization ? ov::element::u8 : ov::element::f32;
         dnnlpoc.appendDecompressionZeroPointsLegacy(memory.at(ARG_WEI | ARG_ATTR_ZERO_POINTS),
                                                     !attrs.weightsNonTransposed,

src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp (+2 -2)

@@ -156,14 +156,14 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const MatMulAttrs& attrs,
     const auto maxRank =
         std::max({srcDesc->getShape().getRank(), weiDesc->getShape().getRank(), dstDesc->getShape().getRank()});
     const auto normWeiDims = normalizeToRank(weiDesc->getShape().getStaticDims(), maxRank);
-    if (memory.count(ARG_WEI | ARG_ATTR_SCALES)) {
+    if (memory.count(ARG_WEI | ARG_ATTR_SCALES) != 0u) {
         auto dstPrc = ov::element::f32;
         dnnlpoc.appendDecompressionScales(memory.at(ARG_WEI | ARG_ATTR_SCALES),
                                           !weightsNonTransposed,
                                           dstPrc,
                                           normWeiDims);
     }
-    if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS)) {
+    if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS) != 0u) {
         // TODO: clarify oneDNN requirements on ZP precision
         auto zp = memory.at(ARG_WEI | ARG_ATTR_ZERO_POINTS);
         auto zpPrc = zp->getPrecision();

src/plugins/intel_cpu/src/nodes/executors/type_mask.hpp (+1 -1)

@@ -47,7 +47,7 @@ struct TypeMask {
     }
     // match
     bool operator&(const ov::element::Type precision) const {
-        return value & TypeMask(precision);
+        return (value & TypeMask(precision)) != 0u;
     }

     const uint64_t value;

src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.cpp (+1 -1)

@@ -108,7 +108,7 @@ void refine_anchors(const float* deltas,
             proposals[p_idx + 1] = y0;
             proposals[p_idx + 2] = x1;
             proposals[p_idx + 3] = y1;
-            proposals[p_idx + 4] = (min_box_W <= box_w) * (min_box_H <= box_h) * score;
+            proposals[p_idx + 4] = static_cast<int>(min_box_W <= box_w) * static_cast<int>(min_box_H <= box_h) * score;
         }
     });
 }

src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp (+6 -4)

@@ -65,11 +65,13 @@ void ExperimentalDetectronPriorGridGenerator::execute(const dnnl::stream& strm)
     const int layer_width = grid_w_ ? grid_w_ : getParentEdgeAt(INPUT_FEATUREMAP)->getMemory().getStaticDims()[3];
     const int layer_height = grid_h_ ? grid_h_ : getParentEdgeAt(INPUT_FEATUREMAP)->getMemory().getStaticDims()[2];
     const float step_w =
-        stride_w_ ? stride_w_
-                  : static_cast<float>(getParentEdgeAt(INPUT_IMAGE)->getMemory().getStaticDims()[3]) / layer_width;
+        (stride_w_ != 0.0f)
+            ? stride_w_
+            : static_cast<float>(getParentEdgeAt(INPUT_IMAGE)->getMemory().getStaticDims()[3]) / layer_width;
     const float step_h =
-        stride_h_ ? stride_h_
-                  : static_cast<float>(getParentEdgeAt(INPUT_IMAGE)->getMemory().getStaticDims()[2]) / layer_height;
+        (stride_h_ != 0.0f)
+            ? stride_h_
+            : static_cast<float>(getParentEdgeAt(INPUT_IMAGE)->getMemory().getStaticDims()[2]) / layer_height;

     const auto* bottom_data_0 = getSrcDataAtPortAs<const float>(0);
     auto* top_data_0 = getDstDataAtPortAs<float>(OUTPUT_ROIS);

src/plugins/intel_cpu/src/nodes/fullyconnected.cpp (+4 -4)

@@ -557,7 +557,7 @@ void FullyConnected::initSupportedPrimitiveDescriptors() {
     nodeConfig.inConfs.resize(srcDescs.size());

     for (const auto& desc : nodeDescriptors) {
-        if (m_atoi.count(desc.first)) {
+        if (m_atoi.count(desc.first) != 0u) {
             nodeConfig.inConfs[m_atoi[desc.first]] = desc.second;
         }
     }

@@ -601,23 +601,23 @@ void FullyConnected::needSplitMemoryForTensorParallel() {
         memory[ARG_DST] = getDstMemoryAtPort(0);
         tp_cfg.cached_dst = split_horizontal(context->getEngine(), dst, -1, tp_cfg.w_rank, tp_cfg.w_size, false);

-        if (memory.count(ARG_DST | ARG_ATTR_SCALES)) {
+        if (memory.count(ARG_DST | ARG_ATTR_SCALES) != 0u) {
             memory[ARG_DST | ARG_ATTR_SCALES] = split_horizontal(context->getEngine(),
                                                                  memory[ARG_DST | ARG_ATTR_SCALES],
                                                                  0,
                                                                  tp_cfg.w_rank,
                                                                  tp_cfg.w_size);
         }

-        if (memory.count(ARG_WEI | ARG_ATTR_SCALES)) {
+        if (memory.count(ARG_WEI | ARG_ATTR_SCALES) != 0u) {
             auto scale_mem = std::const_pointer_cast<IMemory>(memory[ARG_WEI | ARG_ATTR_SCALES]);
             memory[ARG_WEI | ARG_ATTR_SCALES] =
                 attrs.weightsNonTransposed
                     ? split_vertical(context->getEngine(), scale_mem, 0, tp_cfg.w_rank, tp_cfg.w_size)
                     : split_horizontal(context->getEngine(), scale_mem, 0, tp_cfg.w_rank, tp_cfg.w_size);
         }

-        if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS)) {
+        if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS) != 0u) {
             auto zeropoint_mem = std::const_pointer_cast<IMemory>(memory[ARG_WEI | ARG_ATTR_ZERO_POINTS]);
             auto element_num = zeropoint_mem->getSize() / zeropoint_mem->getPrecision().size();
             if (element_num == 1) {

src/plugins/intel_cpu/src/nodes/generate_proposals.cpp (+1 -1)

@@ -110,7 +110,7 @@ void refine_anchors(const float* deltas,
             proposals[p_idx + 2] = x1;
             proposals[p_idx + 3] = y1;
             proposals[p_idx + 4] = score;
-            proposals[p_idx + 5] = (min_box_W <= box_w) * (min_box_H <= box_h) * 1.0;
+            proposals[p_idx + 5] = static_cast<int>(min_box_W <= box_w) * static_cast<int>(min_box_H <= box_h) * 1.0;
         }
     });
 }
