From 683cfc2b0b061eabef040b99f1f35d839f39f6c6 Mon Sep 17 00:00:00 2001
From: Tommy Chiang
Date: Wed, 12 Feb 2025 08:36:13 +0000
Subject: [PATCH] [core] Remove custom ov::optional

Since C++17 is the default standard for OpenVINO, there is no need for
the custom ov::optional struct anymore; it can be replaced with
std::optional.

Note that the `bool` operator of std::optional is marked as `explicit`,
while the original implementation's was not, so the few call sites that
relied on the implicit conversion now use `has_value()`.
---
 .../symbol_optimization.cpp                   |   4 +-
 .../dev_api/openvino/core/tensor_util.hpp     |   9 +-
 .../convolution_backprop_shape_inference.hpp  |   4 +-
 .../include/fft_common_validation.hpp         |   4 +-
 ...p_convolution_backprop_shape_inference.hpp |   4 +-
 .../include/interpolate_shape_inference.hpp   |   8 +-
 .../shape_inference/include/ov_optional.hpp   | 139 ------------------
 .../include/range_shape_inference.hpp         |   2 +-
 .../include/segment_max_shape_inference.hpp   |   3 +-
 .../include/strided_slice_shape_inference.hpp |   5 +-
 src/core/shape_inference/include/utils.hpp    |  42 +++---
 src/plugins/intel_cpu/src/graph_optimizer.cpp |  17 ++-
 src/plugins/intel_cpu/src/memory_control.cpp  |   1 -
 .../executors/acl/acl_fullyconnected.cpp      |   1 -
 .../acl/acl_fullyconnected_utils.cpp          |  27 ++--
 .../acl/acl_fullyconnected_utils.hpp          |  16 +-
 .../executors/executor_implementation.hpp     |   6 +-
 .../fullyconnected_implementations.cpp        |  28 ++--
 .../src/nodes/executors/variable_executor.hpp |   2 +-
 src/plugins/intel_cpu/src/nodes/memory.cpp    |  17 ++-
 src/plugins/intel_cpu/src/nodes/memory.hpp    |  18 +--
 .../src/shape_inference/shape_inference.cpp   |  33 +++--
 .../src/shape_inference/shape_inference.hpp   |   7 +-
 23 files changed, 137 insertions(+), 260 deletions(-)
 delete mode 100644 src/core/shape_inference/include/ov_optional.hpp
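For context on the `explicit` note in the message above: contextual conversions (`if (opt)`) keep working, but implicit conversions to `bool` stop compiling, which is why a few call sites in this patch switch to `has_value()`. A minimal sketch of the difference (hypothetical names, not code from this patch):

```cpp
#include <optional>

std::optional<int> find_axis() {  // hypothetical helper
    return 2;
}

int main() {
    std::optional<int> axis = find_axis();

    if (axis) {       // OK: `explicit operator bool` still allows
        (void)*axis;  // contextual conversion in conditions
    }

    bool found = axis.has_value();  // OK: explicit query
    // bool bad = axis;             // ill-formed with std::optional; the custom
    //                              // ov::optional accepted this implicit conversion
    return found ? 0 : 1;
}
```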
diff --git a/src/common/transformations/src/transformations/symbolic_transformations/symbol_optimization.cpp b/src/common/transformations/src/transformations/symbolic_transformations/symbol_optimization.cpp
index 708100273942a0..857e6a667bacfa 100644
--- a/src/common/transformations/src/transformations/symbolic_transformations/symbol_optimization.cpp
+++ b/src/common/transformations/src/transformations/symbolic_transformations/symbol_optimization.cpp
@@ -4,6 +4,8 @@
 
 #include "transformations/symbolic_transformations/symbol_optimization.hpp"
 
+#include <optional>
+
 #include "itt.hpp"
 #include "openvino/core/bound_evaluation_util.hpp"
 #include "openvino/core/rt_info.hpp"
@@ -379,7 +381,7 @@ struct OutputValue {
         });
     }
 
-    static ov::optional<OutputValue> make(const ov::Output<ov::Node>& output) {
+    static std::optional<OutputValue> make(const ov::Output<ov::Node>& output) {
         auto symbols = output.get_tensor().get_value_symbol();
         if (symbols.empty() || symbols.size() == 1)
             return {};
diff --git a/src/core/dev_api/openvino/core/tensor_util.hpp b/src/core/dev_api/openvino/core/tensor_util.hpp
index 9d3b9d0b985206..6edf2c3c4cbd07 100644
--- a/src/core/dev_api/openvino/core/tensor_util.hpp
+++ b/src/core/dev_api/openvino/core/tensor_util.hpp
@@ -4,8 +4,9 @@
 
 #pragma once
 
+#include <optional>
+
 #include "openvino/op/constant.hpp"
-#include "ov_optional.hpp"
 
 namespace ov {
 namespace util {
@@ -15,7 +16,7 @@
 template <class T>
 OPENVINO_API Tensor greater_equal(const ov::Tensor& lhs, const T& element);
 OPENVINO_API bool reduce_and(const ov::Tensor& t);
 template <class T>
-OPENVINO_API ov::optional<std::vector<T>> to_vector(const ov::Tensor& t);
+OPENVINO_API std::optional<std::vector<T>> to_vector(const ov::Tensor& t);
 
 template <class T>
 Tensor make_tensor_of_value(const element::Type_t& et, const T& value, Shape shape = {}) {
@@ -34,8 +35,8 @@
 }
 
 template <class T>
-ov::optional<std::vector<T>> to_vector(const ov::Tensor& t) {
-    ov::optional<std::vector<T>> result;
+std::optional<std::vector<T>> to_vector(const ov::Tensor& t) {
+    std::optional<std::vector<T>> result;
     if (t)
         result = ov::op::v0::Constant(t).cast_vector<T>();
     return result;
diff --git a/src/core/shape_inference/include/convolution_backprop_shape_inference.hpp b/src/core/shape_inference/include/convolution_backprop_shape_inference.hpp
index d6f81f0f5ff28f..1420f53fbab606 100644
--- a/src/core/shape_inference/include/convolution_backprop_shape_inference.hpp
+++ b/src/core/shape_inference/include/convolution_backprop_shape_inference.hpp
@@ -3,6 +3,8 @@
 //
 #pragma once
 
+#include <optional>
+
 #include "convolution_backprop_shape_inference_util.hpp"
 #include "convolution_shape_inference_util.hpp"
 #include "openvino/op/convolution.hpp"
@@ -22,7 +24,7 @@ std::vector<TRShape> shape_infer(const ConvolutionBackpropData* op,
     NODE_VALIDATION_CHECK(op, inputs_count >= 2);
     using namespace ov::util;
 
-    ov::optional<TRShape> out_spatial_shape;
+    std::optional<TRShape> out_spatial_shape;
     if (has_spatial_shape) {
         const auto& spatial_shape = input_shapes[2];
         NODE_VALIDATION_CHECK(op,
diff --git a/src/core/shape_inference/include/fft_common_validation.hpp b/src/core/shape_inference/include/fft_common_validation.hpp
index 9e2d5683833ed2..4fcf4603c78b58 100644
--- a/src/core/shape_inference/include/fft_common_validation.hpp
+++ b/src/core/shape_inference/include/fft_common_validation.hpp
@@ -4,6 +4,8 @@
 
 #pragma once
 
+#include <optional>
+
 #include "openvino/core/axis_vector.hpp"
 #include "openvino/core/validation_util.hpp"
 #include "openvino/op/util/fft_base.hpp"
@@ -99,7 +101,7 @@ void validate_signal_size(const ov::op::util::FFTBase* op,
 template <class T>
 void shape_validation(const ov::op::util::FFTBase* op,
                       const std::vector<T>& input_shapes,
-                      ov::optional<std::vector<int64_t>>& axes,
+                      std::optional<std::vector<int64_t>>& axes,
                       FFTKind fft_kind) {
     const auto& input_shape = input_shapes[0];
     const auto& axes_shape = input_shapes[1];
diff --git a/src/core/shape_inference/include/group_convolution_backprop_shape_inference.hpp b/src/core/shape_inference/include/group_convolution_backprop_shape_inference.hpp
index 2f180e2b41c4a7..ce81bc043e331d 100644
--- a/src/core/shape_inference/include/group_convolution_backprop_shape_inference.hpp
+++ b/src/core/shape_inference/include/group_convolution_backprop_shape_inference.hpp
@@ -3,6 +3,8 @@
 //
 #pragma once
 
+#include <optional>
+
 #include "convolution_backprop_shape_inference.hpp"
 #include "openvino/op/group_conv.hpp"
 #include "utils.hpp"
@@ -33,7 +35,7 @@ std::vector<TRShape> shape_infer(const GroupConvolutionBackpropData* op,
     NODE_VALIDATION_CHECK(op, inputs_count >= 2);
     using namespace ov::util;
 
-    ov::optional<TRShape> out_spatial_shape;
+    std::optional<TRShape> out_spatial_shape;
     if (has_spatial_shape) {
         const auto& spatial_shape = input_shapes[2];
         NODE_VALIDATION_CHECK(op,
diff --git a/src/core/shape_inference/include/interpolate_shape_inference.hpp b/src/core/shape_inference/include/interpolate_shape_inference.hpp
index 650bb8574d6a0b..848bc8916ff998 100644
--- a/src/core/shape_inference/include/interpolate_shape_inference.hpp
+++ b/src/core/shape_inference/include/interpolate_shape_inference.hpp
@@ -5,10 +5,10 @@
 #pragma once
 
 #include
-#include
+#include <optional>
 
 #include "dimension_util.hpp"
-#include "pooling_shape_inference_util.hpp"
+#include "openvino/op/interpolate.hpp"
 #include "utils.hpp"
 
 namespace ov {
@@ -153,8 +153,8 @@ TRShape make_padded_shape(const TShape& input, TInputIter pads_begin, TInputIter
  * @return Not null pointer with axes values or null pointer if can't get axes from input.
  */
 template <class TShape, class TRes = std::vector<int64_t>>
-ov::optional<TRes> get_axes(const Node* const op, size_t port, bool has_axes, size_t rank, const ITensorAccessor& ta) {
-    ov::optional<TRes> axes;
+std::optional<TRes> get_axes(const Node* const op, size_t port, bool has_axes, size_t rank, const ITensorAccessor& ta) {
+    std::optional<TRes> axes;
     if (has_axes) {
         using TAxis = typename TRes::value_type;
         axes = std::move(get_input_const_data_as<TShape, TAxis, TRes>(op, port, ta));
diff --git a/src/core/shape_inference/include/ov_optional.hpp b/src/core/shape_inference/include/ov_optional.hpp
deleted file mode 100644
index 15973ae0c8a5f8..00000000000000
--- a/src/core/shape_inference/include/ov_optional.hpp
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (C) 2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <utility>
-
-namespace ov {
-#ifdef OPENVINO_CPP_17_VER
-using optional = std::optional;
-#else
-
-/**
- * @brief Store optional object of type T (basic version of std::optional).
- *
- * @note If cpp17 used this class should be replaced by std::optional.
- *
- * @tparam T Type of stored object.
- */
-template <class T>
-class optional {
-public:
-    constexpr optional() noexcept = default;
-    template <class... Args>
-    constexpr optional(Args&&... args) : m_has_value{true},
-                                         m_opt(std::forward<Args>(args)...) {}
-
-    optional(const optional& other) : m_has_value{other.m_has_value}, m_opt{} {
-        if (other.m_has_value) {
-            create(*other);
-        }
-    }
-
-    optional(optional&& other) noexcept : m_has_value{other.m_has_value}, m_opt{} {
-        if (other.m_has_value) {
-            create(std::move(*other));
-        }
-    }
-
-    ~optional() {
-        reset();
-    }
-
-    optional& operator=(const optional& other) {
-        if (other) {
-            *this = *other;
-        } else {
-            reset();
-        }
-        return *this;
-    }
-
-    optional& operator=(optional&& other) noexcept {
-        if (other) {
-            *this = std::move(*other);
-        } else {
-            reset();
-        }
-        return *this;
-    }
-
-    template <class U>
-    optional& operator=(U&& value) {
-        if (m_has_value) {
-            m_opt.m_value = std::forward<U>(value);
-        } else {
-            emplace(std::forward<U>(value));
-        }
-        return *this;
-    }
-
-    constexpr operator bool() const {
-        return m_has_value;
-    }
-
-    constexpr const T& operator*() const& noexcept {
-        return m_opt.m_value;
-    }
-
-    T& operator*() & noexcept {
-        return m_opt.m_value;
-    }
-
-    constexpr const T&& operator*() const&& noexcept {
-        return m_opt.m_value;
-    }
-
-    T&& operator*() && noexcept {
-        return std::move(m_opt.m_value);
-    }
-
-    constexpr const T* operator->() const noexcept {
-        return &m_opt.m_value;
-    }
-
-    T* operator->() noexcept {
-        return &m_opt.m_value;
-    }
-
-    void reset() {
-        if (m_has_value) {
-            m_opt.m_value.T::~T();
-            m_has_value = false;
-        }
-    }
-
-    template <class... Args>
-    void emplace(Args&&... args) {
-        create(std::forward<Args>(args)...);
-        m_has_value = true;
-    }
-
-private:
-    template <class... Args>
-    void create(Args&&... args) {
-        new (std::addressof(m_opt)) T(std::forward<Args>(args)...);
-    }
-
-    struct Empty {};
-
-    template <class>
-    union Storage {
-        Empty m_empty;
-        T m_value;
-
-        constexpr Storage() noexcept : m_empty{} {}
-        constexpr Storage(uint8_t) noexcept : Storage{} {}
-
-        template <class... Args>
-        constexpr Storage(Args&&... args) : m_value(std::forward<Args>(args)...) {}
-        ~Storage() {}
-    };
-
-    bool m_has_value = false;
-    Storage<T> m_opt{};
-};
-#endif
-}  // namespace ov
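The deleted file above hand-rolls placement-new storage with a manual destructor call; every member it exposed (`emplace`, `reset`, `operator*`/`operator->`, value assignment) maps one-to-one onto std::optional, which is what makes the rest of this patch a mechanical rename. A quick illustration of the shared surface (toy example, not OpenVINO code):

```cpp
#include <optional>
#include <vector>

int main() {
    std::optional<std::vector<int>> v;

    v.emplace(3, 7);   // construct the vector in place: {7, 7, 7}
    v->push_back(1);   // operator-> forwards to the contained value
    (*v).pop_back();   // operator* as well
    v.reset();         // destroys the contained vector, disengages

    return v.has_value() ? 1 : 0;  // 0: the optional is empty again
}
```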
diff --git a/src/core/shape_inference/include/range_shape_inference.hpp b/src/core/shape_inference/include/range_shape_inference.hpp
index 3662ace10ee240..574271f86365a6 100644
--- a/src/core/shape_inference/include/range_shape_inference.hpp
+++ b/src/core/shape_inference/include/range_shape_inference.hpp
@@ -102,7 +102,7 @@ std::vector<TRShape> range_shape_infer(const Node* op,
 
             output_shapes[0] = TRShape{static_cast(strided)};
         } else {
-            symbol_propagation(op, output_shapes, start, step, start_val, step_val);
+            symbol_propagation(op, output_shapes, start, step, start_val.has_value(), step_val.has_value());
         }
     return output_shapes;
 }
diff --git a/src/core/shape_inference/include/segment_max_shape_inference.hpp b/src/core/shape_inference/include/segment_max_shape_inference.hpp
index 3c365682b11f10..57d0815523d167 100644
--- a/src/core/shape_inference/include/segment_max_shape_inference.hpp
+++ b/src/core/shape_inference/include/segment_max_shape_inference.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include
+#include <optional>
 
 #include "openvino/op/segment_max.hpp"
 #include "utils.hpp"
@@ -50,7 +51,7 @@ std::vector<TRShape> shape_infer(const SegmentMax* op,
 
     // validate num_segments input
     const auto num_segments_available = op->inputs().size() == 3;
-    ov::optional<TRShape> num_segments;
+    std::optional<TRShape> num_segments;
     if (num_segments_available) {
         num_segments = get_input_const_data_as_shape<TRShape>(op, 2, tensor_accessor);
     }
diff --git a/src/core/shape_inference/include/strided_slice_shape_inference.hpp b/src/core/shape_inference/include/strided_slice_shape_inference.hpp
index 2e57e23f14070e..1939aa101995a3 100644
--- a/src/core/shape_inference/include/strided_slice_shape_inference.hpp
+++ b/src/core/shape_inference/include/strided_slice_shape_inference.hpp
@@ -5,8 +5,9 @@
 #pragma once
 
 #include
-#include <openvino/op/strided_slice.hpp>
+#include <optional>
 
+#include "openvino/op/strided_slice.hpp"
 #include "slice_shape_inference_utils.hpp"
 #include "utils.hpp"
 
@@ -61,7 +62,7 @@ std::vector<TRShape> shape_infer(const StridedSlice* op,
 
     const auto begin = get_input_bounds<TRShape, int64_t>(op, 1, ta);
     const auto end = get_input_bounds<TRShape, int64_t>(op, 2, ta);
 
-    ov::optional<std::vector<int64_t>> strides;
+    std::optional<std::vector<int64_t>> strides;
     if (input_shapes.size() > 3) {
         strides = get_input_const_data_as<TRShape, int64_t>(op, 3, ta);
     } else if (begin) {
diff --git a/src/core/shape_inference/include/utils.hpp b/src/core/shape_inference/include/utils.hpp
index 7d28309c489361..437c367f2222bf 100644
--- a/src/core/shape_inference/include/utils.hpp
+++ b/src/core/shape_inference/include/utils.hpp
@@ -4,13 +4,13 @@
 #pragma once
 
 #include
+#include <optional>
 #include
 
 #include "element_visitor.hpp"
 #include "openvino/core/bound_evaluation_util.hpp"
 #include "openvino/core/validation_util.hpp"
 #include "openvino/opsets/opset1.hpp"
-#include "ov_optional.hpp"
 #include "shape_infer_type_utils.hpp"
 #include "tensor_data_accessor.hpp"
@@ -204,10 +204,10 @@
 template <class TShape,
           class T,
           class TRes = std::vector<T>,
           class UnaryOperation = ov::util::Cast<T>,
           typename std::enable_if<std::is_same<TShape, StaticShape>::value>::type* = nullptr>
-ov::optional<TRes> get_input_const_data_as(const ov::Node* op,
-                                           size_t idx,
-                                           const ITensorAccessor& tensor_accessor,
-                                           UnaryOperation&& func = ov::util::Cast<T>()) {
+std::optional<TRes> get_input_const_data_as(const ov::Node* op,
+                                            size_t idx,
+                                            const ITensorAccessor& tensor_accessor,
+                                            UnaryOperation&& func = ov::util::Cast<T>()) {
     if (auto t = tensor_accessor(idx)) {
         return {get_tensor_data_as<T, TRes>(t, std::forward<UnaryOperation>(func))};
     } else {
         return {};
     }
@@ -245,10 +245,10 @@
 template <class TShape,
           class T,
           class TRes = std::vector<T>,
           class UnaryOperation = ov::util::Cast<T>,
           typename std::enable_if<!std::is_same<TShape, StaticShape>::value>::type* = nullptr>
-ov::optional<TRes> get_input_const_data_as(const ov::Node* op,
-                                           size_t idx,
-                                           const ITensorAccessor& tensor_accessor,
-                                           UnaryOperation&& func = ov::util::Cast<T>()) {
+std::optional<TRes> get_input_const_data_as(const ov::Node* op,
+                                            size_t idx,
+                                            const ITensorAccessor& tensor_accessor,
+                                            UnaryOperation&& func = ov::util::Cast<T>()) {
     if (auto t = tensor_accessor(idx)) {
         return {get_tensor_data_as<T, TRes>(t, std::forward<UnaryOperation>(func))};
     } else if (const auto& constant =
@@ -285,11 +285,11 @@
 template <class TShape,
           class TDimValue = typename TShape::value_type,
           class UnaryOperation = ov::util::InTypeRange<TDimValue>,
           typename std::enable_if<!std::is_same<TShape, PartialShape>::value>::type* = nullptr>
-ov::optional<TShape> get_input_const_data_as_shape(const ov::Node* op,
-                                                   size_t port,
-                                                   const ITensorAccessor& tensor_accessor,
-                                                   UnaryOperation&& func = ov::util::InTypeRange<TDimValue>()) {
-    auto shape = ov::optional<TShape>();
+std::optional<TShape> get_input_const_data_as_shape(const ov::Node* op,
+                                                    size_t port,
+                                                    const ITensorAccessor& tensor_accessor,
+                                                    UnaryOperation&& func = ov::util::InTypeRange<TDimValue>()) {
+    auto shape = std::optional<TShape>();
     if (auto s = get_input_const_data_as<TShape, TDimValue>(op,
                                                             port,
                                                             tensor_accessor,
@@ -303,11 +303,11 @@
 template <class TShape,
           class TDimValue = typename TShape::value_type,
           class UnaryOperation = ov::util::InTypeRange<TDimValue>,
           typename std::enable_if<std::is_same<TShape, PartialShape>::value>::type* = nullptr>
-ov::optional<TShape> get_input_const_data_as_shape(const ov::Node* op,
-                                                   size_t port,
-                                                   const ITensorAccessor& tensor_accessor,
-                                                   UnaryOperation&& func = ov::util::InTypeRange<TDimValue>()) {
-    auto shape = ov::optional<TShape>();
+std::optional<TShape> get_input_const_data_as_shape(const ov::Node* op,
+                                                    size_t port,
+                                                    const ITensorAccessor& tensor_accessor,
+                                                    UnaryOperation&& func = ov::util::InTypeRange<TDimValue>()) {
+    auto shape = std::optional<TShape>();
     if (auto t = tensor_accessor(port)) {
         shape.emplace(get_tensor_data_as<TDimValue>(t, std::forward<UnaryOperation>(func)));
     } else if (port < op->get_input_size()) {
@@ -349,7 +349,7 @@ inline element::Type get_input_const_element_type(const ov::Node* const op, size
 * \return Return optional vector of bounds as pair lower, upper when evaluated successful.
 */
template <class TShape, class TData, class TResult = std::vector<std::pair<TData, TData>>>
-ov::optional<TResult> get_input_bounds(const ov::Node* op, size_t port, const ITensorAccessor& ta) {
+std::optional<TResult> get_input_bounds(const ov::Node* op, size_t port, const ITensorAccessor& ta) {
     const auto make_bound = [](element::Type_t et) {
         return [et](TData lb, TData ub) -> typename TResult::value_type {
             return {element::get_value_or_limit_of(et, lb), element::get_value_or_limit_of(et, ub)};
         };
     };
@@ -357,7 +357,7 @@ ov::optional<TResult> get_input_bounds(const ov::Node* op, size_t port, const IT
 
     constexpr auto cast = ov::util::Cast<TData>();
-    ov::optional<TResult> out;
+    std::optional<TResult> out;
 
     if (const auto t = ta(port)) {
         const auto& et = t.get_element_type();
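The helpers above all follow the same convention: an engaged optional when the constant data could be read, an empty one otherwise. A self-contained sketch of the caller-side pattern (hypothetical `read_const_input`, not the real accessor):

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Hypothetical stand-in for get_input_const_data_as: yields data only when
// the input is backed by a constant.
std::optional<std::vector<int64_t>> read_const_input(bool is_constant) {
    if (is_constant) {
        return std::vector<int64_t>{1, 2, 4};
    }
    return std::nullopt;  // equivalent to the `return {};` branches above
}

int main() {
    if (auto data = read_const_input(true)) {  // contextual bool test
        std::cout << "got " << data->size() << " values\n";
    } else {
        std::cout << "input not constant\n";
    }
}
```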
diff --git a/src/plugins/intel_cpu/src/graph_optimizer.cpp b/src/plugins/intel_cpu/src/graph_optimizer.cpp
index 4be85596657a16..75d49bb3779fb8 100644
--- a/src/plugins/intel_cpu/src/graph_optimizer.cpp
+++ b/src/plugins/intel_cpu/src/graph_optimizer.cpp
@@ -39,6 +39,7 @@
 #include
 #include
 #include
+#include <optional>
 #include
 #include
 
@@ -3136,11 +3137,11 @@ void GraphOptimizer::MatchSdpaKvCache(Graph& graph) {
         auto memInputNode = std::dynamic_pointer_cast<node::MemoryInput>(node);
         OPENVINO_ASSERT(memInputNode, "MemoryInput node ", node->getName(), " has unexpected dynamic type");
 
-        ov::optional<std::vector<Shape>> inputShapes;
-        ov::optional<std::vector<ov::element::Type>> inputPrcs;
+        std::optional<std::vector<Shape>> inputShapes;
+        std::optional<std::vector<ov::element::Type>> inputPrcs;
 
         if (!node->getParentEdges().empty()) {
-            inputShapes = ov::optional<std::vector<Shape>>(std::vector<Shape>{});
-            inputPrcs = ov::optional<std::vector<ov::element::Type>>(std::vector<ov::element::Type>{});
+            inputShapes = std::optional<std::vector<Shape>>(std::vector<Shape>{});
+            inputPrcs = std::optional<std::vector<ov::element::Type>>(std::vector<ov::element::Type>{});
 
             auto& input_shape_vec = *inputShapes;
             auto& input_prc_vec = *inputPrcs;
@@ -3272,11 +3273,11 @@ void GraphOptimizer::DropRedundantMemoryOutput(Graph& graph) {
         auto memInputNode = std::dynamic_pointer_cast<node::MemoryInput>(node);
         OPENVINO_ASSERT(memInputNode, "MemoryInput node ", node->getName(), " has unexpected dynamic type");
 
-        ov::optional<std::vector<Shape>> inputShapes;
-        ov::optional<std::vector<ov::element::Type>> inputPrcs;
+        std::optional<std::vector<Shape>> inputShapes;
+        std::optional<std::vector<ov::element::Type>> inputPrcs;
 
         if (!node->getParentEdges().empty()) {
-            inputShapes = ov::optional<std::vector<Shape>>(std::vector<Shape>{});
-            inputPrcs = ov::optional<std::vector<ov::element::Type>>(std::vector<ov::element::Type>{});
+            inputShapes = std::optional<std::vector<Shape>>(std::vector<Shape>{});
+            inputPrcs = std::optional<std::vector<ov::element::Type>>(std::vector<ov::element::Type>{});
 
             auto& input_shape_vec = *inputShapes;
             auto& input_prc_vec = *inputPrcs;
diff --git a/src/plugins/intel_cpu/src/memory_control.cpp b/src/plugins/intel_cpu/src/memory_control.cpp
index decde90a0e6eb9..20e07ae72432ba 100644
--- a/src/plugins/intel_cpu/src/memory_control.cpp
+++ b/src/plugins/intel_cpu/src/memory_control.cpp
@@ -6,7 +6,6 @@
 
 #include
 #include
-#include <ov_optional.hpp>
 #include
 
 #include "openvino/runtime/memory_solver.hpp"
diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp
index 1c4b963026915f..3010a7cff9f647 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp
@@ -17,7 +17,6 @@
 #include "nodes/executors/executor.hpp"
 #include "nodes/executors/implementation_utils.hpp"
 #include "nodes/executors/memory_arguments.hpp"
-#include "ov_optional.hpp"
 #include "utils/cpu_utils.hpp"
 #include "utils/debug_capabilities.h"
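A possible follow-up that std::optional enables (not done in this patch): the verbose `std::optional<std::vector<Shape>>(std::vector<Shape>{})` spelling in the graph_optimizer hunks above can be written as `emplace()`, which constructs the contained vector in place:

```cpp
#include <optional>
#include <vector>

struct Shape {};  // stand-in for ov::intel_cpu::Shape

int main() {
    std::optional<std::vector<Shape>> inputShapes;

    inputShapes.emplace();            // engage with an empty vector, in place
    inputShapes->push_back(Shape{});  // then fill it, as the optimizer does

    return inputShapes->size() == 1 ? 0 : 1;
}
```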
diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp
index 16a197a9ebcf5b..e4f33731f1fadf 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp
@@ -1,11 +1,12 @@
 // Copyright (C) 2024 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <common/primitive_desc_iface.hpp>
-#include <cpu/acl/acl_utils.hpp>
+#include <optional>
 
 #include "acl_fullyconnected.hpp"
 #include "acl_utils.hpp"
+#include "common/primitive_desc_iface.hpp"
+#include "cpu/acl/acl_utils.hpp"
 #include "memory_desc/cpu_memory_desc_utils.h"
 #include "nodes/common/cpu_convert.h"
 #include "nodes/common/cpu_memcpy.h"
@@ -61,9 +62,9 @@ DnnlMemoryDescPtr acl_fc_executor::makeTransposedWeightDescriptor(const DnnlMemo
     return DnnlExtensionUtils::makeDescriptor(transposedWeiDesc);
 }
 
-ov::optional<MemoryPtr> acl_fc_executor::convertWeightPrecision(const MemoryPtr& input,
-                                                                const MemoryPtr& output,
-                                                                ov::element::Type weightPrecision) {
+std::optional<MemoryPtr> acl_fc_executor::convertWeightPrecision(const MemoryPtr& input,
+                                                                 const MemoryPtr& output,
+                                                                 ov::element::Type weightPrecision) {
     MemoryArgs memoryArgs;
     memoryArgs[ARG_SRC] = input;
     memoryArgs[ARG_DST] = output;
@@ -71,7 +72,7 @@ ov::optional<MemoryPtr> acl_fc_executor::convertWeightPrecision(const MemoryPtr&
     auto aclWeightsConverter = std::make_shared<ACLWeightsConverter>();
     if (aclWeightsConverter->update(memoryArgs)) {
         aclWeightsConverter->execute(memoryArgs);
-        return ov::optional<MemoryPtr>(memoryArgs.at(ARG_DST));
+        return std::optional<MemoryPtr>(memoryArgs.at(ARG_DST));
     }
 
     if (!node::Convert::isSupportedDesc(input->getDesc()) || !node::Convert::isSupportedDesc(output->getDesc())) {
         return {};
     }
@@ -87,14 +88,14 @@
                 weightPrecision,
                 input->getSize() / input->getDesc().getPrecision().size());
 
-    return ov::optional<MemoryPtr>(std::make_shared<Memory>(output->getPrimitive().get_engine(),
-                                                            output->getDesc().cloneWithNewPrecision(weightPrecision),
-                                                            tmpBuff.data()));
+    return std::optional<MemoryPtr>(std::make_shared<Memory>(output->getPrimitive().get_engine(),
+                                                             output->getDesc().cloneWithNewPrecision(weightPrecision),
+                                                             tmpBuff.data()));
 }
 
-ov::optional<MemoryPtr> acl_fc_executor::reorderDataFallback(const MemoryPtr& input,
-                                                             const MemoryPtr& output,
-                                                             const ExecutorContext::CPtr& context) {
+std::optional<MemoryPtr> acl_fc_executor::reorderDataFallback(const MemoryPtr& input,
+                                                              const MemoryPtr& output,
+                                                              const ExecutorContext::CPtr& context) {
     if (output->getDataType() == input->getDataType()) {
         return {};
     }
@@ -119,7 +120,7 @@ ov::optional<MemoryPtr> acl_fc_executor::reorderDataFallback(const MemoryPtr& in
         reorderWithoutConvert.execute(
             loc_stream,
             {{DNNL_ARG_FROM, convertOutput->getPrimitive()}, {DNNL_ARG_TO, output->getPrimitive()}});
-            return ov::optional<MemoryPtr>(output);
+            return std::optional<MemoryPtr>(output);
         }
     }
     return {};
diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp
index d2917fd132f991..1aa8860be25998 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp
@@ -2,9 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 #pragma once
+
+#include <optional>
+
 #include "acl_common_executor.hpp"
 #include "nodes/executors/fullyconnected_config.hpp"
-#include "ov_optional.hpp"
 
 namespace ov::intel_cpu {
 
@@ -23,13 +25,13 @@ VectorDims makeDummyOutputDims(const VectorDims& inShape, const VectorDims& wSha
 
 DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr& srcDesc, const DnnlMemoryDescPtr& dstDesc);
 
-ov::optional<MemoryPtr> convertWeightPrecision(const MemoryPtr& input,
-                                               const MemoryPtr& output,
-                                               ov::element::Type weightPrecision);
+std::optional<MemoryPtr> convertWeightPrecision(const MemoryPtr& input,
+                                                const MemoryPtr& output,
+                                                ov::element::Type weightPrecision);
 
-ov::optional<MemoryPtr> reorderDataFallback(const MemoryPtr& input,
-                                            const MemoryPtr& output,
-                                            const ExecutorContext::CPtr& context);
+std::optional<MemoryPtr> reorderDataFallback(const MemoryPtr& input,
+                                             const MemoryPtr& output,
+                                             const ExecutorContext::CPtr& context);
 
 MemoryPtr reorderData(const DnnlMemoryDescPtr& srcWeightDesc,
                       const DnnlMemoryDescPtr& dstWeightDesc,
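The two helpers declared above use an empty optional to mean "this conversion path did not apply, try the next one". The chaining pattern, reduced to a sketch (hypothetical stand-ins, not the real executor code):

```cpp
#include <memory>
#include <optional>

struct Memory {};
using MemoryPtr = std::shared_ptr<Memory>;

// Hypothetical stand-ins for convertWeightPrecision / reorderDataFallback:
// std::nullopt means "fall through to the next strategy".
std::optional<MemoryPtr> tryAclConvert() {
    return std::nullopt;
}
std::optional<MemoryPtr> tryGenericConvert() {
    return std::make_shared<Memory>();
}

MemoryPtr prepareWeights() {
    if (auto m = tryAclConvert()) {
        return *m;
    }
    if (auto m = tryGenericConvert()) {
        return *m;
    }
    return nullptr;  // caller decides what "no conversion applied" means
}

int main() {
    return prepareWeights() ? 0 : 1;
}
```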
diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp
index a8198eebc9fc2d..d2a3eb71ee0c9f 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp
@@ -6,10 +6,10 @@
 
 #include
 #include
+#include <optional>
 
 #include "nodes/executors/executor.hpp"
 #include "nodes/executors/executor_config.hpp"
-#include "ov_optional.hpp"
 
 namespace ov::intel_cpu {
 
@@ -19,7 +19,7 @@ class ExecutorImplementation {
 public:
     using SupportsPredicate = std::function<bool(const executor::Config<Attrs>&)>;
     using RequiresFallbackPredicate =
-        std::function<ov::optional<executor::Config<Attrs>>(const executor::Config<Attrs>&)>;
+        std::function<std::optional<executor::Config<Attrs>>(const executor::Config<Attrs>&)>;
     using AcceptsShapePredicate = std::function<bool(const MemoryArgs&)>;
     using CreateFunction = std::function
 
-    ov::optional<executor::Config<Attrs>> requiresFallback(const executor::Config<Attrs>& config) const {
+    std::optional<executor::Config<Attrs>> requiresFallback(const executor::Config<Attrs>& config) const {
         if (m_requiresFallback) {
             return m_requiresFallback(config);
         }
diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp
index 54a2fe9d4d45b3..01a12c44599131 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp
@@ -3,6 +3,7 @@
 
 #include
+#include <optional>
 #include
 
 #include "cpu/x64/cpu_isa_traits.hpp"
@@ -26,7 +27,6 @@
 #include "nodes/executors/precision_translation.hpp"
 #include "nodes/executors/type_mask.hpp"
 #include "openvino/core/type/element_type.hpp"
-#include "ov_optional.hpp"
 #include "utils/cpp/maybe_unused.hpp"
 #include "utils/debug_capabilities.h"
 
@@ -196,10 +196,10 @@ static MemoryDescArgs createOptimalDescriptors(const MemoryDescArgs& currentDesc
 }
 
 template <typename Attrs>
-ov::optional<executor::Config<Attrs>> requiresFallbackCommon(const executor::Config<Attrs>& config,
-                                                             const TypeMapping& typeMapping,
-                                                             const LayoutConfig& layoutConfig,
-                                                             const MappingNotation& notation) {
+std::optional<executor::Config<Attrs>> requiresFallbackCommon(const executor::Config<Attrs>& config,
+                                                              const TypeMapping& typeMapping,
+                                                              const LayoutConfig& layoutConfig,
+                                                              const MappingNotation& notation) {
     const auto typeConfig = getTypeConfiguration(config.descs, typeMapping, notation);
 
     if (fullyMatchConfiguration(config.descs, typeConfig, layoutConfig, notation)) {
         return {};
     }
 
     const auto optimalDescriptors = createOptimalDescriptors(config.descs, typeConfig, layoutConfig, notation);
 
-    return ov::optional<executor::Config<Attrs>>(FCConfig{optimalDescriptors, config.attrs, config.postOps});
+    return std::optional<executor::Config<Attrs>>(FCConfig{optimalDescriptors, config.attrs, config.postOps});
 }
 
 OV_CPU_MAYBE_UNUSED_FUNCTION static inline bool noWeightsDecompression(const FCConfig& config) {
@@ -245,7 +245,7 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
             return MlasGemmExecutor::supports(config);
         },
         // requiresFallback
-        [](const FCConfig& config) -> ov::optional<FCConfig> {
+        [](const FCConfig& config) -> std::optional<FCConfig> {
            // @todo Implement proper handling for the cases when fallback is not expected
             // throwing exception is not an option, since requiresFallback is used in two contexts:
             // 1) getting proper memory descriptors configuration
@@ -294,7 +294,7 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
             return true;
         },
         // requiresFallback
-        [](const FCConfig& config) -> ov::optional<FCConfig> {
+        [](const FCConfig& config) -> std::optional<FCConfig> {
             // @todo use dnnlConvolutionLayoutConfig after one is implemented
             return requiresFallbackCommon(config,
                                           dnnlConvolutionTypeMapping,
@@ -372,7 +372,7 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
             return ACLFullyConnectedExecutor::supports(config);
         },
         // requiresFallback
-        [](const FCConfig& config) -> ov::optional<FCConfig> {
+        [](const FCConfig& config) -> std::optional<FCConfig> {
             return requiresFallbackCommon(config,
                                           aclFCTypeMapping,
                                           aclFCLayoutConfig,
@@ -402,7 +402,7 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
             return ACLLowpFullyConnectedExecutor::supports(config);
         },
         // requiresFallback
-        [](const FCConfig& config) -> ov::optional<FCConfig> {
+        [](const FCConfig& config) -> std::optional<FCConfig> {
             return requiresFallbackCommon(config,
                                           aclLowpFCTypeMapping,
                                           aclFCLayoutConfig,
@@ -441,7 +441,7 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
             return MatMulKleidiAIExecutor::supports(config);
         },
         // requiresFallback
-        [](const FCConfig& config) -> ov::optional<FCConfig> {
+        [](const FCConfig& config) -> std::optional<FCConfig> {
             return {};
         },
         // acceptsShapes
@@ -467,7 +467,7 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
             return ShlFCExecutor::supports(config);
         },
         // requiresFallback
-        [](const FCConfig& config) -> ov::optional<FCConfig> {
+        [](const FCConfig& config) -> std::optional<FCConfig> {
             return {};
         },
         // acceptsShapes
@@ -498,7 +498,7 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
             return false;
         },
         // requiresFallback
-        [](const FCConfig& config) -> ov::optional<FCConfig> {
+        [](const FCConfig& config) -> std::optional<FCConfig> {
             return requiresFallbackCommon(config,
                                           dnnlMatMulTypeMapping,
                                           dnnlFCLayoutConfig,
@@ -549,7 +549,7 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
             return true;
         },
         // requiresFallback
-        [](const FCConfig& config) -> ov::optional<FCConfig> {
+        [](const FCConfig& config) -> std::optional<FCConfig> {
             return requiresFallbackCommon(config,
                                           dnnlFCTypeMapping,
                                           dnnlFCLayoutConfig,
diff --git a/src/plugins/intel_cpu/src/nodes/executors/variable_executor.hpp b/src/plugins/intel_cpu/src/nodes/executors/variable_executor.hpp
index dd797f1c02f606..4b99d5d67dfd9a 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/variable_executor.hpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/variable_executor.hpp
@@ -80,7 +80,7 @@ class VariableExecutor : public Executor {
                        suitableImplementations.end(),
                        implementationRequiresFallback.begin(),
                        [&config](const ExecutorImplementationRef& impl) {
-                           return impl.get().requiresFallback(config);
+                           return impl.get().requiresFallback(config).has_value();
                        });
 
         return implementationRequiresFallback;
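The `.has_value()` change in variable_executor.hpp just above is forced by the `explicit` operator: the lambda passed to std::transform must yield `bool`, and the optional returned by requiresFallback no longer converts implicitly. Reduced to a sketch (hypothetical Config type):

```cpp
#include <functional>
#include <optional>

struct Config {};
using RequiresFallbackPredicate = std::function<std::optional<Config>(const Config&)>;

int main() {
    RequiresFallbackPredicate requiresFallback = [](const Config&) -> std::optional<Config> {
        return std::nullopt;  // this implementation never needs a fallback
    };

    Config config;
    // bool needs = requiresFallback(config);           // ill-formed after the
    bool needs = requiresFallback(config).has_value();  // switch to std::optional
    return needs ? 1 : 0;
}
```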
diff --git a/src/plugins/intel_cpu/src/nodes/memory.cpp b/src/plugins/intel_cpu/src/nodes/memory.cpp
index bc7efc97bcc44c..d3a3ce679b5772 100644
--- a/src/plugins/intel_cpu/src/nodes/memory.cpp
+++ b/src/plugins/intel_cpu/src/nodes/memory.cpp
@@ -4,6 +4,7 @@
 
 #include "memory.hpp"
 
+#include <optional>
 #include
 #include
 
@@ -405,8 +406,8 @@ MemoryInputBase::MemoryInputBase(const std::string& id,
                                  const Shape& output_shape,
                                  const ov::element::Type& output_prc,
                                  const GraphContext::CPtr& context,
-                                 const ov::optional<std::vector<Shape>>& input_shape,
-                                 const ov::optional<std::vector<ov::element::Type>>& input_prc,
+                                 const std::optional<std::vector<Shape>>& input_shape,
+                                 const std::optional<std::vector<ov::element::Type>>& input_prc,
                                  MemoryInputBase::mode mode)
     : Input(output_shape, output_prc, name, type, context),
       MemoryStateNode(id) {
@@ -596,8 +597,8 @@ MemoryInput::MemoryInput(const std::string& id,
                          const Shape& output_shape,
                          const ov::element::Type& output_prc,
                          const GraphContext::CPtr& context,
-                         const ov::optional<std::vector<Shape>>& input_shape,
-                         const ov::optional<std::vector<ov::element::Type>>& input_prc,
+                         const std::optional<std::vector<Shape>>& input_shape,
+                         const std::optional<std::vector<ov::element::Type>>& input_prc,
                          std::shared_ptr<ov::Model> func,
                          mode mode)
     : MemoryInputBase::MemoryInputBase(id, name, type, output_shape, output_prc, context, input_shape, input_prc, mode),
@@ -917,8 +918,8 @@ MemoryInputSDPA::MemoryInputSDPA(const std::string& id,
                                  const Shape& output_shape,
                                  const ov::element::Type& output_prc,
                                  const GraphContext::CPtr& context,
-                                 const ov::optional<std::vector<Shape>>& input_shape,
-                                 const ov::optional<std::vector<ov::element::Type>>& input_prc,
+                                 const std::optional<std::vector<Shape>>& input_shape,
+                                 const std::optional<std::vector<ov::element::Type>>& input_prc,
                                  const std::shared_ptr<ScaledDotProductAttention>& sdpaNode)
     : MemoryInputBase(id, name, type, output_shape, output_prc, context, input_shape, input_prc),
       m_sdpaNode(sdpaNode) {}
@@ -1024,8 +1025,8 @@ MemoryInputSingle::MemoryInputSingle(const std::string& id,
                                      const Shape& output_shape,
                                      const ov::element::Type& output_prc,
                                      const GraphContext::CPtr& context,
-                                     const ov::optional<std::vector<Shape>>& input_shape,
-                                     const ov::optional<std::vector<ov::element::Type>>& input_prc,
+                                     const std::optional<std::vector<Shape>>& input_shape,
+                                     const std::optional<std::vector<ov::element::Type>>& input_prc,
                                      std::shared_ptr<ov::Model> func)
     : MemoryInput(id,
                   name,
diff --git a/src/plugins/intel_cpu/src/nodes/memory.hpp b/src/plugins/intel_cpu/src/nodes/memory.hpp
index 97cbd6af096914..bd800e065fdd96 100644
--- a/src/plugins/intel_cpu/src/nodes/memory.hpp
+++ b/src/plugins/intel_cpu/src/nodes/memory.hpp
@@ -7,10 +7,10 @@
 
 #include
 #include
+#include <optional>
 
 #include "input.h"
 #include "memory_state_base.h"
-#include "ov_optional.hpp"
 #include "proxy_mem_blk.h"
 
 namespace ov::intel_cpu::node {
@@ -165,8 +165,8 @@ class MemoryInputBase : public Input, public MemoryStateNode {
                     const Shape& output_shape,
                     const ov::element::Type& output_prc,
                     const GraphContext::CPtr& context,
-                    const ov::optional<std::vector<Shape>>& input_shape,
-                    const ov::optional<std::vector<ov::element::Type>>& input_prc,
+                    const std::optional<std::vector<Shape>>& input_shape,
+                    const std::optional<std::vector<ov::element::Type>>& input_prc,
                     mode mode = mode::read_value_assign);
 
 protected:
@@ -202,8 +202,8 @@ class MemoryInput : public MemoryInputBase {
                 const Shape& output_shape,
                 const ov::element::Type& output_prc,
                 const GraphContext::CPtr& context,
-                const ov::optional<std::vector<Shape>>& input_shape,
-                const ov::optional<std::vector<ov::element::Type>>& input_prc,
+                const std::optional<std::vector<Shape>>& input_shape,
+                const std::optional<std::vector<ov::element::Type>>& input_prc,
                 std::shared_ptr<ov::Model> func = nullptr,
                 mode mode = mode::read_value_assign);
 
@@ -250,8 +250,8 @@ class MemoryInputSingle : public MemoryInput {
                       const Shape& output_shape,
                       const ov::element::Type& output_prc,
                       const GraphContext::CPtr& context,
-                      const ov::optional<std::vector<Shape>>& input_shape,
-                      const ov::optional<std::vector<ov::element::Type>>& input_prc,
+                      const std::optional<std::vector<Shape>>& input_shape,
+                      const std::optional<std::vector<ov::element::Type>>& input_prc,
                       std::shared_ptr<ov::Model> func);
 
     static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
@@ -271,8 +271,8 @@ class MemoryInputSDPA : public MemoryInputBase {
                     const Shape& output_shape,
                     const ov::element::Type& output_prc,
                     const GraphContext::CPtr& context,
-                    const ov::optional<std::vector<Shape>>& input_shape,
-                    const ov::optional<std::vector<ov::element::Type>>& input_prc,
+                    const std::optional<std::vector<Shape>>& input_shape,
+                    const std::optional<std::vector<ov::element::Type>>& input_prc,
                     const std::shared_ptr<ScaledDotProductAttention>& sdpaNode);
 
     static bool isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept;
diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
index 0274b0e5d0be4e..a47f48395fa9de 100644
--- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
+++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include <optional>
 
 #include "adaptive_avg_pool_shape_inference.hpp"
 #include "adaptive_max_pool_shape_inference.hpp"
@@ -147,8 +148,8 @@ class ShapeInferBase : public IStaticShapeInfer {
         }
     }
 
-    ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                 const ov::ITensorAccessor&) override {
+    std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                  const ov::ITensorAccessor&) override {
         NODE_VALIDATION_CHECK(m_node.get(), input_shapes.size() > 0, "Incorrect number of input shapes");
         return {std::vector<StaticShape>{input_shapes[0]}};
     }
@@ -207,8 +208,8 @@ class ShapeInferCopy : public ShapeInferBase {
 public:
     using ShapeInferBase::ShapeInferBase;
 
-    ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                 const ov::ITensorAccessor&) override {
+    std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                  const ov::ITensorAccessor&) override {
         return {op::copy_shape_infer(m_node.get(), input_shapes)};
     }
 };
@@ -220,8 +221,8 @@ class ShapeInferEltwise : public ShapeInferBase {
 public:
     using ShapeInferBase::ShapeInferBase;
 
-    ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                 const ov::ITensorAccessor&) override {
+    std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                  const ov::ITensorAccessor&) override {
         return {op::eltwise_shape_infer(m_node.get(), input_shapes)};
     }
 };
@@ -233,8 +234,8 @@ class ShapeInferFallback : public ShapeInferBase {
 public:
     using ShapeInferBase::ShapeInferBase;
 
-    ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                 const ov::ITensorAccessor& tensor_accessor) override {
+    std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                  const ov::ITensorAccessor& tensor_accessor) override {
         const auto op = m_node.get();
         std::shared_ptr<ov::Node> local_op;
@@ -277,8 +278,8 @@ class ShapeInferTA : public ShapeInferBase {
 public:
     using ShapeInferBase::ShapeInferBase;
 
-    ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                 const ov::ITensorAccessor& tensor_accessor) override {
+    std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                  const ov::ITensorAccessor& tensor_accessor) override {
         return {shape_infer(static_cast<TOp*>(m_node.get()), input_shapes, tensor_accessor)};
     }
@@ -299,8 +300,8 @@ class ShapeInferTA : public ShapeInferBase {
 public:
     using ShapeInferBase::ShapeInferBase;
 
-    ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                 const ov::ITensorAccessor&) override {
+    std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                  const ov::ITensorAccessor&) override {
         return {shape_infer(static_cast<TOp*>(m_node.get()), input_shapes)};
     }
 };
@@ -333,8 +334,8 @@ class ShapeInferPaddingTA : public ShapeInferPaddingBase {
 public:
     using ShapeInferPaddingBase::ShapeInferPaddingBase;
 
-    ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                 const ov::ITensorAccessor& tensor_accessor) override {
+    std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                  const ov::ITensorAccessor& tensor_accessor) override {
         return {shape_infer(static_cast<TOp*>(m_node.get()), input_shapes, m_pads_begin, m_pads_end, tensor_accessor)};
     }
@@ -354,8 +355,8 @@ class ShapeInferPaddingTA : public ShapeInferPaddingBase {
 public:
     using ShapeInferPaddingBase::ShapeInferPaddingBase;
 
-    ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                 const ov::ITensorAccessor&) override {
+    std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                  const ov::ITensorAccessor&) override {
         return {shape_infer(static_cast<TOp*>(m_node.get()), input_shapes, m_pads_begin, m_pads_end)};
     }
 };
diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
index b046f5dacdb36a..198119578aecd0 100644
--- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
+++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
@@ -4,9 +4,10 @@
 
 #pragma once
 
+#include <optional>
+
 #include "openvino/core/core.hpp"
 #include "openvino/core/node.hpp"
-#include "ov_optional.hpp"
 #include "shape_inference/shape_inference_cpu.hpp"
 #include "shape_inference/static_shape.hpp"
 #include "tensor_data_accessor.hpp"
@@ -24,8 +25,8 @@ class IStaticShapeInfer : public IShapeInfer {
      * @param tensor_accessor Accessor to CPU constant data specific for operator.
      * @return Optionally return vector of static shape adapters holding CPU dimensions.
      */
-    virtual ov::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
-                                                         const ov::ITensorAccessor& tensor_accessor) = 0;
+    virtual std::optional<std::vector<StaticShape>> infer(const std::vector<StaticShapeRef>& input_shapes,
+                                                          const ov::ITensorAccessor& tensor_accessor) = 0;
 
     virtual const std::vector& get_input_ranks() = 0;
 };
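To close, the contract of IStaticShapeInfer::infer carries over unchanged: an empty optional still means "shapes could not be inferred statically" and the caller falls back to dynamic inference. A minimal sketch under that assumption (simplified types, not the real interface):

```cpp
#include <cassert>
#include <cstddef>
#include <optional>
#include <vector>

using StaticShape = std::vector<size_t>;  // stand-in for ov::intel_cpu::StaticShape

// Hypothetical infer(): std::nullopt signals "fall back to dynamic shape
// inference", mirroring how ShapeInferFallback can bail out above.
std::optional<std::vector<StaticShape>> infer(bool resolvable) {
    if (!resolvable) {
        return std::nullopt;
    }
    return std::vector<StaticShape>{{1, 3, 224, 224}};
}

int main() {
    auto shapes = infer(true);
    assert(shapes.has_value() && shapes->size() == 1);
    assert(!infer(false).has_value());
    return 0;
}
```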