From e45cda628965712cce316f7fdbfa264ba2773654 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov <ilya.lavrenov@intel.com>
Date: Sun, 20 Mar 2022 13:34:05 +0300
Subject: [PATCH] Try to fix API reference for new API

---
 docs/api/api_reference.rst                     |  5 ++-
 .../include/transformations_visibility.hpp     |  1 -
 .../include/openvino/core/core_visibility.hpp  | 23 ++++++++++
 src/core/include/openvino/core/model.hpp       |  6 ++-
 src/inference/include/ie/ie_version.hpp        |  1 -
 .../include/openvino/runtime/common.hpp        |  1 +
 .../openvino/runtime/compiled_model.hpp        |  1 +
 .../include/openvino/runtime/core.hpp          |  1 +
 .../include/openvino/runtime/exception.hpp     |  7 ++-
 .../openvino/runtime/infer_request.hpp         |  1 +
 .../openvino/runtime/intel_gna/properties.hpp  | 19 ++++++++
 .../openvino/runtime/intel_gpu/ocl/dx.hpp      |  5 ++-
 .../openvino/runtime/intel_gpu/ocl/ocl.hpp     | 12 +++++
 .../openvino/runtime/intel_gpu/ocl/va.hpp      |  2 +
 .../openvino/runtime/intel_gpu/properties.hpp  | 45 +++++++++++++++++--
 .../runtime/intel_gpu/remote_properties.hpp    | 12 +++++
 .../openvino/runtime/profiling_info.hpp        |  2 +
 .../include/openvino/runtime/properties.hpp    |  2 +
 .../openvino/runtime/remote_context.hpp        |  1 +
 .../openvino/runtime/remote_tensor.hpp         |  3 +-
 .../openvino/runtime/variable_state.hpp        |  1 +
 21 files changed, 138 insertions(+), 13 deletions(-)

diff --git a/docs/api/api_reference.rst b/docs/api/api_reference.rst
index 7010d07c2830a6..5c34983b59c9e3 100644
--- a/docs/api/api_reference.rst
+++ b/docs/api/api_reference.rst
@@ -10,6 +10,7 @@ API references available:
 .. toctree::
    :maxdepth: 2
 
-
-   ../groupie_cpp_api
+
+   ../groupov_cpp_api
+   ../groupie_c_api
    ie_python_api/api
diff --git a/src/common/transformations/include/transformations_visibility.hpp b/src/common/transformations/include/transformations_visibility.hpp
index b75bc4b898a73d..a5445844ee643e 100644
--- a/src/common/transformations/include/transformations_visibility.hpp
+++ b/src/common/transformations/include/transformations_visibility.hpp
@@ -12,7 +12,6 @@
  */
 
 /**
- * @ingroup ie_cpp_api
  * @defgroup ie_transformation_api Inference Engine Transformation API
  * @brief Defines Inference Engine Transformations API which is used to transform ngraph::Function
  *
diff --git a/src/core/include/openvino/core/core_visibility.hpp b/src/core/include/openvino/core/core_visibility.hpp
index 18154e50b138de..37313dc4068e00 100644
--- a/src/core/include/openvino/core/core_visibility.hpp
+++ b/src/core/include/openvino/core/core_visibility.hpp
@@ -11,6 +11,29 @@
 // OPENVINO_API is used for the public API symbols. It either DLL imports or DLL exports
 // (or does nothing for static build)
 
+/**
+ * @defgroup ov_cpp_api OpenVINO Runtime C++ API
+ * OpenVINO Runtime C++ API
+ *
+ * @defgroup ov_model_cpp_api OpenVINO Core C++ API to work with ov::Model
+ * @ingroup ov_cpp_api
+ * OpenVINO Core C++ API to work with ov::Model, dynamic and static shapes, types
+ *
+ * @defgroup ov_ops_cpp_api OpenVINO C++ API to create operations
+ * @ingroup ov_cpp_api
+ * OpenVINO C++ API to create operations from different opsets. Such API is used to
+ * create models from code, write transformations and traverse the model graph
+ *
+ * @defgroup ov_opset_cpp_api OpenVINO C++ API to work with operation sets
+ * @ingroup ov_cpp_api
+ * OpenVINO C++ API to work with operation sets
+ *
+ * @defgroup ov_runtime_cpp_api OpenVINO Inference C++ API
+ * @ingroup ov_cpp_api
+ * OpenVINO Inference C++ API provides ov::Core, ov::CompiledModel, ov::InferRequest
+ * and ov::Tensor classes
+ */
+
 #ifdef _WIN32
 #    pragma warning(disable : 4251)
 #    pragma warning(disable : 4275)
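The ov_model_cpp_api and ov_ops_cpp_api groups defined above document building models from code. For orientation, a minimal sketch of that workflow against the public headers (illustrative only, not part of the patch; the shape and model name are arbitrary):

    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/opsets/opset8.hpp"

    int main() {
        // Parameter -> ReLU -> Result: the smallest useful ov::Model.
        auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 3, 224, 224});
        auto relu = std::make_shared<ov::opset8::Relu>(input);
        auto result = std::make_shared<ov::opset8::Result>(relu);
        // ov::Model ties results and parameters together into a traversable graph.
        auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input}, "trivial_relu");
        return 0;
    }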
diff --git a/src/core/include/openvino/core/model.hpp b/src/core/include/openvino/core/model.hpp
index a5a357ad865db0..a97d7800ffe790 100644
--- a/src/core/include/openvino/core/model.hpp
+++ b/src/core/include/openvino/core/model.hpp
@@ -34,7 +34,11 @@ class FrontEnd;
 }
 
 class ModelAccessor;
-/// A user-defined model.
+
+/**
+ * @brief A user-defined model
+ * @ingroup ov_model_cpp_api
+ */
 class OPENVINO_API Model : public std::enable_shared_from_this<Model> {
     friend class frontend::FrontEnd;
     friend OPENVINO_API std::shared_ptr<Model> clone_model(const Model& func,
diff --git a/src/inference/include/ie/ie_version.hpp b/src/inference/include/ie/ie_version.hpp
index 3ea974baf1f913..a36031e58ef15d 100644
--- a/src/inference/include/ie/ie_version.hpp
+++ b/src/inference/include/ie/ie_version.hpp
@@ -27,7 +27,6 @@
 #include "ie_api.h"
 
 /**
- * @ingroup ie_cpp_api
  * @brief Inference Engine C++ API
  */
 namespace InferenceEngine {
diff --git a/src/inference/include/openvino/runtime/common.hpp b/src/inference/include/openvino/runtime/common.hpp
index 772d1ed78639b7..4d5da824b38302 100644
--- a/src/inference/include/openvino/runtime/common.hpp
+++ b/src/inference/include/openvino/runtime/common.hpp
@@ -46,6 +46,7 @@ namespace ie = InferenceEngine;
 
 /**
  * @brief This type of map is used for result of Core::query_model
+ * @ingroup ov_runtime_cpp_api
  *   - `key` means operation name
  *   - `value` means device name supporting this operation
  */
diff --git a/src/inference/include/openvino/runtime/compiled_model.hpp b/src/inference/include/openvino/runtime/compiled_model.hpp
index 5d81f69242c96c..4ee42d225317be 100644
--- a/src/inference/include/openvino/runtime/compiled_model.hpp
+++ b/src/inference/include/openvino/runtime/compiled_model.hpp
@@ -32,6 +32,7 @@ class InferRequest;
 
 /**
  * @brief This class represents a compiled model.
+ * @ingroup ov_runtime_cpp_api
  * A model is compiled by a specific device by applying multiple optimization
  * transformations, then mapping to compute kernels.
  */
diff --git a/src/inference/include/openvino/runtime/core.hpp b/src/inference/include/openvino/runtime/core.hpp
index e98928f32a3244..c2f20af36ea3d6 100644
--- a/src/inference/include/openvino/runtime/core.hpp
+++ b/src/inference/include/openvino/runtime/core.hpp
@@ -34,6 +34,7 @@ namespace ov {
 
 /**
  * @brief This class represents an OpenVINO runtime Core entity.
+ * @ingroup ov_runtime_cpp_api
  * User applications can create several Core class instances, but in this case the underlying plugins
  * are created multiple times and not shared between several Core instances. The recommended way is to have
  * a single Core instance per application.
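The ov_runtime_cpp_api group tagged above spans ov::Core, ov::CompiledModel, ov::InferRequest and ov::Tensor. A minimal end-to-end sketch (illustrative only; the IR path model.xml is a placeholder and a CPU device is assumed to be available):

    #include <cstring>

    #include "openvino/runtime/core.hpp"

    int main() {
        ov::Core core;  // a single Core instance per application, as recommended above
        auto model = core.read_model("model.xml");         // placeholder path
        auto compiled = core.compile_model(model, "CPU");  // compile for a device
        ov::InferRequest request = compiled.create_infer_request();
        // Zero-fill the first input, run synchronously, read the first output.
        ov::Tensor input = request.get_input_tensor();
        std::memset(input.data(), 0, input.get_byte_size());
        request.infer();
        ov::Tensor output = request.get_output_tensor();
        return 0;
    }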
diff --git a/src/inference/include/openvino/runtime/exception.hpp b/src/inference/include/openvino/runtime/exception.hpp
index 1a1735eb8c33a7..4046dbcbd1b6c0 100644
--- a/src/inference/include/openvino/runtime/exception.hpp
+++ b/src/inference/include/openvino/runtime/exception.hpp
@@ -11,16 +11,19 @@
 namespace ov {
 
 /**
  * @brief Thrown in case of cancelled asynchronous operation.
+ * @ingroup ov_runtime_cpp_api
  */
 class OPENVINO_RUNTIME_API Cancelled : public Exception {
     using Exception::Exception;
 };
 
 /**
- * @brief Thrown in case of calling the InferRequest methods while the request is busy with compute operation.
+ * @brief Thrown in case of calling the InferRequest methods while the request is
+ * busy with compute operation.
+ * @ingroup ov_runtime_cpp_api
  */
 class OPENVINO_RUNTIME_API Busy : public Exception {
     using Exception::Exception;
 };
-}  // namespace ov
\ No newline at end of file
+}  // namespace ov
diff --git a/src/inference/include/openvino/runtime/infer_request.hpp b/src/inference/include/openvino/runtime/infer_request.hpp
index 4c85e55dade9c7..3465ea1ef18a94 100644
--- a/src/inference/include/openvino/runtime/infer_request.hpp
+++ b/src/inference/include/openvino/runtime/infer_request.hpp
@@ -29,6 +29,7 @@ class CompiledModel;
 
 /**
  * @brief This is a class of infer request that can be run in asynchronous or synchronous manners.
+ * @ingroup ov_runtime_cpp_api
  */
 class OPENVINO_RUNTIME_API InferRequest {
     std::shared_ptr<InferenceEngine::IInferRequestInternal> _impl;
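ov::Busy and ov::Cancelled from the hunks above surface during asynchronous execution. A sketch of how a caller typically guards start_async() (illustrative; the request is assumed to come from a compiled model):

    #include "openvino/runtime/exception.hpp"
    #include "openvino/runtime/infer_request.hpp"

    void run_once(ov::InferRequest& request) {
        try {
            request.start_async();
            request.wait();
        } catch (const ov::Busy&) {
            // A previous start_async() is still running on this request.
        } catch (const ov::Cancelled&) {
            // The request was cancelled via request.cancel().
        }
    }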
diff --git a/src/inference/include/openvino/runtime/intel_gna/properties.hpp b/src/inference/include/openvino/runtime/intel_gna/properties.hpp
index fc7d5d3599f1ac..52403d7b3ee969 100644
--- a/src/inference/include/openvino/runtime/intel_gna/properties.hpp
+++ b/src/inference/include/openvino/runtime/intel_gna/properties.hpp
@@ -14,20 +14,29 @@
 
 namespace ov {
 
+/**
+ * @defgroup ov_runtime_gna_prop_cpp_api Intel GNA specific properties
+ * @ingroup ov_runtime_cpp_api
+ * Set of Intel GNA specific properties.
+ */
+
 /**
  * @brief Namespace with Intel GNA specific properties
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 namespace intel_gna {
 
 /**
  * @brief Property to get an std::string of GNA Library version, usually in the form
  * <API_REVISION>.<RELEASE_LINE>.<RELEASE>.<BUILD>
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 static constexpr Property<std::string, PropertyMutability::RO> library_full_version{"GNA_LIBRARY_FULL_VERSION"};
 
 /**
  * @brief Scale factor provided by the user to use static quantization.
  * This option should be used with floating point value serialized to string with . (dot) as a decimal separator
+ * @ingroup ov_runtime_gna_prop_cpp_api
  * @details In the case of multiple inputs, individual scale factors can be provided using the
  * map where key is layer name and value is scale factor
  * Example:
@@ -45,11 +54,13 @@ static constexpr Property<std::map<std::string, float>> scale_factors_per_input{
 
 /**
  * @brief if turned on, dump GNA firmware model into specified file
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 static constexpr Property<std::string> firmware_model_image_path{"GNA_FIRMWARE_MODEL_IMAGE"};
 
 /**
  * @brief Enum to define software acceleration mode
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 enum class ExecutionMode {
     AUTO = 0,  //!< Uses Intel GNA if available, otherwise uses software execution mode on CPU.
@@ -103,6 +114,7 @@ inline std::istream& operator>>(std::istream& is, ExecutionMode& execution_mode)
 
 /**
  * @brief Enum to define HW compile and execution targets
+ * @ingroup ov_runtime_gna_prop_cpp_api
 */
 enum class HWGeneration {
     UNDEFINED = 0,  //!< GNA HW generation is undefined
@@ -143,6 +155,7 @@ inline std::istream& operator>>(std::istream& is, HWGeneration& hw_generation) {
 /**
  * @brief GNA proc_type setting that should be one of AUTO, HW, GNA_HW_WITH_SW_FBACK,
  * GNA_SW_EXACT or SW_FP32
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 static constexpr Property<ExecutionMode> execution_mode{"GNA_DEVICE_MODE"};
 
@@ -153,22 +166,26 @@ static constexpr Property<ExecutionMode> execution_mode{"GNA_DEVICE_MODE"};
  * If HW is not present, use the option corresponding to the latest fully supported GNA HW generation.
  * A fully supported GNA HW generation means it must be supported by both the OV GNA Plugin and the core GNA Library.
  * Currently, the latest supported GNA HW generation corresponds to GNA_3_0.
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 static constexpr Property<HWGeneration> execution_target{"GNA_HW_EXECUTION_TARGET"};
 
 /**
  * @brief The option to override the GNA HW compile target. May be one of GNA_2_0, GNA_3_0.
  * By default the same as execution_target.
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 static constexpr Property<HWGeneration> compile_target{"GNA_HW_COMPILE_TARGET"};
 
 /**
  * @brief if enabled produced minimum memory footprint for compiled model in GNA memory, default value is true
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 static constexpr Property<bool> memory_reuse{"GNA_COMPACT_MODE"};
 
 /**
  * @brief Enum to define PWL design algorithm
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 enum class PWLDesignAlgorithm {
     UNDEFINED = 0,  //!< PWL approximation algorithm is undefined
@@ -213,6 +230,7 @@ inline std::istream& operator>>(std::istream& is, PWLDesignAlgorithm& pwl_design
  * If value is UNIFORM_DISTRIBUTION then simple uniform distribution is used to create
  * PWL approximation of activation functions.
  * Uniform distribution usually gives poor approximation with the same number of segments
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 static constexpr Property<PWLDesignAlgorithm> pwl_design_algorithm{"GNA_PWL_DESIGN_ALGORITHM"};
 
@@ -220,6 +238,7 @@ static constexpr Property<PWLDesignAlgorithm> pwl_design_algorithm{"GNA_PWL_DESI
  * @brief The option to allow to specify the maximum error percent that the optimized algorithm finding
  * will be used to find PWL functions.
  * By default (in case of NO value set), 1.0 value is used.
+ * @ingroup ov_runtime_gna_prop_cpp_api
  */
 static constexpr Property<float> pwl_max_error_percent{"GNA_PWL_MAX_ERROR_PERCENT"};
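The GNA properties grouped above are passed at compile time. A sketch of setting the execution mode and a per-input scale factor (illustrative; the device string "GNA", the input name "input_0", the factor 8.0 and the IR path are example values only):

    #include <map>

    #include "openvino/runtime/core.hpp"
    #include "openvino/runtime/intel_gna/properties.hpp"

    int main() {
        ov::Core core;
        auto model = core.read_model("model.xml");  // placeholder path
        auto compiled = core.compile_model(
            model,
            "GNA",
            ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT),
            ov::intel_gna::scale_factors_per_input(std::map<std::string, float>{{"input_0", 8.0f}}));
        return 0;
    }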
diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
index 11aaab765c5174..7df9f2098995a4 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
@@ -3,7 +3,7 @@
 //
 
 /**
- * @brief a header that defines wrappers for internal GPU plugin-specific
+ * @brief A header that defines wrappers for internal GPU plugin-specific
  * shared Video Acceleration device contexts
  * and shared memory tensors which contain Video Acceleration surfaces
  *
@@ -35,6 +35,7 @@ namespace ocl {
  * which is shared with Direct3D 11 buffer.
  * The plugin object derived from this class can be obtained with D3DContext::create_tensor() call.
  * @note User can also obtain OpenCL buffer handle from this class.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class D3DBufferTensor : public ClBufferTensor {
 public:
@@ -62,6 +63,7 @@ class D3DBufferTensor : public ClBufferTensor {
  * which is shared with Direct3D 11 2D texture.
  * The plugin object derived from this class can be obtained with D3DContext::create_tensor() call.
  * @note User can also obtain OpenCL 2D image handle from this class.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class D3DSurface2DTensor : public ClImage2DTensor {
 public:
@@ -99,6 +101,7 @@ class D3DSurface2DTensor : public ClImage2DTensor {
  * The plugin object derived from this class can be obtained either with
  * CompiledModel::get_context() or Core::create_context() calls.
  * @note User can also obtain OpenCL context handle from this class.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class D3DContext : public ClContext {
 public:
diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
index 3923fb2d81c733..0b040c804d30ba 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
@@ -22,13 +22,21 @@
 namespace ov {
 namespace intel_gpu {
 
+/**
+ * @defgroup ov_runtime_ocl_gpu_cpp_api Intel GPU OpenCL remote objects API
+ * @ingroup ov_runtime_cpp_api
+ * Set of C++ classes and properties to work with Remote API for Intel GPU OpenCL plugin.
+ */
+
 /**
  * @brief Namespace with Intel GPU OpenCL specific remote objects
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 namespace ocl {
 
 /**
  * @brief Shortcut for defining a handle parameter
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 using gpu_handle_param = void*;
 
@@ -37,6 +45,7 @@ using gpu_handle_param = void*;
  * which can be shared with user-supplied OpenCL buffer.
  * The plugin object derived from this class can be obtained with ClContext::create_tensor() call.
  * @note User can obtain OpenCL buffer handle from this class.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class ClBufferTensor : public RemoteTensor {
 public:
@@ -81,6 +90,7 @@ class ClBufferTensor : public RemoteTensor {
  * which can be shared with user-supplied OpenCL 2D Image.
  * The plugin object derived from this class can be obtained with ClContext::create_tensor() call.
  * @note User can obtain OpenCL image handle from this class.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class ClImage2DTensor : public RemoteTensor {
 public:
@@ -125,6 +135,7 @@ class ClImage2DTensor : public RemoteTensor {
  * which can be shared with user-supplied USM device pointer.
  * The plugin object derived from this class can be obtained with ClContext::create_tensor() call.
  * @note User can obtain USM pointer from this class.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class USMTensor : public RemoteTensor {
 public:
@@ -155,6 +166,7 @@ class USMTensor : public RemoteTensor {
  * which is shared with OpenCL context object.
  * The plugin object derived from this class can be obtained either with
  * CompiledModel::get_context() or Core::create_context() calls.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class ClContext : public RemoteContext {
 protected:
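The ov_runtime_ocl_gpu_cpp_api classes above wrap user OpenCL objects without copies. A sketch of sharing an existing cl_context and cl_mem with the GPU plugin (illustrative; it assumes the model was compiled against the same shared context, e.g. via core.compile_model(model, gpu_context)):

    #include <CL/cl.h>

    #include "openvino/runtime/core.hpp"
    #include "openvino/runtime/intel_gpu/ocl/ocl.hpp"

    void infer_on_user_buffer(ov::Core& core, ov::CompiledModel& compiled, cl_context ctx, cl_mem user_buffer) {
        ov::intel_gpu::ocl::ClContext gpu_context(core, ctx);  // wrap the application's OpenCL context
        auto input = compiled.input();
        // Wrap the user buffer as a remote tensor; no host copy is made.
        auto tensor = gpu_context.create_tensor(input.get_element_type(), input.get_shape(), user_buffer);
        ov::InferRequest request = compiled.create_infer_request();
        request.set_tensor(input, tensor);
        request.infer();
    }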
diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
index 7ab313d4d655cf..89f51fe1ac31b6 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
@@ -33,6 +33,7 @@ namespace ocl {
  * which is shared with VA output surface.
  * The plugin object derived from this class can be obtained with VAContext::create_tensor() call.
  * @note User can also obtain OpenCL 2D image handle from this class.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class VASurfaceTensor : public ClImage2DTensor {
 public:
@@ -69,6 +70,7 @@ class VASurfaceTensor : public ClImage2DTensor {
  * The plugin object derived from this class can be obtained either with
  * CompiledModel::get_context() or Core::create_context() calls.
  * @note User can also obtain OpenCL context handle from this class.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 class VAContext : public ClContext {
 public:
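VASurfaceTensor and VAContext above cover the VAAPI interop path. A sketch of wrapping a decoded NV12 surface (illustrative; the display, surface id and the 640x480 size would come from the application's decode pipeline and are assumptions here):

    #include <va/va.h>

    #include "openvino/runtime/core.hpp"
    #include "openvino/runtime/intel_gpu/ocl/va.hpp"

    void wrap_surface(ov::Core& core, VADisplay display, VASurfaceID surface) {
        ov::intel_gpu::ocl::VAContext va_context(core, display);
        // NV12 maps to two remote tensors: the Y plane and the UV plane.
        auto y_uv = va_context.create_tensor_nv12(480, 640, surface);
    }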
diff --git a/src/inference/include/openvino/runtime/intel_gpu/properties.hpp b/src/inference/include/openvino/runtime/intel_gpu/properties.hpp
index d363056a45e039..fff13d01521331 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/properties.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/properties.hpp
@@ -14,30 +14,41 @@
 
 namespace ov {
 
+/**
+ * @defgroup ov_runtime_ocl_gpu_prop_cpp_api Intel GPU OpenCL specific properties
+ * @ingroup ov_runtime_cpp_api
+ * Set of Intel GPU OpenCL specific properties.
+ */
+
 /**
  * @brief Namespace with Intel GPU specific properties
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 namespace intel_gpu {
 
 /**
  * @brief Read-only property which defines size of memory in bytes available for the device. For iGPU it returns host
  * memory size, for dGPU - dedicated gpu memory size
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 static constexpr Property<uint64_t, PropertyMutability::RO> device_total_mem_size{"GPU_DEVICE_TOTAL_MEM_SIZE"};
 
 /**
  * @brief Read-only property to get microarchitecture identifier in major.minor.revision format
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 static constexpr Property<std::string, PropertyMutability::RO> uarch_version{"GPU_UARCH_VERSION"};
 
 /**
  * @brief Read-only property to get count of execution units for current GPU
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 static constexpr Property<int32_t, PropertyMutability::RO> execution_units_count{"GPU_EXECUTION_UNITS_COUNT"};
 
 /**
  * @brief Read-only property to get statistics of GPU memory allocated by engine for each allocation type
  * It contains information about current memory usage
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 static constexpr Property<std::map<std::string, uint64_t>, PropertyMutability::RO> memory_statistics{
     "GPU_MEMORY_STATISTICS"};
@@ -48,7 +59,9 @@ static constexpr Property<std::map<std::string, uint64_t>, PropertyMutability::R
  * not too many iteration counts (less than 16, as a rule of thumb). Turning this key off will achieve better
  * performance for both graph loading time and inference time with many iteration counts (greater than 16). Note that
  * turning this key on will increase the graph loading time in proportion to the iteration counts.
- * Thus, this key should be turned off if graph loading time is considered to be most important target to optimize.*/
+ * Thus, this key should be turned off if graph loading time is considered to be most important target to optimize.
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
+ */
 static constexpr Property<bool> enable_loop_unrolling{"GPU_ENABLE_LOOP_UNROLLING"};
 
 namespace hint {
@@ -57,6 +70,7 @@ namespace hint {
  * - LOW is used for CL_QUEUE_THROTTLE_LOW_KHR OpenCL throttle hint
  * - MEDIUM (DEFAULT) is used for CL_QUEUE_THROTTLE_MED_KHR OpenCL throttle hint
  * - HIGH is used for CL_QUEUE_THROTTLE_HIGH_KHR OpenCL throttle hint
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 using ThrottleLevel = ov::hint::Priority;
 
@@ -64,6 +78,7 @@ using ThrottleLevel = ov::hint::Priority;
  * @brief This key instructs the GPU plugin to use OpenCL queue throttle hints
  * as defined in https://www.khronos.org/registry/OpenCL/specs/opencl-2.1-extensions.pdf,
  * chapter 9.19. This option should be used with ov::intel_gpu::hint::ThrottleLevel values.
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 static constexpr Property<ThrottleLevel> queue_throttle{"GPU_QUEUE_THROTTLE"};
 
@@ -74,6 +89,7 @@ static constexpr Property<ThrottleLevel> queue_throttle{"GPU_QUEUE_THROTTLE"};
 * - LOW is used for CL_QUEUE_PRIORITY_LOW_KHR OpenCL priority hint
 * - MEDIUM (DEFAULT) is used for CL_QUEUE_PRIORITY_MED_KHR OpenCL priority hint
 * - HIGH is used for CL_QUEUE_PRIORITY_HIGH_KHR OpenCL priority hint
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 static constexpr Property<ov::hint::Priority> queue_priority{"GPU_QUEUE_PRIORITY"};
 
@@ -83,28 +99,49 @@ static constexpr Property<ov::hint::Priority> queue_priority{"GPU_QUEUE_PRIORITY
 * - LOW - instructs the GPU Plugin to use LITTLE cores if they are available
 * - MEDIUM (DEFAULT) - instructs the GPU Plugin to use any available cores (BIG or LITTLE cores)
 * - HIGH - instructs the GPU Plugin to use BIG cores if they are available
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 static constexpr Property<ov::hint::Priority> host_task_priority{"GPU_HOST_TASK_PRIORITY"};
 
 /**
  * @brief This key identifies available device memory size in bytes
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 static constexpr Property<int64_t> available_device_mem{"AVAILABLE_DEVICE_MEM_SIZE"};
 
 }  // namespace hint
 
 /**
  * @brief These keys instruct the GPU plugin to use surface/buffer memory type.
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 namespace memory_type {
-static constexpr auto surface = "GPU_SURFACE";  //!< Native video decoder surface
-static constexpr auto buffer = "GPU_BUFFER";    //!< OpenCL buffer
+
+/**
+ * @brief Native video decoder surface
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
+ */
+static constexpr auto surface = "GPU_SURFACE";
+
+/**
+ * @brief OpenCL buffer
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
+ */
+static constexpr auto buffer = "GPU_BUFFER";
+
 }  // namespace memory_type
 
 /**
  * @brief Possible return value for ov::device::capabilities property
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
 */
 namespace capability {
-constexpr static const auto HW_MATMUL = "GPU_HW_MATMUL";  //!< Device has hardware block for matrix multiplication
+
+/**
+ * @brief Device has hardware block for matrix multiplication
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
+ */
+constexpr static const auto HW_MATMUL = "GPU_HW_MATMUL";
+
 }  // namespace capability
 }  // namespace intel_gpu
 }  // namespace ov
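The read-only metrics tagged above are queried through ov::Core::get_property. A short sketch (illustrative; a system with a "GPU" device is assumed):

    #include <iostream>

    #include "openvino/runtime/core.hpp"
    #include "openvino/runtime/intel_gpu/properties.hpp"

    int main() {
        ov::Core core;
        auto total_mem = core.get_property("GPU", ov::intel_gpu::device_total_mem_size);
        auto uarch = core.get_property("GPU", ov::intel_gpu::uarch_version);
        std::cout << "GPU memory: " << total_mem << " bytes, uarch: " << uarch << "\n";
        return 0;
    }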
diff --git a/src/inference/include/openvino/runtime/intel_gpu/remote_properties.hpp b/src/inference/include/openvino/runtime/intel_gpu/remote_properties.hpp
index 2487c16a6b23dd..a0218b9b4514be 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/remote_properties.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/remote_properties.hpp
@@ -19,6 +19,7 @@ using gpu_handle_param = void*;
 
 /**
  * @brief Enum to define the type of the shared context
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 enum class ContextType {
     OCL = 0,        //!< Pure OpenCL context
@@ -54,40 +55,47 @@ inline std::istream& operator>>(std::istream& is, ContextType& context_type) {
 /**
  * @brief Shared device context type: can be either pure OpenCL (OCL)
  * or shared video decoder (VA_SHARED) context
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<ContextType> context_type{"CONTEXT_TYPE"};
 
 /**
  * @brief This key identifies OpenCL context handle
  * in a shared context or shared memory blob parameter map
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<gpu_handle_param> ocl_context{"OCL_CONTEXT"};
 
 /**
  * @brief This key identifies ID of device in OpenCL context
  * if multiple devices are present in the context
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<int> ocl_context_device_id{"OCL_CONTEXT_DEVICE_ID"};
 
 /**
  * @brief In case of multi-tile system,
 * this key identifies tile within given context
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<int> tile_id{"TILE_ID"};
 
 /**
  * @brief This key identifies OpenCL queue handle in a shared context
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<gpu_handle_param> ocl_queue{"OCL_QUEUE"};
 
 /**
  * @brief This key identifies video acceleration device/display handle
  * in a shared context or shared memory blob parameter map
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<gpu_handle_param> va_device{"VA_DEVICE"};
 
 /**
  * @brief Enum to define the type of the shared memory buffer
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 enum class SharedMemType {
     OCL_BUFFER = 0,  //!< Shared OpenCL buffer blob
@@ -148,18 +156,21 @@ inline std::istream& operator>>(std::istream& is, SharedMemType& share_mem_type)
 /**
  * @brief This key identifies type of internal shared memory
  * in a shared memory blob parameter map.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<SharedMemType> shared_mem_type{"SHARED_MEM_TYPE"};
 
 /**
  * @brief This key identifies OpenCL memory handle
  * in a shared memory blob parameter map
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<gpu_handle_param> mem_handle{"MEM_HANDLE"};
 
 /**
  * @brief This key identifies video decoder surface handle
  * in a shared memory blob parameter map
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 #ifdef _WIN32
 static constexpr Property<gpu_handle_param> dev_object_handle{"DEV_OBJECT_HANDLE"};
@@ -170,6 +181,7 @@ static constexpr Property<gpu_handle_param> dev_object_handle{"DEV_OBJECT_HANDLE
 /**
  * @brief This key identifies video decoder surface plane
  * in a shared memory blob parameter map
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
 static constexpr Property<uint32_t> va_plane{"VA_PLANE"};
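These keys are consumed as a parameter map when building a shared context by hand. A sketch of creating a context around an application-owned cl_context (illustrative; the handle is assumed valid and the map-based form is an alternative to the ocl::ClContext wrapper shown earlier):

    #include <CL/cl.h>

    #include "openvino/runtime/core.hpp"
    #include "openvino/runtime/intel_gpu/remote_properties.hpp"

    ov::RemoteContext make_shared_context(ov::Core& core, cl_context user_context) {
        ov::AnyMap params{
            {ov::intel_gpu::context_type.name(), ov::intel_gpu::ContextType::OCL},
            {ov::intel_gpu::ocl_context.name(), static_cast<ov::intel_gpu::gpu_handle_param>(user_context)}};
        return core.create_context("GPU", params);
    }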
diff --git a/src/inference/include/openvino/runtime/profiling_info.hpp b/src/inference/include/openvino/runtime/profiling_info.hpp
index 701cf719c848d2..1ab9056940ef17 100644
--- a/src/inference/include/openvino/runtime/profiling_info.hpp
+++ b/src/inference/include/openvino/runtime/profiling_info.hpp
@@ -18,6 +18,7 @@ namespace ov {
 /**
  * @struct ProfilingInfo
  * @brief Represents basic inference profiling information per operation.
+ * @ingroup ov_runtime_cpp_api
  *
  * If the operation is executed using tiling, the sum time per each tile is indicated as the total execution time.
  * Due to parallel execution, the total execution time for all nodes might be greater than the total inference time.
@@ -41,6 +42,7 @@ struct ProfilingInfo {
      * @brief The absolute time, in microseconds, that the node ran (in total).
      */
     std::chrono::microseconds real_time;
+
     /**
      * @brief The net host CPU time that the node ran.
      */
diff --git a/src/inference/include/openvino/runtime/properties.hpp b/src/inference/include/openvino/runtime/properties.hpp
index 90a5a8e8e68f09..e37aeb873a7073 100644
--- a/src/inference/include/openvino/runtime/properties.hpp
+++ b/src/inference/include/openvino/runtime/properties.hpp
@@ -21,6 +21,8 @@
 #include "openvino/core/type/element_type.hpp"
 #include "openvino/runtime/common.hpp"
 
+
+
 namespace ov {
 
 /**
diff --git a/src/inference/include/openvino/runtime/remote_context.hpp b/src/inference/include/openvino/runtime/remote_context.hpp
index 876ab77174e129..819347b15a8635 100644
--- a/src/inference/include/openvino/runtime/remote_context.hpp
+++ b/src/inference/include/openvino/runtime/remote_context.hpp
@@ -29,6 +29,7 @@ class CompiledModel;
 
 /**
  * @brief This class represents an abstraction
+ * @ingroup ov_runtime_cpp_api
  * for remote (non-CPU) accelerator device-specific inference context.
  * Such context represents a scope on the device within which compiled
  * models and remote memory tensors can exist, function, and exchange data.
diff --git a/src/inference/include/openvino/runtime/remote_tensor.hpp b/src/inference/include/openvino/runtime/remote_tensor.hpp
index bb0312b63ec028..82259e2957a97c 100644
--- a/src/inference/include/openvino/runtime/remote_tensor.hpp
+++ b/src/inference/include/openvino/runtime/remote_tensor.hpp
@@ -17,7 +17,8 @@ namespace ov {
 class RemoteContext;
 
 /**
- * @brief Remote memory access and interpretation API.
+ * @brief Remote memory access and interoperability API.
+ * @ingroup ov_runtime_cpp_api
 */
 class OPENVINO_RUNTIME_API RemoteTensor : public Tensor {
     using Tensor::Tensor;
diff --git a/src/inference/include/openvino/runtime/variable_state.hpp b/src/inference/include/openvino/runtime/variable_state.hpp
index 94b724da3560b3..42ccf7a7747005 100644
--- a/src/inference/include/openvino/runtime/variable_state.hpp
+++ b/src/inference/include/openvino/runtime/variable_state.hpp
@@ -25,6 +25,7 @@ class InferRequest;
 
 /**
  * @brief VariableState class
+ * @ingroup ov_runtime_cpp_api
 */
 class OPENVINO_RUNTIME_API VariableState {
     std::shared_ptr<InferenceEngine::IVariableStateInternal> _impl;
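VariableState rounds out the runtime group. A sketch of resetting all states of a stateful model between independent sequences (illustrative only):

    #include "openvino/runtime/infer_request.hpp"
    #include "openvino/runtime/variable_state.hpp"

    void reset_states(ov::InferRequest& request) {
        for (ov::VariableState& state : request.query_state()) {
            state.reset();  // back to the initial value defined in the model
        }
    }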