
Commit

Try to fix API reference for new API
ilya-lavrenov committed Mar 20, 2022
1 parent 90717b1 commit e45cda6
Showing 21 changed files with 138 additions and 13 deletions.
5 changes: 3 additions & 2 deletions docs/api/api_reference.rst
@@ -10,6 +10,7 @@ API references available:

.. toctree::
:maxdepth: 2

../groupie_cpp_api

../groupov_cpp_api
../groupie_c_api
ie_python_api/api
@@ -12,7 +12,6 @@
*/

/**
* @ingroup ie_cpp_api
* @defgroup ie_transformation_api Inference Engine Transformation API
* @brief Defines Inference Engine Transformations API which is used to transform ngraph::Function
*
23 changes: 23 additions & 0 deletions src/core/include/openvino/core/core_visibility.hpp
@@ -11,6 +11,29 @@
// OPENVINO_API is used for the public API symbols. It either DLL imports or DLL exports
// (or does nothing for static build)

/**
* @defgroup ov_cpp_api OpenVINO Runtime C++ API
* OpenVINO Runtime C++ API
*
* @defgroup ov_model_cpp_api OpenVINO Core C++ API to work with ov::Model
* @ingroup ov_cpp_api
* OpenVINO Core C++ API to work with ov::Model, dynamic and static shapes, types
*
* @defgroup ov_ops_cpp_api OpenVINO C++ API to create operations
* @ingroup ov_cpp_api
* OpenVINO C++ API to create operations from different opsets. Such API is used to
* create models from code, write transformations, and traverse the model graph
*
* @defgroup ov_opset_cpp_api OpenVINO C++ API to work with operation sets
* @ingroup ov_cpp_api
* OpenVINO C++ API to work with operation sets
*
* @defgroup ov_runtime_cpp_api OpenVINO Inference C++ API
* @ingroup ov_cpp_api
* OpenVINO Inference C++ API provides ov::Core, ov::CompiledModel, ov::InferRequest
* and ov::Tensor classes
*/

#ifdef _WIN32
# pragma warning(disable : 4251)
# pragma warning(disable : 4275)
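For reference, a minimal sketch of how a declaration is attached to one of these groups via @ingroup; the helper function below is purely illustrative and not part of the API:

/**
 * @brief Illustrative helper documented as part of the ov::Model group.
 * @ingroup ov_model_cpp_api
 */
OPENVINO_API std::shared_ptr<ov::Model> example_helper(const ov::Model& model);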
6 changes: 5 additions & 1 deletion src/core/include/openvino/core/model.hpp
@@ -34,7 +34,11 @@ class FrontEnd;
}

class ModelAccessor;
/// A user-defined model.

/**
* @brief A user-defined model
* @ingroup ov_model_cpp_api
*/
class OPENVINO_API Model : public std::enable_shared_from_this<Model> {
friend class frontend::FrontEnd;
friend OPENVINO_API std::shared_ptr<Model> clone_model(const Model& func,
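For reference, a minimal sketch of building a small ov::Model from opset operations; the shapes and model name are illustrative:

#include <openvino/openvino.hpp>
#include <openvino/opsets/opset8.hpp>

int main() {
    // Parameter -> Relu -> Result graph wrapped into an ov::Model.
    auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 3, 224, 224});
    auto relu = std::make_shared<ov::opset8::Relu>(input);
    auto result = std::make_shared<ov::opset8::Result>(relu);
    auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input}, "relu_example");
    return 0;
}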
1 change: 0 additions & 1 deletion src/inference/include/ie/ie_version.hpp
@@ -27,7 +27,6 @@
#include "ie_api.h"

/**
* @ingroup ie_cpp_api
* @brief Inference Engine C++ API
*/
namespace InferenceEngine {
1 change: 1 addition & 0 deletions src/inference/include/openvino/runtime/common.hpp
@@ -46,6 +46,7 @@ namespace ie = InferenceEngine;

/**
* @brief This type of map is used for result of Core::query_model
* @ingroup ov_runtime_cpp_api
* - `key` means operation name
* - `value` means device name supporting this operation
*/
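A usage sketch for this map; the model path and device name are illustrative:

#include <iostream>
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // path is illustrative
    ov::SupportedOpsMap supported = core.query_model(model, "CPU");
    for (const auto& item : supported)
        std::cout << "operation " << item.first << " -> device " << item.second << std::endl;
    return 0;
}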
1 change: 1 addition & 0 deletions src/inference/include/openvino/runtime/compiled_model.hpp
@@ -32,6 +32,7 @@ class InferRequest;

/**
* @brief This class represents a compiled model.
* @ingroup ov_runtime_cpp_api
* A model is compiled by a specific device by applying multiple optimization
* transformations, then mapping to compute kernels.
*/
1 change: 1 addition & 0 deletions src/inference/include/openvino/runtime/core.hpp
@@ -34,6 +34,7 @@ namespace ov {

/**
* @brief This class represents an OpenVINO runtime Core entity.
* @ingroup ov_runtime_cpp_api
* User applications can create several Core class instances, but in this case the underlying plugins
* are created multiple times and not shared between several Core instances. The recommended way is to have
* a single Core instance per application.
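A usage sketch for ov::Core and ov::CompiledModel; the model path, device name and performance hint are illustrative:

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;                               // a single Core instance per application is recommended
    auto model = core.read_model("model.xml");   // path is illustrative
    ov::CompiledModel compiled = core.compile_model(
        model, "CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
    ov::InferRequest request = compiled.create_infer_request();
    return 0;
}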
7 changes: 5 additions & 2 deletions src/inference/include/openvino/runtime/exception.hpp
@@ -11,16 +11,19 @@ namespace ov {

/**
* @brief Thrown in case of cancelled asynchronous operation.
* @ingroup ov_runtime_cpp_api
*/
class OPENVINO_RUNTIME_API Cancelled : public Exception {
using Exception::Exception;
};

/**
* @brief Thrown in case of calling the InferRequest methods while the request is busy with compute operation.
* @brief Thrown in case of calling the InferRequest methods while the request is
* busy with compute operation.
* @ingroup ov_runtime_cpp_api
*/
class OPENVINO_RUNTIME_API Busy : public Exception {
using Exception::Exception;
};

} // namespace ov
} // namespace ov
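A sketch of where these exceptions surface, assuming an ov::InferRequest created as in the other snippets; calling a synchronous method while an asynchronous run is still in flight typically raises ov::Busy:

#include <iostream>
#include <openvino/openvino.hpp>

void infer_with_busy_check(ov::InferRequest& request) {
    request.start_async();
    try {
        request.infer();              // usually rejected while the start_async() run above is in flight
    } catch (const ov::Busy& e) {
        std::cerr << "Request is busy: " << e.what() << std::endl;
    }
    request.wait();                   // wait for the asynchronous run to finish
}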
1 change: 1 addition & 0 deletions src/inference/include/openvino/runtime/infer_request.hpp
@@ -29,6 +29,7 @@ class CompiledModel;

/**
* @brief This is a class of infer request that can be run in asynchronous or synchronous mode.
* @ingroup ov_runtime_cpp_api
*/
class OPENVINO_RUNTIME_API InferRequest {
std::shared_ptr<InferenceEngine::IInferRequestInternal> _impl;
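A minimal synchronous inference sketch with this class; the model path and device name are illustrative:

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    auto compiled = core.compile_model("model.xml", "CPU");   // path and device are illustrative
    ov::InferRequest request = compiled.create_infer_request();

    ov::Tensor input = request.get_input_tensor();
    // ... fill input.data<float>() with application data here ...
    request.infer();                                           // blocks until inference completes
    ov::Tensor output = request.get_output_tensor();
    return 0;
}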
19 changes: 19 additions & 0 deletions src/inference/include/openvino/runtime/intel_gna/properties.hpp
@@ -14,20 +14,29 @@

namespace ov {

/**
* @defgroup ov_runtime_gna_prop_cpp_api Intel GNA specific properties
* @ingroup ov_runtime_cpp_api
* Set of Intel GNA specific properties.
*/

/**
* @brief Namespace with Intel GNA specific properties
* @ingroup ov_runtime_gna_prop_cpp_api
*/
namespace intel_gna {

/**
* @brief Property to get an std::string of GNA Library version, usually in the form
* <API_REVISION>.<RELEASE_LINE>.<RELEASE>.<BUILD>
* @ingroup ov_runtime_gna_prop_cpp_api
*/
static constexpr Property<std::string, PropertyMutability::RO> library_full_version{"GNA_LIBRARY_FULL_VERSION"};

/**
* @brief Scale factor provided by the user to use static quantization.
* This option should be used with a floating-point value serialized to string with . (dot) as the decimal separator
* @ingroup ov_runtime_gna_prop_cpp_api
* @details In the case of multiple inputs, individual scale factors can be provided using the
* map where key is layer name and value is scale factor
* Example:
@@ -45,11 +54,13 @@ static constexpr Property<std::map<std::string, float>> scale_factors_per_input{

/**
* @brief If turned on, dumps the GNA firmware model into the specified file
* @ingroup ov_runtime_gna_prop_cpp_api
*/
static constexpr Property<std::string> firmware_model_image_path{"GNA_FIRMWARE_MODEL_IMAGE"};

/**
* @brief Enum to define software acceleration mode
* @ingroup ov_runtime_gna_prop_cpp_api
*/
enum class ExecutionMode {
AUTO = 0, //!< Uses Intel GNA if available, otherwise uses software execution mode on CPU.
@@ -103,6 +114,7 @@ inline std::istream& operator>>(std::istream& is, ExecutionMode& execution_mode)

/**
* @brief Enum to define HW compile and execution targets
* @ingroup ov_runtime_gna_prop_cpp_api
*/
enum class HWGeneration {
UNDEFINED = 0, //!< GNA HW generation is undefined
@@ -143,6 +155,7 @@ inline std::istream& operator>>(std::istream& is, HWGeneration& hw_generation) {
/**
* @brief GNA proc_type setting that should be one of AUTO, HW, GNA_HW_WITH_SW_FBACK,
* GNA_SW_EXACT or SW_FP32
* @ingroup ov_runtime_gna_prop_cpp_api
*/
static constexpr Property<ExecutionMode> execution_mode{"GNA_DEVICE_MODE"};

@@ -153,22 +166,26 @@ static constexpr Property<ExecutionMode> execution_mode{"GNA_DEVICE_MODE"};
* If HW is not present, use the option corresponding to the latest fully supported GNA HW generation.
* A fully supported GNA HW generation means it must be supported by both the OV GNA Plugin and the core GNA Library.
* Currently, the latest supported GNA HW generation corresponds to GNA_3_0.
* @ingroup ov_runtime_gna_prop_cpp_api
*/
static constexpr Property<HWGeneration> execution_target{"GNA_HW_EXECUTION_TARGET"};

/**
* @brief The option to override the GNA HW compile target. May be one of GNA_2_0, GNA_3_0.
* By default the same as execution_target.
* @ingroup ov_runtime_gna_prop_cpp_api
*/
static constexpr Property<HWGeneration> compile_target{"GNA_HW_COMPILE_TARGET"};

/**
* @brief If enabled, produces a minimum memory footprint for the compiled model in GNA memory; default value is true
* @ingroup ov_runtime_gna_prop_cpp_api
*/
static constexpr Property<bool> memory_reuse{"GNA_COMPACT_MODE"};

/**
* @brief Enum to define PWL design algorithm
* @ingroup ov_runtime_gna_prop_cpp_api
*/
enum class PWLDesignAlgorithm {
UNDEFINED = 0, //!< PWL approximation algorithm is undefined
@@ -213,13 +230,15 @@ inline std::istream& operator>>(std::istream& is, PWLDesignAlgorithm& pwl_design
* If value is UNIFORM_DISTRIBUTION then simple uniform distribution is used to create
* PWL approximation of activation functions.
* Uniform distribution usually gives poor approximation with the same number of segments
* @ingroup ov_runtime_gna_prop_cpp_api
*/
static constexpr Property<PWLDesignAlgorithm> pwl_design_algorithm{"GNA_PWL_DESIGN_ALGORITHM"};

/**
* @brief The option to specify the maximum error percent with which the optimized algorithm for finding PWL functions will be used.
* By default (if no value is set), 1.0 is used.
* @ingroup ov_runtime_gna_prop_cpp_api
*/
static constexpr Property<float> pwl_max_error_percent{"GNA_PWL_MAX_ERROR_PERCENT"};

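A configuration sketch combining several of these properties; the model path, input name and scale factor value are illustrative:

#include <openvino/openvino.hpp>
#include <openvino/runtime/intel_gna/properties.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // path is illustrative
    auto compiled = core.compile_model(model, "GNA",
        ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT),
        ov::intel_gna::scale_factors_per_input(std::map<std::string, float>{{"input", 8.0f}}),
        ov::intel_gna::memory_reuse(true));
    return 0;
}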
5 changes: 4 additions & 1 deletion src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp
@@ -3,7 +3,7 @@
//

/**
* @brief a header that defines wrappers for internal GPU plugin-specific
* @brief A header that defines wrappers for internal GPU plugin-specific
* shared Video Acceleration device contexts
* and shared memory tensors which contain Video Acceleration surfaces
*
@@ -35,6 +35,7 @@ namespace ocl {
* which is shared with Direct3D 11 buffer.
* The plugin object derived from this class can be obtained with D3DContext::create_tensor() call.
* @note User can also obtain OpenCL buffer handle from this class.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class D3DBufferTensor : public ClBufferTensor {
public:
@@ -62,6 +63,7 @@ class D3DBufferTensor : public ClBufferTensor {
* which is shared with Direct3D 11 2D texture.
* The plugin object derived from this class can be obtained with D3DContext::create_tensor() call.
* @note User can also obtain OpenCL 2D image handle from this class.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class D3DSurface2DTensor : public ClImage2DTensor {
public:
@@ -99,6 +101,7 @@ class D3DSurface2DTensor : public ClImage2DTensor {
* The plugin object derived from this class can be obtained either with
* CompiledModel::get_context() or Core::create_context() calls.
* @note User can also obtain OpenCL context handle from this class.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class D3DContext : public ClContext {
public:
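A sketch of sharing a Direct3D 11 device with the GPU plugin, assuming an existing ID3D11Device* handle (d3d_device); all names are illustrative:

#include <openvino/openvino.hpp>
#include <openvino/runtime/intel_gpu/ocl/dx.hpp>

void run_on_shared_d3d(ID3D11Device* d3d_device) {
    ov::Core core;
    auto model = core.read_model("model.xml");                     // path is illustrative
    ov::intel_gpu::ocl::D3DContext gpu_context(core, d3d_device);  // share the application D3D11 device
    auto compiled = core.compile_model(model, gpu_context);        // compile on the shared device
    auto request = compiled.create_infer_request();
    request.infer();
}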
12 changes: 12 additions & 0 deletions src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp
@@ -22,13 +22,21 @@
namespace ov {
namespace intel_gpu {

/**
* @defgroup ov_runtime_ocl_gpu_cpp_api Intel GPU OpenCL remote objects API
* @ingroup ov_runtime_cpp_api
* Set of C++ classes and properties to work with Remote API for Intel GPU OpenCL plugin.
*/

/**
* @brief Namespace with Intel GPU OpenCL specific remote objects
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
namespace ocl {

/**
* @brief Shortcut for defining a handle parameter
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
using gpu_handle_param = void*;

Expand All @@ -37,6 +45,7 @@ using gpu_handle_param = void*;
* which can be shared with user-supplied OpenCL buffer.
* The plugin object derived from this class can be obtained with ClContext::create_tensor() call.
* @note User can obtain OpenCL buffer handle from this class.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class ClBufferTensor : public RemoteTensor {
public:
@@ -81,6 +90,7 @@ class ClBufferTensor : public RemoteTensor {
* which can be shared with user-supplied OpenCL 2D Image.
* The plugin object derived from this class can be obtained with ClContext::create_tensor() call.
* @note User can obtain OpenCL image handle from this class.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class ClImage2DTensor : public RemoteTensor {
public:
@@ -125,6 +135,7 @@ class ClImage2DTensor : public RemoteTensor {
* which can be shared with user-supplied USM device pointer.
* The plugin object derived from this class can be obtained with ClContext::create_tensor() call.
* @note User can obtain USM pointer from this class.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class USMTensor : public RemoteTensor {
public:
@@ -155,6 +166,7 @@ class USMTensor : public RemoteTensor {
* which is shared with OpenCL context object.
* The plugin object derived from this class can be obtained either with
* CompiledModel::get_context() or Core::create_context() calls.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class ClContext : public RemoteContext {
protected:
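A sketch of sharing an existing OpenCL context and buffer with the plugin, assuming cl_context and cl_mem handles (ocl_ctx, ocl_buf) created by the application's own OpenCL code; all names are illustrative:

#include <openvino/openvino.hpp>
#include <openvino/runtime/intel_gpu/ocl/ocl.hpp>

void run_on_shared_ocl(cl_context ocl_ctx, cl_mem ocl_buf) {
    ov::Core core;
    auto model = core.read_model("model.xml");                 // path is illustrative
    ov::intel_gpu::ocl::ClContext gpu_context(core, ocl_ctx);  // share the application OpenCL context
    auto compiled = core.compile_model(model, gpu_context);
    auto request = compiled.create_infer_request();

    // Wrap the application-owned cl_mem buffer as the model input without extra copies.
    auto input = compiled.input();
    auto shared_input = gpu_context.create_tensor(input.get_element_type(), input.get_shape(), ocl_buf);
    request.set_tensor(input, shared_input);
    request.infer();
}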
2 changes: 2 additions & 0 deletions src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp
@@ -33,6 +33,7 @@ namespace ocl {
* which is shared with VA output surface.
* The plugin object derived from this class can be obtained with VAContext::create_tensor() call.
* @note User can also obtain OpenCL 2D image handle from this class.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class VASurfaceTensor : public ClImage2DTensor {
public:
@@ -69,6 +70,7 @@ class VASurfaceTensor : public ClImage2DTensor {
* The plugin object derived from this class can be obtained either with
* CompiledModel::get_context() or Core::create_context() calls.
* @note User can also obtain OpenCL context handle from this class.
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
class VAContext : public ClContext {
public:
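A sketch of zero-copy inference on VA surfaces, assuming an existing VADisplay handle (va_display), an NV12 decoder surface (va_surface_id) and an assumed 480x640 surface size; all names are illustrative:

#include <openvino/openvino.hpp>
#include <openvino/runtime/intel_gpu/ocl/va.hpp>

void run_on_va_surface(VADisplay va_display, VASurfaceID va_surface_id) {
    ov::Core core;
    auto model = core.read_model("model.xml");                     // path is illustrative
    ov::intel_gpu::ocl::VAContext va_context(core, va_display);    // share the application VA display
    auto compiled = core.compile_model(model, va_context);
    auto request = compiled.create_infer_request();
    // Wrap the NV12 VA surface as a pair of remote Y/UV tensors (assumed 480x640 surface).
    auto nv12 = va_context.create_tensor_nv12(480, 640, va_surface_id);
}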