Skip to content

Commit

Permalink
[OpenVINO-EP] use logging infrastructure to display EP log messages (#…
Browse files Browse the repository at this point in the history
…1289)

* Add logging messages

Implements logging messages at INFO, WARNING and ERROR levels.
Utilizes ONNX-RT's logging infrastructure.
Reversed IsGraphSupported check logic to facilitate logging.

* Add MO exception text to WARNING log message
  • Loading branch information
smkarlap authored and skottmckay committed Jul 1, 2019
1 parent 28759e2 commit 5af2aec
Show file tree
Hide file tree
Showing 3 changed files with 79 additions and 55 deletions.
116 changes: 61 additions & 55 deletions onnxruntime/core/providers/openvino/openvino_execution_provider.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,13 @@
#include "core/framework/compute_capability.h"
#include "core/framework/tensorprotoutils.h"
#include "core/session/onnxruntime_cxx_api.h"
#include "core/util/protobuf_parsing_utils.h"
#include "core/common/logging/logging.h"

#include "openvino_execution_provider.h"
#include "core/graph/model.h"
#include "openvino_graph.h"

#include "core/util/protobuf_parsing_utils.h"
namespace onnxruntime {

constexpr const char* OpenVINO = "OpenVINO";
Expand Down Expand Up @@ -454,74 +455,78 @@ std::vector<std::unique_ptr<ComputeCapability>> OpenVINOExecutionProvider::GetCa

std::set<const onnxruntime::NodeArg*> fused_inputs, fused_outputs;

if (IsGraphSupported(graph_viewer,device_id)) {
std::string model_proto_strbuf;
model_proto.SerializeToString(&model_proto_strbuf);
if (!IsGraphSupported(graph_viewer,device_id)) {
LOGS_DEFAULT(WARNING) << openvino_ep::OpenVINOGraph::log_tag << "Rejecting as graph has unsupported operations.";
return result;
}

std::string xml_string, weights_string;
std::string model_proto_strbuf;
model_proto.SerializeToString(&model_proto_strbuf);

// Try converting with OpenVINO's Model Optimizer
try {
openvino_ep::OpenVINOGraph::ConvertONNXModelToOpenVINOIR(model_proto_strbuf, xml_string, weights_string, precision_fp32);
} catch (const char* msg) {
// Model Optimizer cannot convert this model.
return result;
}
std::string xml_string, weights_string;

auto node_indexes = graph_viewer.GetNodesInTopologicalOrder();
// Try converting with OpenVINO's Model Optimizer
try {
openvino_ep::OpenVINOGraph::ConvertONNXModelToOpenVINOIR(model_proto_strbuf, xml_string, weights_string, precision_fp32);
} catch (const char* msg) {
// Model Optimizer cannot convert this model.
LOGS_DEFAULT(WARNING) << openvino_ep::OpenVINOGraph::log_tag << "Rejecting as Model Optimizer cannot convert this model." << msg;
return result;
}

for (auto index : node_indexes) {
sub_graph->nodes.push_back(index);
const auto node = graph_viewer.GetNode(index);
auto node_indexes = graph_viewer.GetNodesInTopologicalOrder();

// Track graph inputs and initializers
for (auto input_def : node->InputDefs()) {
if (fused_outputs.find(input_def) == fused_outputs.end()) {
fused_inputs.insert(input_def);
} else {
fused_outputs.erase(input_def);
}
}
for (auto index : node_indexes) {
sub_graph->nodes.push_back(index);
const auto node = graph_viewer.GetNode(index);

// Track graph outputs
for (auto output_def : node->OutputDefs()) {
if (fused_inputs.find(output_def) == fused_inputs.end()) {
fused_outputs.insert(output_def);
} else {
fused_inputs.erase(output_def);
}
// Track graph inputs and initializers
for (auto input_def : node->InputDefs()) {
if (fused_outputs.find(input_def) == fused_outputs.end()) {
fused_inputs.insert(input_def);
} else {
fused_outputs.erase(input_def);
}
}

ONNX_NAMESPACE::AttributeProto xml_str_attr;
xml_str_attr.set_name("xml_str");
xml_str_attr.set_type(ONNX_NAMESPACE::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
xml_str_attr.set_s(xml_string);

ONNX_NAMESPACE::AttributeProto weights_str_attr;
weights_str_attr.set_name("weights_str");
weights_str_attr.set_type(ONNX_NAMESPACE::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
weights_str_attr.set_s(weights_string);

auto meta_def = std::make_unique<::onnxruntime::IndexedSubGraph::MetaDef>();
meta_def->attributes["xml_str"] = xml_str_attr;
meta_def->attributes["weights_str"] = weights_str_attr;
meta_def->name = "OpenVINOKernel_" + std::to_string(counter++);
meta_def->domain = "OpenVINO";
meta_def->since_version = 1;

for (auto input : fused_inputs) {
meta_def->inputs.push_back(input->Name());
// Track graph outputs
for (auto output_def : node->OutputDefs()) {
if (fused_inputs.find(output_def) == fused_inputs.end()) {
fused_outputs.insert(output_def);
} else {
fused_inputs.erase(output_def);
}
}
}

for (auto output : fused_outputs) {
meta_def->outputs.push_back(output->Name());
}
ONNX_NAMESPACE::AttributeProto xml_str_attr;
xml_str_attr.set_name("xml_str");
xml_str_attr.set_type(ONNX_NAMESPACE::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
xml_str_attr.set_s(xml_string);

ONNX_NAMESPACE::AttributeProto weights_str_attr;
weights_str_attr.set_name("weights_str");
weights_str_attr.set_type(ONNX_NAMESPACE::AttributeProto_AttributeType::AttributeProto_AttributeType_STRING);
weights_str_attr.set_s(weights_string);

auto meta_def = std::make_unique<::onnxruntime::IndexedSubGraph::MetaDef>();
meta_def->attributes["xml_str"] = xml_str_attr;
meta_def->attributes["weights_str"] = weights_str_attr;
meta_def->name = "OpenVINOKernel_" + std::to_string(counter++);
meta_def->domain = "OpenVINO";
meta_def->since_version = 1;

for (auto input : fused_inputs) {
meta_def->inputs.push_back(input->Name());
}

sub_graph->SetMetaDef(meta_def);
result.push_back(std::make_unique<ComputeCapability>(std::move(sub_graph)));
for (auto output : fused_outputs) {
meta_def->outputs.push_back(output->Name());
}

sub_graph->SetMetaDef(meta_def);
result.push_back(std::make_unique<ComputeCapability>(std::move(sub_graph)));

return result;
}

Expand All @@ -534,6 +539,7 @@ common::Status OpenVINOExecutionProvider::Compile(
openvino_graph = std::make_shared<openvino_ep::OpenVINOGraph>(fused_node);

} catch (const char* msg) {
LOGS_DEFAULT(ERROR) << openvino_ep::OpenVINOGraph::log_tag << "Compilation error: " << msg;
return Status(common::StatusCategory::ONNXRUNTIME,
common::StatusCode::NOT_IMPLEMENTED, msg);
}
Expand Down
16 changes: 16 additions & 0 deletions onnxruntime/core/providers/openvino/openvino_graph.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,12 +16,16 @@
#include "core/graph/graph.h"
#include "core/framework/tensorprotoutils.h"
#include "core/session/onnxruntime_cxx_api.h"
#include "core/common/logging/logging.h"

#include "openvino_graph.h"

namespace onnxruntime{
namespace openvino_ep {


const std::string OpenVINOGraph::log_tag = "[OpenVINO-EP] ";

OpenVINOGraph::OpenVINOGraph(const onnxruntime::Node* fused_node) {

device_id_ = "CPU";
Expand Down Expand Up @@ -227,6 +231,8 @@ std::vector<InferenceEngine::InferRequest::Ptr> OpenVINOGraph::GetExecutableHand
plugin_path)
.getPluginByDevice(device);

LOGS_DEFAULT(INFO) << log_tag << "Loaded plugins";

// Configure input & output
// Prepare input blobs

Expand Down Expand Up @@ -301,11 +307,15 @@ std::vector<InferenceEngine::InferRequest::Ptr> OpenVINOGraph::GetExecutableHand
InferenceEngine::ExecutableNetwork exeNetwork = plugin.LoadNetwork(*network,
{});

LOGS_DEFAULT(INFO) << log_tag << "Network loaded into accelerator plug-in succesfully";

// Create infer request
std::vector<InferenceEngine::InferRequest::Ptr> infer_requests;
for (size_t i = 0; i < num_inf_reqs_; i++) {
infer_requests.push_back(exeNetwork.CreateInferRequestPtr());
}
LOGS_DEFAULT(INFO) << log_tag << "Infer requests created: " << num_inf_reqs_;

return infer_requests;
}

Expand All @@ -321,6 +331,8 @@ size_t OpenVINOGraph::DeduceBatchSize(Ort::CustomOpApi ort, const OrtValue* inpu
batch_size = input_shape[0];
}

LOGS_DEFAULT(INFO) << log_tag << "Deduced batch size: " << batch_size;

return batch_size;
}

Expand Down Expand Up @@ -424,6 +436,8 @@ void OpenVINOGraph::Infer(Ort::CustomOpApi ort, OrtKernelContext* context) {
// Currently allows only one Infer execution at a time
std::lock_guard<std::mutex> lock(compute_lock_);

LOGS_DEFAULT(INFO) << log_tag << "Starting inference";

// Get Input and Output tensors
size_t input_count = openvino_network_->getInputsInfo().size();
size_t output_count = openvino_network_->getOutputsInfo().size();
Expand Down Expand Up @@ -465,6 +479,8 @@ void OpenVINOGraph::Infer(Ort::CustomOpApi ort, OrtKernelContext* context) {
size_t batch_slice_idx = full_parallel_runs * num_inf_reqs_ + inf_req_idx;
CompleteAsyncInference(ort, output_tensors, batch_slice_idx, inf_req_idx);
}

LOGS_DEFAULT(INFO) << log_tag << "Inference successful";
}

} // namespace openvino_ep
Expand Down
2 changes: 2 additions & 0 deletions onnxruntime/core/providers/openvino/openvino_graph.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ class OpenVINOGraph {

static void ConvertONNXModelToOpenVINOIR(const std::string& onnx_model, std::string& openvino_xml, std::string& openvino_bin, bool precision_fp32);

static const std::string log_tag;

private:
std::shared_ptr<InferenceEngine::CNNNetwork> BuildOpenVINONetworkWithMO();

Expand Down

0 comments on commit 5af2aec

Please sign in to comment.