This repository has been archived by the owner on Jan 3, 2023. It is now read-only.
forked from microsoft/onnxruntime
-
Notifications
You must be signed in to change notification settings - Fork 2
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Revert "Remove squeezenet_test.cpp since it is almost the same as the C++ sample". This reverts commit a97575f.
- Loading branch information
Showing
2 changed files
with
138 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,131 @@ | ||
// Copyright 2019 JD.com Inc. JD AI | ||
// | ||
// Copyright(c) Microsoft Corporation.All rights reserved. | ||
// Licensed under the MIT License. | ||
// | ||
|
||
#include <cassert> | ||
#include <cmath> | ||
#include <core/session/onnxruntime_cxx_api.h> | ||
#include <core/providers/nnapi/nnapi_provider_factory.h> | ||
#include <cstdlib> | ||
#include <cstdio> | ||
#include <vector> | ||
|
||
int main(int argc, char* argv[]) { | ||
//************************************************************************* | ||
// initialize enviroment...one enviroment per process | ||
// enviroment maintains thread pools and other state info | ||
Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test"); | ||
|
||
// initialize session options if needed | ||
Ort::SessionOptions session_options; | ||
OrtSessionOptionsAppendExecutionProvider_Nnapi(session_options); | ||
session_options.SetThreadPoolSize(1); | ||
|
||
// If onnxruntime.dll is built with CUDA enabled, we can uncomment out this line to use CUDA for this | ||
// session (we also need to include cuda_provider_factory.h above which defines it) | ||
// #include "cuda_provider_factory.h" | ||
// OrtSessionOptionsAppendExecutionProvider_CUDA(session_opsions, 1); | ||
|
||
// Sets graph optimization level | ||
// Available levels are | ||
// 0 -> To disable all optimizations | ||
// 1 -> To enable basic optimizations (Such as redundant node removals) | ||
// 2 -> To enable all optimizations (Includes level 1 + more complex optimizations like node fusions) | ||
session_options.SetGraphOptimizationLevel(1); | ||
|
||
//************************************************************************* | ||
// create session and load model into memory | ||
// using squeezenet version 1.3 | ||
// URL = https://github.com/onnx/models/tree/master/squeezenet | ||
const char* model_path = "squeezenet.onnx"; | ||
Ort::Session session(env, model_path, session_options); | ||
|
||
//************************************************************************* | ||
// print model input layer (node names, types, shape etc.) | ||
Ort::Allocator allocator = Ort::Allocator::CreateDefault(); | ||
|
||
// print number of model input nodes | ||
size_t num_input_nodes = session.GetInputCount(); | ||
std::vector<const char*> input_node_names(num_input_nodes); | ||
std::vector<int64_t> input_node_dims; // simplify... this model has only 1 input node {1, 3, 224, 224}. | ||
// Otherwise need vector<vector<>> | ||
|
||
printf("Number of inputs = %zu\n", num_input_nodes); | ||
|
||
// iterate over all input nodes | ||
for (size_t i = 0; i < num_input_nodes; i++) { | ||
// print input node names | ||
char* input_name = session.GetInputName(i, allocator); | ||
printf("Input %ld : name=%s\n", i, input_name); | ||
input_node_names[i] = input_name; | ||
|
||
// print input node types | ||
Ort::TypeInfo type_info = session.GetInputTypeInfo(i); | ||
auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); | ||
|
||
ONNXTensorElementDataType type = tensor_info.GetElementType(); | ||
printf("Input %ld : type=%d\n", i, type); | ||
|
||
// print input shapes/dims | ||
input_node_dims = tensor_info.GetShape(); | ||
printf("Input %ld : num_dims=%zu\n", i, input_node_dims.size()); | ||
for (size_t j = 0; j < input_node_dims.size(); j++) | ||
printf("Input %ld : dim %ld=%jd\n", i, j, input_node_dims[j]); | ||
} | ||
|
||
// Results should be... | ||
// Number of inputs = 1 | ||
// Input 0 : name = data_0 | ||
// Input 0 : type = 1 | ||
// Input 0 : num_dims = 4 | ||
// Input 0 : dim 0 = 1 | ||
// Input 0 : dim 1 = 3 | ||
// Input 0 : dim 2 = 224 | ||
// Input 0 : dim 3 = 224 | ||
|
||
//************************************************************************* | ||
// Similar operations to get output node information. | ||
// Use OrtSessionGetOutputCount(), OrtSessionGetOutputName() | ||
// OrtSessionGetOutputTypeInfo() as shown above. | ||
|
||
//************************************************************************* | ||
// Score the model using sample data, and inspect values | ||
|
||
size_t input_tensor_size = 224 * 224 * 3; // simplify ... using known dim values to calculate size | ||
// use OrtGetTensorShapeElementCount() to get official size! | ||
|
||
std::vector<float> input_tensor_values(input_tensor_size); | ||
std::vector<const char*> output_node_names = {"softmaxout_1"}; | ||
|
||
// initialize input data with values in [0.0, 1.0] | ||
for (unsigned int i = 0; i < input_tensor_size; i++) | ||
input_tensor_values[i] = (float)i / (input_tensor_size + 1); | ||
|
||
// create input tensor object from data values | ||
Ort::AllocatorInfo allocator_info = Ort::AllocatorInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); | ||
Ort::Value input_tensor = Ort::Value::CreateTensor<float>(allocator_info, input_tensor_values.data(), input_tensor_size, input_node_dims.data(), 4); | ||
assert(input_tensor.IsTensor()); | ||
|
||
// score model & input tensor, get back output tensor | ||
auto output_tensors = session.Run(Ort::RunOptions{nullptr}, input_node_names.data(), &input_tensor, 1, output_node_names.data(), 1); | ||
assert(output_tensors.size() == 1 && output_tensors.front().IsTensor()); | ||
|
||
// Get pointer to output tensor float values | ||
float* floatarr = output_tensors.front().GetTensorMutableData<float>(); | ||
assert(abs(floatarr[0] - 0.000045) < 1e-6); | ||
|
||
// score the model, and print scores for first 5 classes | ||
for (int i = 0; i < 5; i++) | ||
printf("Score for class [%d] = %f\n", i, floatarr[i]); | ||
|
||
// Results should be as below... | ||
// Score for class[0] = 0.000045 | ||
// Score for class[1] = 0.003846 | ||
// Score for class[2] = 0.000125 | ||
// Score for class[3] = 0.001180 | ||
// Score for class[4] = 0.001317 | ||
printf("Done!\n"); | ||
return 0; | ||
} |