Commit

Revert "Remove squeezenet_test.cpp since it is almost same with the c++ sample"

This reverts commit a97575f.
daquexian committed Jun 18, 2019
1 parent 7d753f5 commit 2a0670e
Showing 2 changed files with 138 additions and 0 deletions.
7 changes: 7 additions & 0 deletions cmake/onnxruntime_unittests.cmake
@@ -679,3 +679,10 @@ endif()
list(APPEND onnxruntime_mlas_test_libs Threads::Threads)
target_link_libraries(onnxruntime_mlas_test PRIVATE ${onnxruntime_mlas_test_libs})
set_target_properties(onnxruntime_mlas_test PROPERTIES FOLDER "ONNXRuntimeTest")

add_executable(onnxruntime_nnapi_squeezenet_test ${TEST_SRC_DIR}/providers/nnapi/squeezenet_test.cpp)
add_dependencies(onnxruntime_nnapi_squeezenet_test ${onnxruntime_test_providers_dependencies})
target_link_libraries(onnxruntime_nnapi_squeezenet_test PRIVATE ${onnxruntime_test_providers_libs} ${onnxruntime_EXTERNAL_LIBRARIES})
set_target_properties(onnxruntime_nnapi_squeezenet_test PROPERTIES FOLDER "ONNXRuntimeTest")
target_include_directories(onnxruntime_nnapi_squeezenet_test PRIVATE ${TEST_INC_DIR})
onnxruntime_add_include_to_target(onnxruntime_nnapi_squeezenet_test date_interface gsl)
131 changes: 131 additions & 0 deletions onnxruntime/test/providers/nnapi/squeezenet_test.cpp
@@ -0,0 +1,131 @@
// Copyright 2019 JD.com Inc. JD AI
//
// Copyright(c) Microsoft Corporation.All rights reserved.
// Licensed under the MIT License.
//

#include <cassert>
#include <cmath>
#include <core/session/onnxruntime_cxx_api.h>
#include <core/providers/nnapi/nnapi_provider_factory.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

int main(int argc, char* argv[]) {
//*************************************************************************
// initialize environment... one environment per process
// environment maintains thread pools and other state info
Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");

// initialize session options if needed
Ort::SessionOptions session_options;
OrtSessionOptionsAppendExecutionProvider_Nnapi(session_options);
session_options.SetThreadPoolSize(1);

// If onnxruntime.dll is built with CUDA enabled, we can uncomment this line to use CUDA for this
// session (we also need to include cuda_provider_factory.h above which defines it)
// #include "cuda_provider_factory.h"
// OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 1);

// Sets graph optimization level
// Available levels are
// 0 -> To disable all optimizations
// 1 -> To enable basic optimizations (Such as redundant node removals)
// 2 -> To enable all optimizations (Includes level 1 + more complex optimizations like node fusions)
session_options.SetGraphOptimizationLevel(1);

//*************************************************************************
// create session and load model into memory
// using squeezenet version 1.3
// URL = https://github.com/onnx/models/tree/master/squeezenet
const char* model_path = "squeezenet.onnx";
Ort::Session session(env, model_path, session_options);

//*************************************************************************
// print model input layer (node names, types, shape etc.)
Ort::Allocator allocator = Ort::Allocator::CreateDefault();

// print number of model input nodes
size_t num_input_nodes = session.GetInputCount();
std::vector<const char*> input_node_names(num_input_nodes);
std::vector<int64_t> input_node_dims; // simplify... this model has only 1 input node {1, 3, 224, 224}.
// Otherwise we would need a vector<vector<int64_t>> here

printf("Number of inputs = %zu\n", num_input_nodes);

// iterate over all input nodes
for (size_t i = 0; i < num_input_nodes; i++) {
// print input node names
char* input_name = session.GetInputName(i, allocator);
printf("Input %ld : name=%s\n", i, input_name);
input_node_names[i] = input_name;

// print input node types
Ort::TypeInfo type_info = session.GetInputTypeInfo(i);
auto tensor_info = type_info.GetTensorTypeAndShapeInfo();

ONNXTensorElementDataType type = tensor_info.GetElementType();
printf("Input %ld : type=%d\n", i, type);

// print input shapes/dims
input_node_dims = tensor_info.GetShape();
printf("Input %ld : num_dims=%zu\n", i, input_node_dims.size());
for (size_t j = 0; j < input_node_dims.size(); j++)
printf("Input %ld : dim %ld=%jd\n", i, j, input_node_dims[j]);
}

// Results should be...
// Number of inputs = 1
// Input 0 : name = data_0
// Input 0 : type = 1
// Input 0 : num_dims = 4
// Input 0 : dim 0 = 1
// Input 0 : dim 1 = 3
// Input 0 : dim 2 = 224
// Input 0 : dim 3 = 224

//*************************************************************************
// Similar operations to get output node information.
// Use OrtSessionGetOutputCount(), OrtSessionGetOutputName()
// OrtSessionGetOutputTypeInfo() as shown above.
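// A minimal sketch of that enumeration (an assumption mirroring the input
// loop above, not part of the original test; left commented out so the
// test's behavior is unchanged):
//
//   size_t num_output_nodes = session.GetOutputCount();
//   for (size_t i = 0; i < num_output_nodes; i++) {
//     char* output_name = session.GetOutputName(i, allocator);
//     printf("Output %zu : name=%s\n", i, output_name);
//     Ort::TypeInfo out_type_info = session.GetOutputTypeInfo(i);
//     auto out_tensor_info = out_type_info.GetTensorTypeAndShapeInfo();
//     printf("Output %zu : type=%d\n", i, out_tensor_info.GetElementType());
//     std::vector<int64_t> out_dims = out_tensor_info.GetShape();
//     printf("Output %zu : num_dims=%zu\n", i, out_dims.size());
//   }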

//*************************************************************************
// Score the model using sample data, and inspect values

size_t input_tensor_size = 224 * 224 * 3; // simplify ... using known dim values to calculate size
// use OrtGetTensorShapeElementCount() to get official size!
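// A hedged sketch of that query via the C++ wrapper (assumes the
// Ort::TensorTypeAndShapeInfo from the input loop above were still in
// scope; not part of the original test):
//
//   size_t official_size = tensor_info.GetElementCount();
//   assert(official_size == input_tensor_size);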

std::vector<float> input_tensor_values(input_tensor_size);
std::vector<const char*> output_node_names = {"softmaxout_1"};

// initialize input data with values in [0.0, 1.0]
for (unsigned int i = 0; i < input_tensor_size; i++)
input_tensor_values[i] = (float)i / (input_tensor_size + 1);

// create input tensor object from data values
Ort::AllocatorInfo allocator_info = Ort::AllocatorInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
Ort::Value input_tensor = Ort::Value::CreateTensor<float>(allocator_info, input_tensor_values.data(), input_tensor_size, input_node_dims.data(), 4);
assert(input_tensor.IsTensor());

// score model & input tensor, get back output tensor
auto output_tensors = session.Run(Ort::RunOptions{nullptr}, input_node_names.data(), &input_tensor, 1, output_node_names.data(), 1);
assert(output_tensors.size() == 1 && output_tensors.front().IsTensor());

// Get pointer to output tensor float values
float* floatarr = output_tensors.front().GetTensorMutableData<float>();
assert(std::abs(floatarr[0] - 0.000045f) < 1e-6f);

// print scores for the first 5 classes
for (int i = 0; i < 5; i++)
printf("Score for class [%d] = %f\n", i, floatarr[i]);

// Results should be as below...
// Score for class[0] = 0.000045
// Score for class[1] = 0.003846
// Score for class[2] = 0.000125
// Score for class[3] = 0.001180
// Score for class[4] = 0.001317
printf("Done!\n");
return 0;
}
