Add ORT C++ API support and sample code; Use DNNL everywhere & enable… #841

Merged: 10 commits, Feb 19, 2020
28 changes: 27 additions & 1 deletion onnxruntime/cppbuild.sh
@@ -30,7 +30,33 @@ git submodule update --init --recursive --jobs $MAKEJ
git submodule foreach --recursive git reset --hard
patch -p1 < ../../../onnxruntime.patch
which ctest3 &> /dev/null && CTEST="ctest3" || CTEST="ctest"
MAKEFLAGS="-j $MAKEJ" bash build.sh --cmake_path "$CMAKE" --ctest_path "$CTEST" --config Release --use_dnnl --use_mklml --build_shared_lib
#MAKEFLAGS="-j $MAKEJ" bash build.sh --cmake_path "$CMAKE" --ctest_path "$CTEST" --config Release --use_dnnl --build_shared_lib
sedinplace '/std::nullptr_t/d' include/onnxruntime/core/session/onnxruntime_cxx_api.h


sedinplace 's/onnxruntime_c_api.h/onnxruntime\/core\/session\/onnxruntime_c_api.h/g' include/onnxruntime/core/providers/dnnl/dnnl_provider_factory.h

sedinplace 's/: Base<OrtEnv>{p} {}/{p_ = p;}/g' include/onnxruntime/core/session/onnxruntime_cxx_api.h
sedinplace 's/: Base<OrtSessionOptions>{p} {}/{p_ = p;}/g' include/onnxruntime/core/session/onnxruntime_cxx_api.h
sedinplace 's/: Base<OrtTensorTypeAndShapeInfo>{p} {}/{p_ = p;}/g' include/onnxruntime/core/session/onnxruntime_cxx_api.h
sedinplace 's/: Base<OrtValue>{p} {}/ {p_= p;}/g' include/onnxruntime/core/session/onnxruntime_cxx_api.h
sedinplace 's/: Base<OrtTypeInfo>{p} {}/ {p_= p;}/g' include/onnxruntime/core/session/onnxruntime_cxx_api.h
sedinplace 's/: Base<OrtMemoryInfo>{p} {}/ {p_= p;}/g' include/onnxruntime/core/session/onnxruntime_cxx_api.h

# TODO: Look into restoring this; it would avoid one instance of dropping down to the C API.
sedinplace 's/Unowned<TensorTypeAndShapeInfo> GetTensorTypeAndShapeInfo() const;//g' include/onnxruntime/core/session/onnxruntime_cxx_api.h

sedinplace '/stub_api/d' include/onnxruntime/core/session/onnxruntime_cxx_api.h

sedinplace '/OrtGetApiBase/d' include/onnxruntime/core/session/onnxruntime_cxx_api.h

sedinplace '/delete/d' include/onnxruntime/core/session/onnxruntime_cxx_api.h

sedinplace '/s_api/d' include/onnxruntime/core/session/onnxruntime_cxx_api.h

sedinplace 's/std::string&&/std::string/g' include/onnxruntime/core/session/onnxruntime_cxx_api.h

sedinplace '/inline Unowned<TensorTypeAndShapeInfo> TypeInfo/,+4d' include/onnxruntime/core/session/onnxruntime_cxx_inline.h

cp -r include/* ../include
cp -r build/Linux/Release/lib* build/Linux/Release/dnnl/install/lib*/libdnnl* ../lib
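For context: the sedinplace rules above patch onnxruntime_cxx_api.h into a form JavaCPP can parse and map. The include of onnxruntime_c_api.h in dnnl_provider_factory.h is given its full path, the delegating constructors such as ": Base<OrtEnv>{p} {}" are rewritten to plain assignments ("{p_ = p;}"), and declarations that are not mapped yet (the Unowned<TensorTypeAndShapeInfo> accessor, the API stub/accessor lines, deleted members, and std::string&& parameters) are dropped, which is why the sample below occasionally falls back to the C API.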
4 changes: 2 additions & 2 deletions onnxruntime/platform/pom.xml
@@ -22,8 +22,8 @@
<dependencies>
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>mkl-dnn-platform</artifactId>
<version>0.21.3-${project.parent.version}</version>
<artifactId>dnnl-platform</artifactId>
<version>1.2.0-${project.parent.version}</version>
</dependency>
<dependency>
<groupId>${project.groupId}</groupId>
7 changes: 3 additions & 4 deletions onnxruntime/pom.xml
@@ -16,15 +16,15 @@

<properties>
<javacpp.nativeRequires>
requires org.bytedeco.mkldnn.${javacpp.platform.module};
requires org.bytedeco.dnnl.${javacpp.platform.module};
</javacpp.nativeRequires>
</properties>

<dependencies>
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>mkl-dnn</artifactId>
<version>0.21.3-${project.parent.version}</version>
<artifactId>dnnl</artifactId>
<version>1.2.0-${project.parent.version}</version>
</dependency>
<dependency>
<groupId>org.bytedeco</groupId>
@@ -55,7 +55,6 @@
<exclude>org/bytedeco/ngraph/${javacpp.platform}${javacpp.platform.extension}/*.exp</exclude>
<exclude>org/bytedeco/ngraph/${javacpp.platform}${javacpp.platform.extension}/*.lib</exclude>
<exclude>org/bytedeco/ngraph/${javacpp.platform}${javacpp.platform.extension}/*.obj</exclude>
<exclude>org/bytedeco/ngraph/${javacpp.platform}${javacpp.platform.extension}/*mklml*</exclude>
<exclude>org/bytedeco/ngraph/${javacpp.platform}${javacpp.platform.extension}/*omp*</exclude>
</excludes>
</configuration>
175 changes: 175 additions & 0 deletions onnxruntime/samples/CXXApiSample.java
@@ -0,0 +1,175 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
//

import java.nio.file.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.indexer.*;
import org.bytedeco.onnxruntime.*;
import static org.bytedeco.onnxruntime.global.onnxruntime.*;

public class CXXApiSample {

static final OrtApi g_ort = OrtGetApiBase().GetApi().call(ORT_API_VERSION);

//*****************************************************************************
// helper function to check for status
static void CheckStatus(OrtStatus status) {
if (status != null && !status.isNull()) {
String msg = g_ort.GetErrorMessage().call(status).getString();
System.err.println(msg);
g_ort.ReleaseStatus().call(status);
System.exit(1);
}
}

public static void main(String[] args) throws Exception {
//*************************************************************************
// initialize environment; one environment per process
// the environment maintains thread pools and other state info

Env env = new Env(ORT_LOGGING_LEVEL_WARNING, new BytePointer("test"));

// initialize session options if needed
SessionOptions session_options = new SessionOptions();
session_options.SetIntraOpNumThreads(1);

// Sets graph optimization level
session_options.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);

// Optionally add more execution providers via session_options
// E.g. for CUDA include cuda_provider_factory.h and uncomment the following line:
// OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 1);

OrtSessionOptionsAppendExecutionProvider_Dnnl(session_options.asOrtSessionOptions(), 1);
//*************************************************************************
// create session and load model into memory
// using squeezenet version 1.3
// URL = https://github.com/onnx/models/tree/master/squeezenet

String model_path = args.length > 0 ? args[0] : "squeezenet.onnx";
Session session = new Session(env, model_path, session_options);

System.out.println("Using Onnxruntime C++ API");

//*************************************************************************
// print model input layer (node names, types, shape etc.)
Long num_input_nodes = session.GetInputCount();

AllocatorWithDefaultOptions allocator = new AllocatorWithDefaultOptions();

// print number of model input nodes

PointerPointer input_node_names = new PointerPointer(num_input_nodes);
LongPointer input_node_dims = null; // simplify... this model has only 1 input node {1, 3, 224, 224}.
// Otherwise need vector<vector<>>

System.out.println("Number of inputs = " + num_input_nodes);

// iterate over all input nodes
for (long i = 0; i < num_input_nodes; i++) {
// print input node names
BytePointer input_name = session.GetInputName(i, allocator.asOrtAllocator());
System.out.println("Input " + i + " : name=" + input_name.getString());
input_node_names.put(i, input_name);

// print input node types
TypeInfo type_info = session.GetInputTypeInfo(i);

OrtTypeInfo ort_type_info = type_info.asOrtTypeInfo();

// Using the C API here because GetTensorTypeAndShapeInfo() isn't mapped (removed in cppbuild.sh)
PointerPointer<OrtTensorTypeAndShapeInfo> tensor_infos = new PointerPointer<OrtTensorTypeAndShapeInfo>(1);
CheckStatus(g_ort.CastTypeInfoToTensorInfo().call(ort_type_info, tensor_infos));
OrtTensorTypeAndShapeInfo ort_tensor_info = tensor_infos.get(OrtTensorTypeAndShapeInfo.class);
IntPointer type = new IntPointer(1);
CheckStatus(g_ort.GetTensorElementType().call(ort_tensor_info, type));
System.out.println("Input " + i + " : type=" + type.get());

TensorTypeAndShapeInfo tensor_info = new TensorTypeAndShapeInfo(ort_tensor_info);

// Back to the C++ API
// print input shapes/dims
input_node_dims = tensor_info.GetShape();


System.out.println("Input " + i + " : num_dims=" + input_node_dims.capacity());
for (long j = 0; j < input_node_dims.capacity(); j++)
System.out.println("Input " + i + " : dim " + j + "=" + input_node_dims.get(j));


g_ort.ReleaseTypeInfo().call(ort_type_info);
}

// Results should be...
// Number of inputs = 1
// Input 0 : name = data_0
// Input 0 : type = 1
// Input 0 : num_dims = 4
// Input 0 : dim 0 = 1
// Input 0 : dim 1 = 3
// Input 0 : dim 2 = 224
// Input 0 : dim 3 = 224

//*************************************************************************
// Similar operations to get output node information.
// Use OrtSessionGetOutputCount(), OrtSessionGetOutputName(), and
// OrtSessionGetOutputTypeInfo() as shown above.

//*************************************************************************
// Score the model using sample data, and inspect values


long input_tensor_size = 224 * 224 * 3; // simplify ... using known dim values to calculate size
// use OrtGetTensorShapeElementCount() to get official size!

FloatPointer input_tensor_values = new FloatPointer(input_tensor_size);
PointerPointer output_node_names = new PointerPointer("softmaxout_1");

// initialize input data with values in [0.0, 1.0]
FloatIndexer idx = FloatIndexer.create(input_tensor_values);
for (long i = 0; i < input_tensor_size; i++)
idx.put(i, (float)i / (input_tensor_size + 1));

// create input tensor object from data values

MemoryInfo memory_info = MemoryInfo.CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
OrtMemoryInfo ort_memory_info = memory_info.asOrtMemoryInfo();

// Value::CreateTensor in the C++ API only takes the C OrtMemoryInfo, not the C++ MemoryInfo
Value input_tensor = Value.CreateTensor(ort_memory_info, input_tensor_values, input_tensor_size * Float.SIZE / 8, input_node_dims, 4, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT);

boolean is_tens = input_tensor.IsTensor();
System.out.println(is_tens);
assert is_tens;

ValueVector output_tensor = session.Run(new RunOptions(), input_node_names, input_tensor, 1, output_node_names, 1);

boolean is_tensor = output_tensor.get(0).IsTensor();
assert output_tensor.size()==1 && is_tensor;

// Get pointer to output tensor float values
FloatPointer floatarr = output_tensor.get(0).GetTensorMutableDataFloat();
assert Math.abs(floatarr.get(0) - 0.000045) < 1e-6;

// score the model, and print scores for first 5 classes
for (int i = 0; i < 5; i++)
System.out.println("Score for class [" + i + "] = " + floatarr.get(i));

// Results should be as below...
// Score for class[0] = 0.000045
// Score for class[1] = 0.003846
// Score for class[2] = 0.000125
// Score for class[3] = 0.001180
// Score for class[4] = 0.001317

g_ort.ReleaseMemoryInfo().call(ort_memory_info);
//g_ort.ReleaseValue().call(ort_output_tensor);
//g_ort.ReleaseValue().call(input_tensor);
//g_ort.ReleaseSession().call(session);
//g_ort.ReleaseSessionOptions().call(session_options);
//g_ort.ReleaseEnv().call(env);
System.out.println("Done!");
System.exit(0);
}
}
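Given the exec.mainClass switch in the sample pom below, this class can presumably be run from onnxruntime/samples with something like mvn compile exec:java -Dexec.args="path/to/squeezenet.onnx"; the argument is optional and defaults to squeezenet.onnx in the working directory, as the code above shows.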
4 changes: 2 additions & 2 deletions onnxruntime/samples/pom.xml
@@ -1,10 +1,10 @@
<project>
<modelVersion>4.0.0</modelVersion>
<groupId>org.bytedeco.onnxruntime</groupId>
<artifactId>capisample</artifactId>
<artifactId>cxxapisample</artifactId>
<version>1.5.3-SNAPSHOT</version>
<properties>
<exec.mainClass>CApiSample</exec.mainClass>
<exec.mainClass>CXXApiSample</exec.mainClass>
</properties>
<dependencies>
<dependency>
33 changes: 33 additions & 0 deletions onnxruntime/src/gen/java/org/bytedeco/onnxruntime/AllocatorWithDefaultOptions.java
@@ -0,0 +1,33 @@
// Targeted by JavaCPP version 1.5.3-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.onnxruntime;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.onnxruntime.global.onnxruntime.*;


@Namespace("Ort") @NoOffset @Properties(inherit = org.bytedeco.onnxruntime.presets.onnxruntime.class)
public class AllocatorWithDefaultOptions extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AllocatorWithDefaultOptions(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public AllocatorWithDefaultOptions(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public AllocatorWithDefaultOptions position(long position) {
return (AllocatorWithDefaultOptions)super.position(position);
}

public AllocatorWithDefaultOptions() { super((Pointer)null); allocate(); }
private native void allocate();

public native @Name("operator OrtAllocator*") OrtAllocator asOrtAllocator();

public native Pointer Alloc(@Cast("size_t") long size);
public native void Free(Pointer p);

public native @Const OrtMemoryInfo GetInfo();
}
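A minimal usage sketch for the wrapper above (not part of this PR): the constructor, asOrtAllocator(), Alloc(), Free(), and GetInfo() come from the generated class itself, while the AllocatorDemo class and the 64-byte allocation are purely illustrative. It mirrors how CXXApiSample passes allocator.asOrtAllocator() to Session.GetInputName().

import org.bytedeco.javacpp.*;
import org.bytedeco.onnxruntime.*;

public class AllocatorDemo {
    public static void main(String[] args) {
        // Default ORT allocator, as used by CXXApiSample for Session.GetInputName().
        AllocatorWithDefaultOptions allocator = new AllocatorWithDefaultOptions();

        // The conversion operator exposes the raw OrtAllocator for C-API-style calls.
        OrtAllocator raw = allocator.asOrtAllocator();
        System.out.println("raw allocator: " + raw);

        // Direct allocation through the default allocator; pair every Alloc with a Free.
        Pointer block = allocator.Alloc(64);
        allocator.Free(block);

        // Memory info describing this allocator.
        System.out.println("memory info: " + allocator.GetInfo());
    }
}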
31 changes: 31 additions & 0 deletions onnxruntime/src/gen/java/org/bytedeco/onnxruntime/BasedCustomOpDomain.java
@@ -0,0 +1,31 @@
// Targeted by JavaCPP version 1.5.3-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.onnxruntime;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.onnxruntime.global.onnxruntime.*;

@Name("Ort::Base<OrtCustomOpDomain>") @NoOffset @Properties(inherit = org.bytedeco.onnxruntime.presets.onnxruntime.class)
public class BasedCustomOpDomain extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BasedCustomOpDomain(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public BasedCustomOpDomain(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public BasedCustomOpDomain position(long position) {
return (BasedCustomOpDomain)super.position(position);
}

public BasedCustomOpDomain() { super((Pointer)null); allocate(); }
private native void allocate();
public BasedCustomOpDomain(OrtCustomOpDomain p) { super((Pointer)null); allocate(p); }
private native void allocate(OrtCustomOpDomain p);

public native @Name("operator OrtCustomOpDomain*") OrtCustomOpDomain asOrtCustomOpDomain();

public native OrtCustomOpDomain release();
}
31 changes: 31 additions & 0 deletions onnxruntime/src/gen/java/org/bytedeco/onnxruntime/BasedEnv.java
@@ -0,0 +1,31 @@
// Targeted by JavaCPP version 1.5.3-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.onnxruntime;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.onnxruntime.global.onnxruntime.*;

@Name("Ort::Base<OrtEnv>") @NoOffset @Properties(inherit = org.bytedeco.onnxruntime.presets.onnxruntime.class)
public class BasedEnv extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BasedEnv(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public BasedEnv(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public BasedEnv position(long position) {
return (BasedEnv)super.position(position);
}

public BasedEnv() { super((Pointer)null); allocate(); }
private native void allocate();
public BasedEnv(OrtEnv p) { super((Pointer)null); allocate(p); }
private native void allocate(OrtEnv p);

public native @Name("operator OrtEnv*") OrtEnv asOrtEnv();

public native OrtEnv release();
}
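The Based* classes all map Ort::Base<T> in the same way, so one sketch covers the pattern. The BasedEnvDemo helper below and its raw OrtEnv argument are hypothetical; only the OrtEnv constructor, asOrtEnv(), and release() come from the generated file above.

import org.bytedeco.onnxruntime.*;

public class BasedEnvDemo {
    // Wrap a raw OrtEnv handle, look at it, then hand it back to the caller.
    static OrtEnv wrapAndRelease(OrtEnv raw) {
        BasedEnv wrapper = new BasedEnv(raw); // constructs Ort::Base<OrtEnv> around the handle
        OrtEnv view = wrapper.asOrtEnv();     // operator OrtEnv*: the same underlying pointer
        System.out.println("wrapped env: " + view);
        return wrapper.release();             // Base<T>::release(): detach the handle from the wrapper
    }
}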
31 changes: 31 additions & 0 deletions onnxruntime/src/gen/java/org/bytedeco/onnxruntime/BasedMemoryInfo.java
@@ -0,0 +1,31 @@
// Targeted by JavaCPP version 1.5.3-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.onnxruntime;

import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.onnxruntime.global.onnxruntime.*;

@Name("Ort::Base<OrtMemoryInfo>") @NoOffset @Properties(inherit = org.bytedeco.onnxruntime.presets.onnxruntime.class)
public class BasedMemoryInfo extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BasedMemoryInfo(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public BasedMemoryInfo(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
@Override public BasedMemoryInfo position(long position) {
return (BasedMemoryInfo)super.position(position);
}

public BasedMemoryInfo() { super((Pointer)null); allocate(); }
private native void allocate();
public BasedMemoryInfo(OrtMemoryInfo p) { super((Pointer)null); allocate(p); }
private native void allocate(OrtMemoryInfo p);

public native @Name("operator OrtMemoryInfo*") OrtMemoryInfo asOrtMemoryInfo();

public native OrtMemoryInfo release();
}