diff --git a/samples/c_cxx/CMakeLists.txt b/samples/c_cxx/CMakeLists.txt index 82bde8554446b..e477bffe79b49 100644 --- a/samples/c_cxx/CMakeLists.txt +++ b/samples/c_cxx/CMakeLists.txt @@ -21,10 +21,22 @@ option(onnxruntime_USE_NGRAPH "Build with nGraph support" OFF) option(onnxruntime_USE_NUPHAR "Build with Nuphar" OFF) option(onnxruntime_USE_BRAINSLICE "Build with BrainSlice" OFF) option(onnxruntime_USE_TENSORRT "Build with TensorRT support" OFF) +option(LIBPNG_ROOTDIR "libpng root dir") #if JPEG lib is available, we'll use it for image decoding, otherwise we'll use WIC find_package(JPEG) -find_package(PNG) +if(LIBPNG_ROOTDIR) + set(PNG_FOUND true) + if(WIN32) + set(PNG_LIBRARIES debug libpng16_d optimized libpng16) + else() + set(PNG_LIBRARIES png16) + endif() + set(PNG_INCLUDE_DIRS "${LIBPNG_ROOTDIR}/include") + set(PNG_LIBDIR "${LIBPNG_ROOTDIR}/lib") +else() + find_package(PNG) +endif() if(onnxruntime_USE_CUDA) add_definitions(-DUSE_CUDA) diff --git a/samples/c_cxx/README.md b/samples/c_cxx/README.md index 52d29ce3eca76..801311a3bde9b 100644 --- a/samples/c_cxx/README.md +++ b/samples/c_cxx/README.md @@ -9,6 +9,9 @@ This directory contains a few C/C++ sample applications for demoing onnxruntime ## Prerequisites 1. Visual Studio 2015/2017/2019 2. cmake(version >=3.13) +3. (optional) [libpng 1.6](http://www.libpng.org/pub/png/libpng.html) + +You may get a precompiled libpng library from [https://onnxruntimetestdata.blob.core.windows.net/models/libpng.zip](https://onnxruntimetestdata.blob.core.windows.net/models/libpng.zip) ## Install ONNX Runtime You may either get a prebuit onnxruntime from nuget.org, or build it from source by following the [BUILD.md document](../../../BUILD.md). @@ -26,8 +29,9 @@ When the solution is loaded, change the build configuration to "RelWithDebInfo"( Open cmd.exe, change your current directory to samples\c_cxx, then run ```bat mkdir build -cmake .. -A x64 -T host=x64 +cmake .. 
-A x64 -T host=x64 -DLIBPNG_ROOTDIR=C:\path\to\your\libpng\binary ``` +You may omit the "-DLIBPNG_ROOTDIR=..." argument if you don't have the libpng library. You may append "-Donnxruntime_USE_CUDA=ON" to the last command args if your onnxruntime binary was built with CUDA support. Then you can open the onnxruntime_samples.sln file in the "build" directory and build the solution. diff --git a/samples/c_cxx/fns_candy_style_transfer/CMakeLists.txt b/samples/c_cxx/fns_candy_style_transfer/CMakeLists.txt index 1abd4dc821472..8f4edc59b9954 100644 --- a/samples/c_cxx/fns_candy_style_transfer/CMakeLists.txt +++ b/samples/c_cxx/fns_candy_style_transfer/CMakeLists.txt @@ -3,4 +3,7 @@ add_executable(fns_candy_style_transfer "fns_candy_style_transfer.c") target_include_directories(fns_candy_style_transfer PRIVATE ${PROJECT_SOURCE_DIR}/include ${PNG_INCLUDE_DIRS}) -target_link_libraries(fns_candy_style_transfer PRIVATE onnxruntime ${PNG_LIBRARIES}) \ No newline at end of file +target_link_libraries(fns_candy_style_transfer PRIVATE onnxruntime ${PNG_LIBRARIES}) +if(PNG_LIBDIR) + target_link_directories(fns_candy_style_transfer PRIVATE ${PNG_LIBDIR}) +endif() \ No newline at end of file diff --git a/samples/c_cxx/fns_candy_style_transfer/README.md b/samples/c_cxx/fns_candy_style_transfer/README.md new file mode 100644 index 0000000000000..4211aa8d1ec59 --- /dev/null +++ b/samples/c_cxx/fns_candy_style_transfer/README.md @@ -0,0 +1,19 @@ +# Build +See [../README.md](../README.md) + +# Prepare data +Please download the model from [candy.onnx](https://raw.githubusercontent.com/microsoft/Windows-Machine-Learning/master/Samples/FNSCandyStyleTransfer/UWP/cs/Assets/candy.onnx) + +Then prepare an image: +1. In PNG format +2. 
With dimension of 720x720 + +# Run +``` +fns_candy_style_transfer.exe +``` + + + + + diff --git a/samples/c_cxx/fns_candy_style_transfer/fns_candy_style_transfer.c b/samples/c_cxx/fns_candy_style_transfer/fns_candy_style_transfer.c index 7085084996ea6..5c191ca540286 100644 --- a/samples/c_cxx/fns_candy_style_transfer/fns_candy_style_transfer.c +++ b/samples/c_cxx/fns_candy_style_transfer/fns_candy_style_transfer.c @@ -5,8 +5,10 @@ #include #include #include - -#define ORT_THROW_ON_ERROR(expr) \ +#ifdef _WIN32 +#include +#endif +#define ORT_ABORT_ON_ERROR(expr) \ do { \ OrtStatus* onnx_status = (expr); \ if (onnx_status != NULL) { \ @@ -94,21 +96,21 @@ static int read_png_file(const char* input_file, size_t* height, size_t* width, */ static int write_tensor_to_png_file(OrtValue* tensor, const char* output_file) { struct OrtTensorTypeAndShapeInfo* shape_info; - ORT_THROW_ON_ERROR(OrtGetTensorTypeAndShape(tensor, &shape_info)); + ORT_ABORT_ON_ERROR(OrtGetTensorTypeAndShape(tensor, &shape_info)); size_t dim_count; - ORT_THROW_ON_ERROR(OrtGetDimensionsCount(shape_info, &dim_count)); + ORT_ABORT_ON_ERROR(OrtGetDimensionsCount(shape_info, &dim_count)); if (dim_count != 4) { printf("output tensor must have 4 dimensions"); return -1; } int64_t dims[4]; - ORT_THROW_ON_ERROR(OrtGetDimensions(shape_info, dims, sizeof(dims) / sizeof(dims[0]))); + ORT_ABORT_ON_ERROR(OrtGetDimensions(shape_info, dims, sizeof(dims) / sizeof(dims[0]))); if (dims[0] != 1 || dims[1] != 3) { printf("output tensor shape error"); return -1; } float* f; - ORT_THROW_ON_ERROR(OrtGetTensorMutableData(tensor, (void**)&f)); + ORT_ABORT_ON_ERROR(OrtGetTensorMutableData(tensor, (void**)&f)); png_bytep model_output_bytes; png_image image; memset(&image, 0, (sizeof image)); @@ -129,12 +131,33 @@ static int write_tensor_to_png_file(OrtValue* tensor, const char* output_file) { static void usage() { printf("usage: \n"); } -int run_inference(OrtSession* session, const char* input_file, const char* output_file) { 
+static char* convert_string(const wchar_t* input) { + size_t src_len = wcslen(input) + 1; + if (src_len > INT_MAX) { + printf("size overflow\n"); + abort(); + } + const int len = WideCharToMultiByte(CP_ACP, 0, input, (int)src_len, NULL, 0, NULL, NULL); + assert(len > 0); + char* ret = (char*)malloc(len); + assert(ret != NULL); + const int r = WideCharToMultiByte(CP_ACP, 0, input, (int)src_len, ret, len, NULL, NULL); + assert(len == r); + return ret; +} + +int run_inference(OrtSession* session, const ORTCHAR_T* input_file, const ORTCHAR_T* output_file) { size_t input_height; size_t input_width; float* model_input; size_t model_input_ele_count; - if (read_png_file(input_file, &input_height, &input_width, &model_input, &model_input_ele_count) != 0) { +#ifdef _WIN32 + char* output_file_p = convert_string(output_file); + char* input_file_p = convert_string(input_file); +#else + char* input_file_p = input_file; +#endif + if (read_png_file(input_file_p, &input_height, &input_width, &model_input, &model_input_ele_count) != 0) { return -1; } if (input_height != 720 || input_width != 720) { @@ -143,69 +166,82 @@ int run_inference(OrtSession* session, const char* input_file, const char* outpu return -1; } OrtAllocatorInfo* allocator_info; - ORT_THROW_ON_ERROR(OrtCreateCpuAllocatorInfo(OrtArenaAllocator, OrtMemTypeDefault, &allocator_info)); + ORT_ABORT_ON_ERROR(OrtCreateCpuAllocatorInfo(OrtArenaAllocator, OrtMemTypeDefault, &allocator_info)); const int64_t input_shape[] = {1, 3, 720, 720}; const size_t input_shape_len = sizeof(input_shape) / sizeof(input_shape[0]); const size_t model_input_len = model_input_ele_count * sizeof(float); OrtValue* input_tensor = NULL; - ORT_THROW_ON_ERROR(OrtCreateTensorWithDataAsOrtValue(allocator_info, model_input, model_input_len, input_shape, + ORT_ABORT_ON_ERROR(OrtCreateTensorWithDataAsOrtValue(allocator_info, model_input, model_input_len, input_shape, input_shape_len, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, &input_tensor)); 
assert(input_tensor != NULL); int is_tensor; - ORT_THROW_ON_ERROR(OrtIsTensor(input_tensor, &is_tensor)); + ORT_ABORT_ON_ERROR(OrtIsTensor(input_tensor, &is_tensor)); assert(is_tensor); OrtReleaseAllocatorInfo(allocator_info); const char* input_names[] = {"inputImage"}; const char* output_names[] = {"outputImage"}; OrtValue* output_tensor = NULL; - ORT_THROW_ON_ERROR( + ORT_ABORT_ON_ERROR( OrtRun(session, NULL, input_names, (const OrtValue* const*)&input_tensor, 1, output_names, 1, &output_tensor)); assert(output_tensor != NULL); - ORT_THROW_ON_ERROR(OrtIsTensor(output_tensor, &is_tensor)); + ORT_ABORT_ON_ERROR(OrtIsTensor(output_tensor, &is_tensor)); assert(is_tensor); int ret = 0; - if (write_tensor_to_png_file(output_tensor, output_file) != 0) { + if (write_tensor_to_png_file(output_tensor, output_file_p) != 0) { ret = -1; } OrtReleaseValue(output_tensor); OrtReleaseValue(input_tensor); free(model_input); +#ifdef _WIN32 + free(input_file_p); + free(output_file_p); +#endif // _WIN32 return ret; } void verify_input_output_count(OrtSession* session) { size_t count; - ORT_THROW_ON_ERROR(OrtSessionGetInputCount(session, &count)); + ORT_ABORT_ON_ERROR(OrtSessionGetInputCount(session, &count)); assert(count == 1); - ORT_THROW_ON_ERROR(OrtSessionGetOutputCount(session, &count)); + ORT_ABORT_ON_ERROR(OrtSessionGetOutputCount(session, &count)); assert(count == 1); } #ifdef USE_CUDA void enable_cuda(OrtSessionOptions* session_options) { - ORT_THROW_ON_ERROR(OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0)); + ORT_ABORT_ON_ERROR(OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0)); } #endif +#ifdef _WIN32 +int wmain(int argc, wchar_t* argv[]) { +#else int main(int argc, char* argv[]) { +#endif if (argc < 4) { usage(); return -1; } - char* model_path = argv[1]; - char* input_file = argv[2]; - char* output_file = argv[3]; +#ifdef _WIN32 + //CoInitializeEx is only needed if Windows Image Component will be used in this program for image 
loading/saving. + HRESULT hr = CoInitializeEx(NULL, COINIT_MULTITHREADED); + if (!SUCCEEDED(hr)) return -1; +#endif + ORTCHAR_T* model_path = argv[1]; + ORTCHAR_T* input_file = argv[2]; + ORTCHAR_T* output_file = argv[3]; OrtEnv* env; - ORT_THROW_ON_ERROR(OrtCreateEnv(ORT_LOGGING_LEVEL_WARNING, "test", &env)); + ORT_ABORT_ON_ERROR(OrtCreateEnv(ORT_LOGGING_LEVEL_WARNING, "test", &env)); OrtSessionOptions* session_options; - ORT_THROW_ON_ERROR(OrtCreateSessionOptions(&session_options)); + ORT_ABORT_ON_ERROR(OrtCreateSessionOptions(&session_options)); #ifdef USE_CUDA enable_cuda(session_options); #endif OrtSession* session; - ORT_THROW_ON_ERROR(OrtCreateSession(env, model_path, session_options, &session)); + ORT_ABORT_ON_ERROR(OrtCreateSession(env, model_path, session_options, &session)); verify_input_output_count(session); int ret = run_inference(session, input_file, output_file); OrtReleaseSessionOptions(session_options); @@ -214,5 +250,8 @@ int main(int argc, char* argv[]) { if (ret != 0) { fprintf(stderr, "fail\n"); } +#ifdef _WIN32 + CoUninitialize(); +#endif return ret; }