diff --git a/cmake/get_boost.cmake b/cmake/get_boost.cmake
index 265d088698571..ffe6d7cf6928a 100644
--- a/cmake/get_boost.cmake
+++ b/cmake/get_boost.cmake
@@ -7,10 +7,18 @@ set(BOOST_USE_STATIC_LIBS true CACHE BOOL "")
 
 set(BOOST_COMPONENTS program_options system thread)
 
+# These components are only needed for Windows
 if(WIN32)
-  message(FATAL_ERROR "Windows not currently supported")
+  list(APPEND BOOST_COMPONENTS date_time regex)
 endif()
 
+# MSVC doesn't set these variables
+if(WIN32)
+  set(CMAKE_STATIC_LIBRARY_PREFIX lib)
+  set(CMAKE_SHARED_LIBRARY_PREFIX lib)
+endif()
+
+# Set lib prefixes and suffixes for linking
 if(BOOST_USE_STATIC_LIBS)
   set(LIBRARY_PREFIX ${CMAKE_STATIC_LIBRARY_PREFIX})
   set(LIBRARY_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX})
@@ -19,6 +27,7 @@ else()
   set(LIBRARY_SUFFIX ${CMAKE_SHARED_LIBRARY_SUFFIX})
 endif()
 
+# Create list of components in Boost format
 foreach(component ${BOOST_COMPONENTS})
   list(APPEND BOOST_COMPONENTS_FOR_BUILD --with-${component})
 endforeach()
@@ -43,6 +52,16 @@ macro(DOWNLOAD_BOOST)
     set(VARIANT "debug")
   endif()
 
+  set(WINDOWS_B2_OPTIONS)
+  set(WINDOWS_LIB_NAME_SCHEME)
+  if(WIN32)
+    set(BOOTSTRAP_FILE_TYPE "bat")
+    set(WINDOWS_B2_OPTIONS "toolset=msvc-14.1" "architecture=x86" "address-model=64")
+    set(WINDOWS_LIB_NAME_SCHEME "-vc141-mt-gd-x64-1_69")
+  else()
+    set(BOOTSTRAP_FILE_TYPE "sh")
+  endif()
+
   message(STATUS "Adding Boost components")
   include(ExternalProject)
   ExternalProject_Add(
@@ -52,21 +71,25 @@ macro(DOWNLOAD_BOOST)
     DOWNLOAD_DIR ${BOOST_ROOT_DIR}
     SOURCE_DIR ${BOOST_ROOT_DIR}
     UPDATE_COMMAND ""
-    CONFIGURE_COMMAND ./bootstrap.sh --prefix=${BOOST_ROOT_DIR}
-    BUILD_COMMAND ./b2 install ${BOOST_MAYBE_STATIC} --prefix=${BOOST_ROOT_DIR} variant=${VARIANT} ${BOOST_COMPONENTS_FOR_BUILD}
+    CONFIGURE_COMMAND ./bootstrap.${BOOTSTRAP_FILE_TYPE} --prefix=${BOOST_ROOT_DIR}
+    BUILD_COMMAND ./b2 install ${BOOST_MAYBE_STATIC} --prefix=${BOOST_ROOT_DIR} variant=${VARIANT} ${WINDOWS_B2_OPTIONS} ${BOOST_COMPONENTS_FOR_BUILD}
     BUILD_IN_SOURCE true
     INSTALL_COMMAND ""
     INSTALL_DIR ${BOOST_ROOT_DIR}
-    LOG_BUILD ON
   )
 
+  # Set include folders
   ExternalProject_Get_Property(Boost INSTALL_DIR)
   set(Boost_INCLUDE_DIR ${INSTALL_DIR}/include)
+  if(WIN32)
+    set(Boost_INCLUDE_DIR ${INSTALL_DIR}/include/boost-1_69)
+  endif()
 
+  # Set libraries to link
  macro(libraries_to_fullpath varname)
    set(${varname})
    foreach(component ${BOOST_COMPONENTS})
-      list(APPEND ${varname} ${INSTALL_DIR}/lib/${LIBRARY_PREFIX}boost_${component}${LIBRARY_SUFFIX})
+      list(APPEND ${varname} ${INSTALL_DIR}/lib/${LIBRARY_PREFIX}boost_${component}${WINDOWS_LIB_NAME_SCHEME}${LIBRARY_SUFFIX})
    endforeach()
  endmacro()
 
diff --git a/cmake/onnxruntime_server.cmake b/cmake/onnxruntime_server.cmake
index f67df80e4da35..7e85aae615bda 100644
--- a/cmake/onnxruntime_server.cmake
+++ b/cmake/onnxruntime_server.cmake
@@ -4,8 +4,10 @@ set(SERVER_APP_NAME "onnxruntime_server")
 
 # Generate .h and .cc files from protobuf file
-add_library(server_proto
-  ${ONNXRUNTIME_ROOT}/server/protobuf/predict.proto)
+add_library(server_proto ${ONNXRUNTIME_ROOT}/server/protobuf/predict.proto)
+if(WIN32)
+  target_compile_options(server_proto PRIVATE "/wd4125" "/wd4456")
+endif()
 target_include_directories(server_proto PUBLIC $<TARGET_PROPERTY:protobuf::libprotobuf,INTERFACE_INCLUDE_DIRECTORIES> "${CMAKE_CURRENT_BINARY_DIR}/.." ${CMAKE_CURRENT_BINARY_DIR}/onnx)
 target_compile_definitions(server_proto PUBLIC $<TARGET_PROPERTY:protobuf::libprotobuf,INTERFACE_COMPILE_DEFINITIONS>)
 onnxruntime_protobuf_generate(APPEND_PATH IMPORT_DIRS ${REPO_ROOT}/cmake/external/protobuf/src ${ONNXRUNTIME_ROOT}/server/protobuf ${ONNXRUNTIME_ROOT}/core/protobuf TARGET server_proto)
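A note on the hard-coded `WINDOWS_LIB_NAME_SCHEME` above: on Windows, b2 decorates library file names with toolset, threading, runtime and version tags, which is exactly what that string reproduces. A minimal sketch (Python, illustrative only) of the file name that `libraries_to_fullpath` ends up linking, assuming the values set in get_boost.cmake:

    # Sketch of the decorated Boost library name on Windows, assuming the
    # values hardcoded above (vc141 toolset, multithreaded, debug runtime,
    # x64, Boost 1.69).
    prefix = "lib"                      # CMAKE_STATIC_LIBRARY_PREFIX, forced above for MSVC
    scheme = "-vc141-mt-gd-x64-1_69"    # WINDOWS_LIB_NAME_SCHEME
    suffix = ".lib"                     # CMAKE_STATIC_LIBRARY_SUFFIX under MSVC
    for component in ["program_options", "system", "thread", "date_time", "regex"]:
        print(prefix + "boost_" + component + scheme + suffix)
    # -> libboost_program_options-vc141-mt-gd-x64-1_69.lib, etc.

Worth flagging: the `gd` tag in that scheme only matches debug-runtime builds, so a release configuration would presumably need a different suffix.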
diff --git a/onnxruntime/server/converter.cc b/onnxruntime/server/converter.cc
index 56fd51cd10c4c..c35fa03fec369 100644
--- a/onnxruntime/server/converter.cc
+++ b/onnxruntime/server/converter.cc
@@ -85,7 +85,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
-        for (int i = 0, count = tensor.Shape().Size(); i < count; ++i) {
+        for (size_t i = 0, count = tensor.Shape().Size(); i < count; ++i) {
           tensor_proto.add_float_data(data[i]);
         }
       }
@@ -96,7 +96,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
-        for (int i = 0, count = tensor.Shape().Size(); i < count; ++i) {
+        for (size_t i = 0, count = tensor.Shape().Size(); i < count; ++i) {
           tensor_proto.add_int32_data(data[i]);
         }
       }
@@ -108,7 +108,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
         auto i32data = reinterpret_cast<const int32_t*>(data);
-        for (int i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
+        for (size_t i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
           tensor_proto.add_int32_data(i32data[i]);
         }
       }
@@ -120,7 +120,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
         auto i32data = reinterpret_cast<const int32_t*>(data);
-        for (int i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
+        for (size_t i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
           tensor_proto.add_int32_data(i32data[i]);
         }
       }
@@ -132,7 +132,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
         auto i32data = reinterpret_cast<const int32_t*>(data);
-        for (int i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
+        for (size_t i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
           tensor_proto.add_int32_data(i32data[i]);
         }
       }
@@ -144,7 +144,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
         auto i32data = reinterpret_cast<const int32_t*>(data);
-        for (int i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
+        for (size_t i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
           tensor_proto.add_int32_data(i32data[i]);
         }
       }
@@ -156,7 +156,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
         auto i32data = reinterpret_cast<const int32_t*>(data);
-        for (int i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
+        for (size_t i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
           tensor_proto.add_int32_data(i32data[i]);
         }
       }
@@ -168,7 +168,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
         auto i32data = reinterpret_cast<const int32_t*>(data);
-        for (int i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
+        for (size_t i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
           tensor_proto.add_int32_data(i32data[i]);
         }
       }
@@ -188,7 +188,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
         tensor_proto.set_raw_data(raw_data.data(), raw_data.size() * sizeof(uint16_t));
       } else {
         auto i32data = reinterpret_cast<const int32_t*>(raw_data.data());
-        for (int i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
+        for (size_t i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(int32_t)); i < count; ++i) {
           tensor_proto.add_int32_data(i32data[i]);
         }
       }
@@ -197,7 +197,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
     case onnx::TensorProto_DataType_STRING: {  // Target: string_data
       // string could not be written into "raw_data"
       const auto* data = tensor.Data<std::string>();
-      for (int i = 0, count = tensor.Shape().Size(); i < count; ++i) {
+      for (size_t i = 0, count = tensor.Shape().Size(); i < count; ++i) {
         tensor_proto.add_string_data(data[i]);
       }
       break;
@@ -207,7 +207,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
-        for (int x = 0, loop_length = tensor.Shape().Size(); x < loop_length; ++x) {
+        for (size_t x = 0, loop_length = tensor.Shape().Size(); x < loop_length; ++x) {
           tensor_proto.add_int64_data(data[x]);
         }
       }
@@ -219,7 +219,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
         auto u64data = reinterpret_cast<const uint64_t*>(data);
-        for (int i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(uint64_t)); i < count; ++i) {
+        for (size_t i = 0, count = 1 + ((tensor.Size() - 1) / sizeof(uint64_t)); i < count; ++i) {
           tensor_proto.add_uint64_data(u64data[i]);
         }
       }
@@ -230,7 +230,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
-        for (int x = 0, loop_length = tensor.Shape().Size(); x < loop_length; ++x) {
+        for (size_t x = 0, loop_length = tensor.Shape().Size(); x < loop_length; ++x) {
           tensor_proto.add_uint64_data(data[x]);
         }
       }
@@ -241,7 +241,7 @@ common::Status MLValueToTensorProto(const onnxruntime::MLValue& ml_value, bool u
       if (using_raw_data) {
         tensor_proto.set_raw_data(data, tensor.Size());
       } else {
-        for (int x = 0, loop_length = tensor.Shape().Size(); x < loop_length; ++x) {
+        for (size_t x = 0, loop_length = tensor.Shape().Size(); x < loop_length; ++x) {
          tensor_proto.add_double_data(data[x]);
         }
       }
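The loop bound `1 + ((tensor.Size() - 1) / sizeof(int32_t))` that recurs through the converter.cc hunks above is ceiling division: it counts how many 4-byte int32 slots are needed to carry `Size()` bytes when narrow element types are packed into the protobuf `int32_data` field. The `int` to `size_t` change itself presumably silences MSVC's signed/unsigned comparison warnings. A quick check of the ceiling identity (plain Python, illustrative only):

    import math

    def int32_slots(num_bytes):
        # Mirrors the C++ loop bound: 1 + ((num_bytes - 1) / sizeof(int32_t))
        return 1 + (num_bytes - 1) // 4

    for n in range(1, 100):
        assert int32_slots(n) == math.ceil(n / 4)
    print(int32_slots(6))  # six int8/uint8 bytes -> 2 int32 slots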
diff --git a/onnxruntime/server/http/predict_request_handler.cc b/onnxruntime/server/http/predict_request_handler.cc
index 32d6443744e3d..800e1b468aaa8 100644
--- a/onnxruntime/server/http/predict_request_handler.cc
+++ b/onnxruntime/server/http/predict_request_handler.cc
@@ -112,7 +112,7 @@ static bool ParseRequestPayload(const HttpContext& context, SupportedContentType
       break;
     }
     case SupportedContentType::PbByteArray: {
-      bool parse_succeeded = predictRequest.ParseFromArray(body.data(), body.size());
+      bool parse_succeeded = predictRequest.ParseFromArray(body.data(), static_cast<int>(body.size()));
       if (!parse_succeeded) {
         error_code = http::status::bad_request;
         error_message = "Invalid payload.";
diff --git a/onnxruntime/server/server_configuration.h b/onnxruntime/server/server_configuration.h
index 4ae9b58496da7..cfc9287683fe8 100644
--- a/onnxruntime/server/server_configuration.h
+++ b/onnxruntime/server/server_configuration.h
@@ -40,7 +40,7 @@ class ServerConfiguration {
   const std::string full_desc = "ONNX Server: host an ONNX model with ONNX Runtime";
   std::string model_path;
   std::string address = "0.0.0.0";
-  int http_port = 8001;
+  unsigned short http_port = 8001;
   int num_http_threads = std::thread::hardware_concurrency();
   onnxruntime::logging::Severity logging_level{};
@@ -97,9 +97,6 @@ class ServerConfiguration {
     } else if (num_http_threads <= 0) {
       PrintHelp(std::cerr, "num_http_threads must be greater than 0");
       return Result::ExitFailure;
-    } else if (http_port < 0 || http_port > 65535) {
-      PrintHelp(std::cerr, "http_port input invalid");
-      return Result::ExitFailure;
     } else if (!file_exists(model_path)) {
       PrintHelp(std::cerr, "model_path must be the location of a valid file");
       return Result::ExitFailure;
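Dropping the explicit range check above is sound because the port now parses straight into an `unsigned short`: boost::program_options should fail the parse for values outside 0-65535, before the validation chain ever runs. A rough Python rendering of that invariant (illustrative sketch; `parse_port` is hypothetical, not the server's code):

    import struct

    def parse_port(text):
        value = int(text)
        # struct.error stands in for the failed lexical cast in C++;
        # 'H' is a 16-bit unsigned field with range 0..65535
        struct.pack('H', value)
        return value

    print(parse_port('8001'))   # ok
    try:
        parse_port('70000')
    except struct.error:
        print('out of range, rejected at parse time')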
diff --git a/onnxruntime/test/server/integration_tests/test_util.py b/onnxruntime/test/server/integration_tests/test_util.py
index 60840a873012b..e268a735c8c68 100644
--- a/onnxruntime/test/server/integration_tests/test_util.py
+++ b/onnxruntime/test/server/integration_tests/test_util.py
@@ -12,6 +12,7 @@
 import datetime
 import socket
 import errno
+import sys
 
 import predict_pb2
 import onnx_ml_pb2
@@ -22,12 +23,20 @@ def test_log(str):
 
 
 def is_process_killed(pid):
-    try:
-        os.kill(pid, 0)
-    except OSError:
-        return False
+    if sys.platform.startswith("win"):
+        process_name = 'onnxruntime_host.exe'
+        call = 'TASKLIST', '/FI', 'imagename eq {0}'.format(process_name)
+        output = subprocess.check_output(call).decode('utf-8')
+        print(output)
+        last_line = output.strip().split('\r\n')[-1]
+        return not last_line.lower().startswith(process_name)
     else:
-        return True
+        try:
+            os.kill(pid, 0)
+        except OSError:
+            return False
+        else:
+            return True
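For reference, the Windows branch above leans on the shape of TASKLIST output: when a match exists, the last line is a table row beginning with the image name; when nothing matches, it is an INFO message. A small illustration of that parsing (the sample outputs are abbreviated stand-ins, not captured from a real run):

    process_name = 'onnxruntime_host.exe'

    no_match = 'INFO: No tasks are running which match the specified criteria.'
    match = ('Image Name            PID Session Name   Session#    Mem Usage\r\n'
             '===================== === ============== =========== ============\r\n'
             'onnxruntime_host.exe  123 Console        1           10,000 K')

    for output in (no_match, match):
        last_line = output.strip().split('\r\n')[-1]
        print(not last_line.lower().startswith(process_name))
    # -> True (process gone), then False (still running)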
diff --git a/onnxruntime/test/server/unit_tests/converter_tests.cc b/onnxruntime/test/server/unit_tests/converter_tests.cc
index e5f2d9642f2f6..cf60aa716e6b2 100644
--- a/onnxruntime/test/server/unit_tests/converter_tests.cc
+++ b/onnxruntime/test/server/unit_tests/converter_tests.cc
@@ -115,12 +115,12 @@ TEST(MLValueToTensorProtoTests, FloatToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(float);
+  auto count = tp.raw_data().size() / sizeof(float);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
-  const float* tensor_data = reinterpret_cast<const float*>(raw);
-  for (int j = 0; j < count; ++j) {
+  const auto* tensor_data = reinterpret_cast<const float*>(raw);
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_FLOAT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -189,12 +189,12 @@ TEST(MLValueToTensorProtoTests, Int32ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(int32_t);
+  auto count = tp.raw_data().size() / sizeof(int32_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
-  const int32_t* tensor_data = reinterpret_cast<const int32_t*>(raw);
-  for (int j = 0; j < count; ++j) {
+  const auto* tensor_data = reinterpret_cast<const int32_t*>(raw);
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -263,12 +263,12 @@ TEST(MLValueToTensorProtoTests, UInt8ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(uint8_t);
+  auto count = tp.raw_data().size() / sizeof(uint8_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
   const auto* tensor_data = reinterpret_cast<const uint8_t*>(raw);
-  for (int j = 0; j < count; ++j) {
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -301,7 +301,7 @@ TEST(MLValueToTensorProtoTests, UInt8ToInt32Data) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.int32_data().size() * (sizeof(int32_t) / sizeof(uint8_t));
+  auto count = tp.int32_data().size() * (sizeof(int32_t) / sizeof(uint8_t));
   EXPECT_EQ(count, 8);
   auto data = tp.int32_data().data();
   const auto* data8 = reinterpret_cast<const uint8_t*>(data);
@@ -339,12 +339,12 @@ TEST(MLValueToTensorProtoTests, Int8ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(uint8_t);
+  auto count = tp.raw_data().size() / sizeof(uint8_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
   const auto* tensor_data = reinterpret_cast<const int8_t*>(raw);
-  for (int j = 0; j < count; ++j) {
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -377,7 +377,7 @@ TEST(MLValueToTensorProtoTests, Int8ToInt32Data) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.int32_data().size();
+  auto count = tp.int32_data().size();
   EXPECT_EQ(count, 2);
   auto data = tp.int32_data().data();
   const auto* data8 = reinterpret_cast<const int8_t*>(data);
@@ -415,12 +415,12 @@ TEST(MLValueToTensorProtoTests, UInt16ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(uint16_t);
+  auto count = tp.raw_data().size() / sizeof(uint16_t);
  EXPECT_EQ(count, 9);
 
   auto raw = tp.raw_data().data();
   const auto* tensor_data = reinterpret_cast<const uint16_t*>(raw);
-  for (int j = 0; j < count; ++j) {
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -453,7 +453,7 @@ TEST(MLValueToTensorProtoTests, UInt16ToInt32Data) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.int32_data().size();
+  auto count = tp.int32_data().size();
   EXPECT_EQ(count, 5);
   auto data = tp.int32_data().data();
   const auto* data16 = reinterpret_cast<const uint16_t*>(data);
@@ -491,12 +491,12 @@ TEST(MLValueToTensorProtoTests, Int16ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(uint16_t);
+  auto count = tp.raw_data().size() / sizeof(uint16_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
   const auto* tensor_data = reinterpret_cast<const int16_t*>(raw);
-  for (int j = 0; j < count; ++j) {
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -529,7 +529,7 @@ TEST(MLValueToTensorProtoTests, Int16ToInt32Data) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.int32_data().size() * (sizeof(int32_t) / sizeof(int16_t));
+  auto count = tp.int32_data().size() * (sizeof(int32_t) / sizeof(int16_t));
   EXPECT_EQ(count, 6);
   auto data = tp.int32_data().data();
   const auto* data16 = reinterpret_cast<const int16_t*>(data);
@@ -567,12 +567,12 @@ TEST(MLValueToTensorProtoTests, BoolToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(bool);
+  auto count = tp.raw_data().size() / sizeof(bool);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
   const auto* tensor_data = reinterpret_cast<const bool*>(raw);
-  for (int j = 0; j < count; ++j) {
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -605,7 +605,7 @@ TEST(MLValueToTensorProtoTests, BoolToInt32Data) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.int32_data().size();
+  auto count = tp.int32_data().size();
   EXPECT_EQ(count, 2);
   auto data = tp.int32_data().data();
   const auto* data16 = reinterpret_cast<const bool*>(data);
@@ -649,12 +649,12 @@ TEST(MLValueToTensorProtoTests, Float16ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(onnxruntime::MLFloat16);
+  auto count = tp.raw_data().size() / sizeof(onnxruntime::MLFloat16);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
   const auto* tensor_data = reinterpret_cast<const onnxruntime::MLFloat16*>(raw);
-  for (int j = 0; j < count; ++j) {
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -693,7 +693,7 @@ TEST(MLValueToTensorProtoTests, FloatToInt32Data) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.int32_data().size();
+  auto count = tp.int32_data().size();
   EXPECT_EQ(count, 3);
   auto data = tp.int32_data().data();
   const auto* data16 = reinterpret_cast<const onnxruntime::MLFloat16*>(data);
@@ -737,12 +737,12 @@ TEST(MLValueToTensorProtoTests, BFloat16ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(uint16_t);
+  auto count = tp.raw_data().size() / sizeof(uint16_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
   const auto* tensor_data = reinterpret_cast<const uint16_t*>(raw);
-  for (int j = 0; j < count; ++j) {
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j].val);
   }
 }
@@ -781,7 +781,7 @@ TEST(MLValueToTensorProtoTests, BFloatToInt32Data) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.int32_data().size();
+  auto count = tp.int32_data().size();
   EXPECT_EQ(count, 3);
   auto data = tp.int32_data().data();
   const auto* data16 = reinterpret_cast<const onnxruntime::BFloat16*>(data);
@@ -824,7 +824,7 @@ TEST(MLValueToTensorProtoTests, StringToStringData) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.string_data().size();
+  auto count = tp.string_data().size();
   EXPECT_EQ(count, 6);
   const auto* data = tp.string_data().data();
   for (int x = 0; x < 6; ++x) {
@@ -861,12 +861,12 @@ TEST(MLValueToTensorProtoTests, Int64ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(int64_t);
+  auto count = tp.raw_data().size() / sizeof(int64_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
-  const int64_t* tensor_data = reinterpret_cast<const int64_t*>(raw);
-  for (int j = 0; j < count; ++j) {
+  const auto* tensor_data = reinterpret_cast<const int64_t*>(raw);
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -935,12 +935,12 @@ TEST(MLValueToTensorProtoTests, UInt32ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(uint32_t);
+  auto count = tp.raw_data().size() / sizeof(uint32_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
-  uint32_t* tensor_data = (uint32_t*)raw;
-  for (int j = 0; j < count; ++j) {
+  auto* tensor_data = (uint32_t*)raw;
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -973,12 +973,12 @@ TEST(MLValueToTensorProtoTests, UInt32ToUint64Data) {
 
   // Verify data
   EXPECT_FALSE(tp.has_raw_data());
-  int count = tp.uint64_data().size() * (sizeof(uint64_t) / sizeof(uint32_t));
+  auto count = tp.uint64_data().size() * (sizeof(uint64_t) / sizeof(uint32_t));
   EXPECT_EQ(count, 6);
   auto data = tp.uint64_data().data();
   const auto* data32 = reinterpret_cast<const uint32_t*>(data);
-  for (int x = 0; x < count; ++x) {
+  for (size_t x = 0; x < count; ++x) {
     EXPECT_EQ(data32[x], values_mul_x[x]);
   }
 }
@@ -1012,12 +1012,12 @@ TEST(MLValueToTensorProtoTests, UInt64ToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(uint64_t);
+  auto count = tp.raw_data().size() / sizeof(uint64_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
-  const uint64_t* tensor_data = reinterpret_cast<const uint64_t*>(raw);
-  for (int j = 0; j < count; ++j) {
+  const auto* tensor_data = reinterpret_cast<const uint64_t*>(raw);
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
@@ -1086,12 +1086,12 @@ TEST(MLValueToTensorProtoTests, DoubleToRaw) {
 
   // Verify data
   EXPECT_TRUE(tp.has_raw_data());
-  int count = tp.raw_data().size() / sizeof(uint64_t);
+  auto count = tp.raw_data().size() / sizeof(uint64_t);
   EXPECT_EQ(count, 6);
 
   auto raw = tp.raw_data().data();
-  const double* tensor_data = reinterpret_cast<const double*>(raw);
-  for (int j = 0; j < count; ++j) {
+  const auto* tensor_data = reinterpret_cast<const double*>(raw);
+  for (size_t j = 0; j < count; ++j) {
     EXPECT_DOUBLE_EQ(tensor_data[j], values_mul_x[j]);
   }
 }
diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py
index e3f2edd186a33..013604d649bcb 100755
--- a/tools/ci_build/build.py
+++ b/tools/ci_build/build.py
@@ -572,10 +572,14 @@ def run_onnx_tests(build_dir, configs, onnx_test_data_dir, provider, enable_para
 
 
 def run_server_tests(build_dir, configs):
-    run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host', 'files.pythonhosted.org', 'requests'])
+    run_subprocess([sys.executable, '-m', 'pip', 'install', '--trusted-host', 'files.pythonhosted.org', 'requests', 'protobuf', 'numpy'])
     for config in configs:
         config_build_dir = get_config_build_dir(build_dir, config)
         server_app_path = os.path.join(config_build_dir, 'onnxruntime_server')
+        if is_windows():
+            server_app_path = os.path.join(config_build_dir, config, 'onnxruntime_hosting.exe')
+        else:
+            server_app_path = os.path.join(config_build_dir, 'onnxruntime_hosting')
         server_test_folder = os.path.join(config_build_dir, 'server_test')
         server_test_data_folder = os.path.join(os.path.join(config_build_dir, 'testdata'), 'server')
         run_subprocess([sys.executable, 'test_main.py', server_app_path, server_test_data_folder, server_test_data_folder], cwd=server_test_folder, dll_path=None)
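The platform split above reflects CMake's generator layouts: multi-config generators (Visual Studio/MSBuild) nest binaries one level deeper, under the configuration name, and Windows adds the .exe suffix. A sketch of the two resulting paths, with a hypothetical build root standing in for what get_config_build_dir returns:

    import os

    config_build_dir = os.path.join('build', 'Debug')  # hypothetical
    # Windows, multi-config generator: extra per-config folder plus .exe
    print(os.path.join(config_build_dir, 'Debug', 'onnxruntime_hosting.exe'))
    # Linux, single-config generator: binary sits directly in the build dir
    print(os.path.join(config_build_dir, 'onnxruntime_hosting'))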