1.9.1 Cherry-Picks (#9239)
* Add full iOS job in package pipeline (#9036)

* Add full iOS xcframework job

* Create zip file of the xcframework

* Bump up TVM version to avoid conflict with existing one (#9159)

* Bump up TVM version

* Bump up onnxruntime-tvm version

There are some C++17-related fixes in TVM.

Co-authored-by: KeDengMS <[email protected]>

* Fix a bug introduced by PR #9130 (#9166)

* Make UWP Store apps link to statically linked CRT desktop builds (#9182)

Co-authored-by: Sheil Kumar <[email protected]>

* #9182 removed the `--is_store_build` option, but one place where it was still used was missed. (#9219)

This should fix the relevant packaging pipelines.

* DirectML.dll load fails when the executable path contains non-English characters (#9229)

* Enable Unicode DML (a wide-character loading sketch follows the change list below)

* Add wide-string L prefix

* Add Fail Fast back

Co-authored-by: Sheil Kumar <[email protected]>

* Fix Android build break after Virtual Environment update to 20210919 (#9163)

Co-authored-by: Guoyu Wang <[email protected]>
Co-authored-by: ke1337 <[email protected]>
Co-authored-by: KeDengMS <[email protected]>
Co-authored-by: George Wu <[email protected]>
Co-authored-by: Sheil Kumar <[email protected]>
Co-authored-by: Scott McKay <[email protected]>
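
A minimal sketch of the wide-character load that the DirectML fix relies on (illustrative only, not the exact ORT change; the DLL name and flags here are assumptions). Paths containing non-English characters are not reliably representable in the Windows ANSI code page, so the module is loaded through the *W API with a wide string literal (the L prefix):

#include <windows.h>

// Illustrative only: loading DirectML.dll via the wide-character API keeps
// non-ASCII characters in the search path intact; the ANSI variant
// (LoadLibraryExA) would force a lossy conversion to the ANSI code page.
HMODULE LoadDirectMLModule() {
  return ::LoadLibraryExW(L"DirectML.dll", nullptr,
                          LOAD_LIBRARY_SEARCH_DEFAULT_DIRS);
}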
7 people authored Oct 1, 2021
1 parent 4daa14b commit af0001c
Showing 26 changed files with 134 additions and 261 deletions.
2 changes: 1 addition & 1 deletion cmake/external/tvm
Submodule tvm updated 1 file
+2 −2 CMakeLists.txt
5 changes: 2 additions & 3 deletions csharp/OnnxRuntime.CSharp.proj
@@ -18,7 +18,6 @@ CMake creates a target to this project
<IsReleaseBuild Condition=" '$(IsReleaseBuild)' == '' ">false</IsReleaseBuild>
<IsLinuxBuild Condition=" '$(IsLinuxBuild)' == '' ">false</IsLinuxBuild>
<ExecutionProvider Condition=" '$(ExecutionProvider)' == '' ">None</ExecutionProvider>
<IsStoreBuild Condition=" '$(IsStoreBuild)' == '' ">false</IsStoreBuild>

<!--internal build related properties-->
<OnnxRuntimeSourceDirectory Condition="'$(OnnxRuntimeSourceDirectory)'==''">..</OnnxRuntimeSourceDirectory>
@@ -126,7 +125,7 @@ CMake creates a target to this project
Properties="NoBuild=true;Platform=AnyCPU;PackageVersion=$(PackageVersion);OrtPackageId=$(OrtPackageId)"/>

<Message Importance="High" Text="Generating nuspec for the native Nuget package ..." />
<Exec ContinueOnError="False" Command="python $(GenerateNuspecScript) --package_version $(PackageVersion) --package_name $(OrtPackageId) --target_architecture $(TargetArchitecture) --build_config $(Configuration) --native_build_path $(NativeBuildOutputDirAbs) --packages_path $(OnnxRuntimePackagesDirectoryAbs) --ort_build_path $(OnnxRuntimeBuildDirectoryAbs) --sources_path $(OnnxRuntimeSourceDirectoryAbs) --commit_id $(GitCommitHash) --is_release_build $(IsReleaseBuild) --execution_provider $(ExecutionProvider) --is_store_build $(IsStoreBuild)" ConsoleToMSBuild="true">
<Exec ContinueOnError="False" Command="python $(GenerateNuspecScript) --package_version $(PackageVersion) --package_name $(OrtPackageId) --target_architecture $(TargetArchitecture) --build_config $(Configuration) --native_build_path $(NativeBuildOutputDirAbs) --packages_path $(OnnxRuntimePackagesDirectoryAbs) --ort_build_path $(OnnxRuntimeBuildDirectoryAbs) --sources_path $(OnnxRuntimeSourceDirectoryAbs) --commit_id $(GitCommitHash) --is_release_build $(IsReleaseBuild) --execution_provider $(ExecutionProvider)" ConsoleToMSBuild="true">
<Output TaskParameter="ConsoleOutput" PropertyName="GenerateNuspecOutput" />
</Exec>

@@ -153,7 +152,7 @@ CMake creates a target to this project
<Copy SourceFiles="@(LicenseFile)" DestinationFiles="@(LicenseFile->'$(OnnxRuntimeSourceDirectory)\%(Filename).txt')"/>

<Message Importance="High" Text="Generating nuspec for the Microsoft.AI.MachineLearning Nuget package ..." />
<Exec ContinueOnError="False" Command="python ..\tools\nuget\generate_nuspec_for_native_nuget.py --package_version $(PackageVersion) --package_name Microsoft.AI.MachineLearning --target_architecture $(TargetArchitecture) --build_config $(Configuration) --native_build_path $(NativeBuildOutputDirAbs) --packages_path $(OnnxRuntimePackagesDirectoryAbs) --ort_build_path $(OnnxRuntimeBuildDirectoryAbs) --sources_path $(OnnxRuntimeSourceDirectoryAbs) --commit_id $(GitCommitHash) --is_release_build $(IsReleaseBuild) --is_store_build $(IsStoreBuild)" ConsoleToMSBuild="true">
<Exec ContinueOnError="False" Command="python ..\tools\nuget\generate_nuspec_for_native_nuget.py --package_version $(PackageVersion) --package_name Microsoft.AI.MachineLearning --target_architecture $(TargetArchitecture) --build_config $(Configuration) --native_build_path $(NativeBuildOutputDirAbs) --packages_path $(OnnxRuntimePackagesDirectoryAbs) --ort_build_path $(OnnxRuntimeBuildDirectoryAbs) --sources_path $(OnnxRuntimeSourceDirectoryAbs) --commit_id $(GitCommitHash) --is_release_build $(IsReleaseBuild)" ConsoleToMSBuild="true">
<Output TaskParameter="ConsoleOutput" PropertyName="GenerateNuspecOutput" />
</Exec>

2 changes: 1 addition & 1 deletion java/build-android.gradle
@@ -32,7 +32,7 @@ buildscript {
mavenCentral()
}
dependencies {
classpath 'com.android.tools.build:gradle:3.5.3'
classpath 'com.android.tools.build:gradle:4.0.1'

// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
2 changes: 1 addition & 1 deletion java/src/test/android/build.gradle
@@ -7,7 +7,7 @@ buildscript {
mavenCentral()
}
dependencies {
classpath "com.android.tools.build:gradle:3.5.3"
classpath "com.android.tools.build:gradle:4.0.1"
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files
4 changes: 2 additions & 2 deletions onnxruntime/core/framework/allocation_planner.cc
@@ -324,8 +324,8 @@ class PlannerImpl {

const optional<std::pair<int, int>>& variadic_alias_offsets = ci.kernel_def->VariadicAlias();
if (variadic_alias_offsets.has_value()) {
int input_offset = variadic_alias_offsets.value().first;
int output_offset = variadic_alias_offsets.value().second;
int input_offset = variadic_alias_offsets->first;
int output_offset = variadic_alias_offsets->second;
// we _must_ reuse this input to satisfy aliasing requirement: (e.g., for AllReduce)
int alias_input_index = output_arg_num - output_offset + input_offset;
if (alias_input_index >= 0 && static_cast<size_t>(alias_input_index) < input_args.size()) {
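
Most of the source changes in this commit follow the same mechanical pattern as the hunk above: once has_value() has been checked, the optional is read through operator-> or operator* instead of .value(). A minimal sketch of the pattern, using std::optional as a stand-in for ORT's optional type:

#include <optional>
#include <utility>

// Sketch only: after has_value() is confirmed, operator-> reads the contained
// pair directly; .value() would also work but adds an exception-throwing path
// and is not provided by every optional implementation.
int AliasInputIndex(const std::optional<std::pair<int, int>>& variadic_alias_offsets,
                    int output_arg_num) {
  if (!variadic_alias_offsets.has_value()) return -1;
  const int input_offset = variadic_alias_offsets->first;    // was .value().first
  const int output_offset = variadic_alias_offsets->second;  // was .value().second
  return output_arg_num - output_offset + input_offset;
}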
2 changes: 1 addition & 1 deletion onnxruntime/core/framework/config_options.cc
@@ -17,7 +17,7 @@ bool ConfigOptions::TryGetConfigEntry(const std::string& config_key, std::string
auto entry = GetConfigEntry(config_key);
const bool found = entry.has_value();
if (found) {
config_value = std::move(entry.value());
config_value = std::move(*entry);
}
return found;
}
12 changes: 6 additions & 6 deletions onnxruntime/core/optimizer/matmul_scale_fusion.cc
@@ -76,7 +76,7 @@ optional<std::pair<float, int>> GetScaleFromNode(

if (!divisor.has_value()) return {};

return {std::make_pair(1.0f / divisor.value(), scale_reciprocal_arg_index)};
return {std::make_pair(1.0f / *divisor, scale_reciprocal_arg_index)};
}

if (graph_utils::IsSupportedOptypeVersionAndDomain(scale_node, "Mul", {7, 13, 14})) {
@@ -93,7 +93,7 @@ optional<std::pair<float, int>> GetScaleFromNode(

if (!multiplier.has_value()) continue;

return {std::make_pair(multiplier.value(), scale_arg_index)};
return {std::make_pair(*multiplier, scale_arg_index)};
}

return {};
@@ -128,12 +128,12 @@ std::vector<ScaleMergeInfo> GetInputNodeMerges(
if (!scale_and_index.has_value()) continue;

// assume scale nodes have 2 input defs, so to_scale_index == 1 - scale_index
ORT_ENFORCE(input_node.InputDefs().size() == 2 && scale_and_index.value().second < 2);
const int to_scale_index = 1 - scale_and_index.value().second;
ORT_ENFORCE(input_node.InputDefs().size() == 2 && scale_and_index->second < 2);
const int to_scale_index = 1 - scale_and_index->second;

input_node_merges.push_back(
{input_edge,
scale_and_index.value().first,
scale_and_index->first,
to_scale_index,
input_edge->GetDstArgIndex()});
}
@@ -160,7 +160,7 @@ std::vector<ScaleMergeInfo> GetOutputNodeMerges(

output_node_merges.push_back(
{output_edge,
scale_and_index.value().first,
scale_and_index->first,
scaled_index,
output_edge->GetSrcArgIndex()});
}
2 changes: 1 addition & 1 deletion onnxruntime/core/platform/env_var_utils.h
@@ -38,7 +38,7 @@ template <typename T>
T ParseEnvironmentVariableWithDefault(const std::string& name, const T& default_value) {
const auto parsed = ParseEnvironmentVariable<T>(name);
if (parsed.has_value()) {
return parsed.value();
return *parsed;
}

return default_value;
6 changes: 3 additions & 3 deletions onnxruntime/core/providers/cpu/reduction/reduction_ops.h
@@ -43,13 +43,13 @@ TensorOpCost ParallelReduceFastCost(int64_t n_row, int64_t n_col, int64_t elemen
This only improves reduce function when reduced axes are contiguous:
if len(shape) == 4, any single axis is ok, axes=(0, 1) or (1, 2) or (2, 3) is ok,
axes=(0, 2) is not covered by this change, former implementation prevails.
In that case, the shape can be compressed into three cases:
In that case, the shape can be compressed into three cases:
(K = axis not reduced, R = reduced axis):
* KR - reduction on the last dimensions
* RK - reduction on the first dimensions
* KRK - reduction on the middle dimensions.
For these three configuration, the reduction may be optimized
with vectors operations. Method WhichFastReduce() returns which case
case be optimized for which aggregator.
@@ -630,7 +630,7 @@ class ReduceKernelBase {
}
int64_t keepdims = 1;
if (keepdims_override.has_value()) {
keepdims = keepdims_override.value();
keepdims = *keepdims_override;
} else {
ORT_ENFORCE(info.GetAttr("keepdims", &keepdims).IsOK());
}
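
The comment in reduction_ops.h above describes how a contiguous run of reduced axes collapses the shape into the KR, RK, or KRK layout. A hypothetical classifier under that contiguity assumption (not ORT's WhichFastReduce, just an illustration of the three cases):

#include <cstddef>
#include <string>

// Assumes the reduced axes form one contiguous run [first_reduced, last_reduced]
// within a tensor of the given rank:
//   KR  - reduction over the trailing dimensions
//   RK  - reduction over the leading dimensions
//   KRK - reduction over a middle block of dimensions
std::string ClassifyContiguousReduction(std::size_t rank,
                                        std::size_t first_reduced,
                                        std::size_t last_reduced) {
  if (first_reduced == 0 && last_reduced == rank - 1) return "R";  // everything reduced
  if (last_reduced == rank - 1) return "KR";
  if (first_reduced == 0) return "RK";
  return "KRK";
}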
@@ -96,7 +96,6 @@ ApplicableMatrixReduction get_applicable_matrix_reduction(
return ApplicableMatrixReduction::None;
}


// Remove all dims with value 1. This can help to optimize case like:
// dims=[2,3,1,4,1,5] and axes=[0,2,4], which is same as dims=[2,3,4,5] and axes=[0].
std::vector<int64_t> new_dims;
@@ -136,8 +135,8 @@ ApplicableMatrixReduction get_applicable_matrix_reduction(
return ApplicableMatrixReduction::None;
}

const auto& min_axis = min_and_max_axes.value().first;
const auto& max_axis = min_and_max_axes.value().second;
const auto& min_axis = min_and_max_axes->first;
const auto& max_axis = min_and_max_axes->second;

// axes from beginning means row reduction, axes to end means column reduction
// for axes from beginning to end, either works and we do row reduction
3 changes: 2 additions & 1 deletion onnxruntime/python/onnxruntime_pybind_state.cc
@@ -576,9 +576,10 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
}
}
}
auto p = onnxruntime::CreateExecutionProviderFactory_OpenVINO(&params)->CreateProvider();
// Reset global variables config to avoid it being accidentally passed on to the next session
openvino_device_type.clear();
return onnxruntime::CreateExecutionProviderFactory_OpenVINO(&params)->CreateProvider();
return p;
#endif
} else if (type == kNupharExecutionProvider) {
#if USE_NUPHAR
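
The hunk above reorders the OpenVINO provider creation so the factory still sees the global device-type string before it is cleared. A stripped-down sketch of the same capture-then-reset idea (the names below are placeholders, not the pybind code):

#include <memory>
#include <string>

static std::string openvino_device_type;  // placeholder for the module-level config

struct Provider { std::string device_type; };

std::unique_ptr<Provider> CreateProviderThenReset() {
  // Create first, while the global still holds the value chosen for this session...
  auto provider = std::make_unique<Provider>(Provider{openvino_device_type});
  // ...then clear it so it cannot leak into the next session's provider.
  openvino_device_type.clear();
  return provider;
}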
2 changes: 1 addition & 1 deletion onnxruntime/test/common/path_test.cc
@@ -229,7 +229,7 @@ TEST(PathTest, Concat) {
[](const optional<std::string>& a, const std::string& b, const std::string& expected_a, bool expect_throw = false) {
Path p_a{}, p_expected_a{};
if (a.has_value()) {
ASSERT_STATUS_OK(Path::Parse(ToPathString(a.value()), p_a));
ASSERT_STATUS_OK(Path::Parse(ToPathString(*a), p_a));
}
ASSERT_STATUS_OK(Path::Parse(ToPathString(expected_a), p_expected_a));

2 changes: 1 addition & 1 deletion onnxruntime/test/common/tensor_op_test_utils.cc
@@ -9,7 +9,7 @@ namespace test {

RandomValueGenerator::RandomValueGenerator(optional<RandomSeedType> seed)
: random_seed_{
seed.has_value() ? seed.value() : static_cast<RandomSeedType>(GetTestRandomSeed())},
seed.has_value() ? *seed : static_cast<RandomSeedType>(GetTestRandomSeed())},
generator_{random_seed_},
output_trace_{__FILE__, __LINE__, "ORT test random seed: " + std::to_string(random_seed_)} {
}
2 changes: 1 addition & 1 deletion onnxruntime/test/contrib_ops/layer_norm_test.cc
@@ -48,7 +48,7 @@ static void TestLayerNorm(const std::vector<int64_t>& x_dims,
test.AddAttribute("axis", axis);
test.AddAttribute("keep_dims", keep_dims);
if (epsilon.has_value()) {
test.AddAttribute("epsilon", epsilon.value());
test.AddAttribute("epsilon", *epsilon);
}

// create rand inputs
2 changes: 1 addition & 1 deletion onnxruntime/test/providers/cpu/nn/batch_norm_op_test.cc
@@ -30,7 +30,7 @@ void TestBatchNorm(const unordered_map<string, vector<T>>& input_data_map,
int opset_version = 9) {
OpTester test("BatchNormalization", opset_version);
if (epsilon.has_value()) {
test.AddAttribute("epsilon", epsilon.value());
test.AddAttribute("epsilon", *epsilon);
}
if (opset_version < 9) { // spatial is only defined for opset-8 and below in the spec
test.AddAttribute("spatial", spatial_mode);
12 changes: 6 additions & 6 deletions onnxruntime/test/providers/provider_test_utils.cc
@@ -117,13 +117,13 @@ struct TensorCheck<uint8_t> {
// For any other EPs, we still expect an exact match for the results
if (provider_type == kNnapiExecutionProvider && (has_abs_err || has_rel_err)) {
double threshold = has_abs_err
? params.absolute_error_.value()
? *(params.absolute_error_)
: 0.0;

for (int i = 0; i < size; ++i) {
if (has_rel_err) {
EXPECT_NEAR(expected[i], output[i],
params.relative_error_.value() * expected[i]) // expected[i] is unsigned, can't be negative
*(params.relative_error_) * expected[i]) // expected[i] is unsigned, can't be negative
<< "i:" << i << ", provider_type: " << provider_type;
} else { // has_abs_err
EXPECT_NEAR(expected[i], output[i], threshold)
@@ -184,12 +184,12 @@ struct TensorCheck<double> {
} else {
if (has_abs_err) {
ASSERT_NEAR(expected[i], output[i],
params.absolute_error_.value())
*(params.absolute_error_))
<< "i:" << i << ", provider_type: " << provider_type;
}
if (has_rel_err) {
ASSERT_NEAR(expected[i], output[i],
params.relative_error_.value() *
*(params.relative_error_) *
std::abs(expected[i]))
<< "i:" << i << ", provider_type: " << provider_type;
}
@@ -243,12 +243,12 @@ void InternalNumericalCheck(const Tensor& expected_tensor,
} else {
if (has_abs_err) {
ASSERT_NEAR(expected[i], output[i],
params.absolute_error_.value())
*(params.absolute_error_))
<< "i:" << i << ", provider_type: " << provider_type;
}
if (has_rel_err) {
ASSERT_NEAR(expected[i], output[i],
params.relative_error_.value() *
*(params.relative_error_) *
std::abs(expected[i]))
<< "i:" << i << ", provider_type: " << provider_type;
}
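
The provider_test_utils hunks above touch the tolerance checks used when comparing execution-provider outputs. A compact restatement of the rule they implement (illustrative, not the test framework's code): if an absolute tolerance is configured the difference must stay within it, and if a relative tolerance is configured the difference must also stay within relative_error * |expected|.

#include <cmath>
#include <optional>

// Illustrative restatement: each configured tolerance must be satisfied; when
// neither is configured the tests fall back to their default comparison,
// which is not modeled here.
bool WithinTolerances(double expected, double output,
                      const std::optional<double>& absolute_error,
                      const std::optional<double>& relative_error) {
  const double diff = std::abs(expected - output);
  bool ok = true;
  if (absolute_error.has_value()) ok = ok && diff <= *absolute_error;
  if (relative_error.has_value()) ok = ok && diff <= *relative_error * std::abs(expected);
  return ok;
}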
2 changes: 1 addition & 1 deletion onnxruntime/test/util/scoped_env_vars.cc
@@ -17,7 +17,7 @@ namespace {
Status SetEnvironmentVar(const std::string& name, const optional<std::string>& value) {
if (value.has_value()) {
ORT_RETURN_IF_NOT(
setenv(name.c_str(), value.value().c_str(), 1) == 0,
setenv(name.c_str(), value->c_str(), 1) == 0,
"setenv() failed: ", errno);
} else {
ORT_RETURN_IF_NOT(
2 changes: 1 addition & 1 deletion onnxruntime/test/util/test_random_seed.cc
@@ -15,7 +15,7 @@ RandomSeedType GetTestRandomSeed() {
ParseEnvironmentVariable<RandomSeedType>(test_random_seed_env_vars::kValue);
if (fixed_random_seed.has_value()) {
// use fixed value
return fixed_random_seed.value();
return *fixed_random_seed;
}

auto generate_from_time = []() {
1 change: 1 addition & 0 deletions tools/ci_build/build.py
@@ -1170,6 +1170,7 @@ def build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, targe
env = {}
if args.android:
env['ANDROID_SDK_ROOT'] = args.android_sdk_path
env['ANDROID_NDK_HOME'] = args.android_ndk_path

run_subprocess(cmd_args, env=env)

@@ -0,0 +1,20 @@
{
"build_osx_archs": {
"iphoneos": [
"arm64"
],
"iphonesimulator": [
"arm64",
"x86_64"
]
},
"build_params": [
"--ios",
"--parallel",
"--use_xcode",
"--build_apple_framework",
"--use_coreml",
"--skip_tests",
"--apple_deploy_target=11.0"
]
}
4 changes: 2 additions & 2 deletions tools/ci_build/github/azure-pipelines/mac-ios-ci-pipeline.yml
@@ -12,7 +12,7 @@ jobs:
--ios \
--ios_sysroot iphonesimulator \
--osx_arch x86_64 \
--apple_deploy_target 12.1 \
--apple_deploy_target 11.0 \
--use_xcode \
--config RelWithDebInfo \
--build_apple_framework \
@@ -25,7 +25,7 @@ jobs:
--ios \
--ios_sysroot iphonesimulator \
--osx_arch x86_64 \
--apple_deploy_target 12.1 \
--apple_deploy_target 11.0 \
--use_xcode \
--config RelWithDebInfo \
--build_apple_framework \
