Cleanup CoreML EP's code to remove COREML_ENABLE_MLPROGRAM #23490

Open · wants to merge 3 commits into main
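Every hunk below makes the same mechanical change: the `#if defined(COREML_ENABLE_MLPROGRAM)` / `#endif` guards around the ML Program code paths are deleted, leaving the runtime `model_builder.CreateMLProgram()` check to select the path on its own, and the `} else` + `{` pair that used to straddle the `#endif` collapses back into a normal `} else {`. A minimal sketch of the before/after shape (`SomeOpBuilder` and the elided bodies are made up for illustration):

```cpp
// Before: the ML Program path only compiled when COREML_ENABLE_MLPROGRAM was defined.
Status SomeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                                            const logging::Logger& logger) const {
#if defined(COREML_ENABLE_MLPROGRAM)
  if (model_builder.CreateMLProgram()) {
    // ... build a MILSpec::Operation for the ML Program model ...
  } else
#endif  // defined(COREML_ENABLE_MLPROGRAM)
  {
    // ... build a NeuralNetworkLayer for the legacy NeuralNetwork model ...
  }
  return Status::OK();
}

// After: both paths always compile; the runtime check alone picks one.
Status SomeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                                            const logging::Logger& logger) const {
  if (model_builder.CreateMLProgram()) {
    // ... build a MILSpec::Operation for the ML Program model ...
  } else {
    // ... build a NeuralNetworkLayer for the legacy NeuralNetwork model ...
  }
  return Status::OK();
}
```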
@@ -97,7 +97,6 @@ Status ActivationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                                   const logging::Logger& logger) const {
   const auto& op_type(node.OpType());
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
     // https://apple.github.io/coremltools/source/coremltools.converters.mil.mil.ops.defs.html#module-coremltools.converters.mil.mil.ops.defs.iOS15.activation
@@ -166,9 +165,7 @@ Status ActivationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
 
     model_builder.AddOperation(std::move(op));
 
-  } else
-#endif  // (COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
 
     if (op_type == "Sigmoid") {
@@ -32,7 +32,6 @@ Status ArgMaxOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
   const int64_t keepdims = helper.Get("keepdims", 1);
   const bool removedim = keepdims != 1;
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
     // https://apple.github.io/coremltools/source/coremltools.converters.mil.mil.ops.defs.html#module-coremltools.converters.mil.mil.ops.defs.iOS15.reduction
@@ -46,9 +45,7 @@ Status ArgMaxOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
     // the output of ArgMax must be int32
     AddOperationOutput(*op, *node.OutputDefs()[0], output_datatype);
     model_builder.AddOperation(std::move(op));
-  } else
-#endif  // (COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     auto* coreml_argmax = layer->mutable_argmax();
     coreml_argmax->set_axis(axis);
     coreml_argmax->set_removedim(removedim);
@@ -91,11 +88,9 @@ bool ArgMaxOpBuilder::IsOpSupportedImpl(const Node& node,
     return false;
   }
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (input_params.create_mlprogram) {
     return true;
   }
-#endif
 
   // If there are multiple downstream nodes and cast (toint32) is one of them
   // not supported, exit here
@@ -57,7 +57,6 @@ Status BatchNormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_bu
   const auto eps = helper.Get("epsilon", 1e-5f);
   const auto channels = scale_tensor.dims()[0];
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
     // https://apple.github.io/coremltools/source/coremltools.converters.mil.mil.ops.defs.html#coremltools.converters.mil.mil.ops.defs.iOS15.normalization.batch_norm
@@ -78,9 +77,7 @@ Status BatchNormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_bu
 
     AddOperationOutput(*op, *node.OutputDefs()[0]);
     model_builder.AddOperation(std::move(op));
-  } else
-#endif  // (COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     auto* coreml_batch_norm = layer->mutable_batchnorm();
     coreml_batch_norm->set_channels(channels);
     coreml_batch_norm->set_epsilon(eps);
@@ -56,7 +56,6 @@ bool CheckIfBothInputShapesMatch(const Node& node, const logging::Logger& logger
   }
 }  // namespace
 
-#if defined(COREML_ENABLE_MLPROGRAM)
 static std::vector<int64_t> InferOutputShape(const std::vector<int64_t>& a, const std::vector<int64_t>& b) {
   std::vector<int64_t> output_shape;
   int64_t i_a = 0, j_b = 0;
@@ -112,14 +111,12 @@ static void AddVariadicInputs(std::unique_ptr<CoreML::Specification::MILSpec::Op
   }
   *op = std::move(op_prev);
 }
-#endif
 
 Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
                                               const logging::Logger& logger) const {
   const auto& op_type(node.OpType());
   const auto& input_defs(node.InputDefs());
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
 
@@ -153,9 +150,7 @@ Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const
     }
     AddOperationOutput(*op, *node.OutputDefs()[0]);
     model_builder.AddOperation(std::move(op));
-  } else
-#endif  // defined (COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
 
     if (op_type == "Add") {
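For context on the `InferOutputShape` helper that loses its guard above: it derives the broadcast output shape of the two binary-op inputs. A self-contained sketch of the assumed numpy-style semantics (not the PR's exact implementation, which also has to cope with dynamic dims):

```cpp
#include <algorithm>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Align shapes from the trailing dimensions; a dim of 1 broadcasts against
// any other dim, otherwise the dims must match.
static std::vector<int64_t> InferOutputShapeSketch(const std::vector<int64_t>& a,
                                                   const std::vector<int64_t>& b) {
  const size_t rank = std::max(a.size(), b.size());
  std::vector<int64_t> out(rank, 1);
  for (size_t i = 0; i < rank; ++i) {
    const int64_t dim_a = i < a.size() ? a[a.size() - 1 - i] : 1;  // missing dims act as 1
    const int64_t dim_b = i < b.size() ? b[b.size() - 1 - i] : 1;
    if (dim_a != dim_b && dim_a != 1 && dim_b != 1) {
      throw std::invalid_argument("shapes are not broadcastable");
    }
    out[rank - 1 - i] = std::max(dim_a, dim_b);
  }
  return out;
}
```

For example, shapes {2, 3, 1} and {4} broadcast to {2, 3, 4}.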
@@ -150,7 +150,6 @@ void CreateCoreMLWeight(CoreML::Specification::WeightParams& weight, gsl::span<c
   CreateCoreMLWeightConvertingDataToFloats(weight, data);
 }
 
-#if defined(COREML_ENABLE_MLPROGRAM)
 //
 // ML Program Utils
 //
@@ -448,6 +447,5 @@ void AddPadTypeAndPads(COREML_SPEC::MILSpec::Operation& op, ModelBuilder& model_
     }
   }
 }
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
 }  // namespace coreml
 }  // namespace onnxruntime
@@ -50,7 +50,6 @@ void CreateCoreMLWeight(CoreML::Specification::WeightParams& weight, gsl::span<c
 // Copy the int64_t array to a coreml weight
 void CreateCoreMLWeight(CoreML::Specification::WeightParams& weight, gsl::span<const int64_t> data);
 
-#if defined(COREML_ENABLE_MLPROGRAM)
 //
 // MLProgram utils
 //
@@ -174,6 +173,5 @@ void AddOperationOutput(COREML_SPEC::MILSpec::Operation& op, const NodeArg& outp
 /// <param name="num_spatial_dims">Number of spatial dims in input. Generally rank - 2 (ignore N and C dims).</param>
 void AddPadTypeAndPads(COREML_SPEC::MILSpec::Operation& op, ModelBuilder& model_builder, std::string_view op_type,
                        const NodeAttrHelper& helper, int num_spatial_dims);
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
 }  // namespace coreml
 }  // namespace onnxruntime
@@ -27,9 +27,8 @@ class CastOpBuilder : public BaseOpBuilder {
 Status CastOpBuilder::AddToModelBuilderImpl([[maybe_unused]] ModelBuilder& model_builder,
                                             [[maybe_unused]] const Node& node,
                                             [[maybe_unused]] const logging::Logger& logger) const {
-  // This is a special handling case for ArgMax Op, where argmax is followed by a cast to int32 type.
-  // The ArgMax is fused with the Cast node and produces an int32 output.
-#if defined(COREML_ENABLE_MLPROGRAM)
+  // This is a special handling case for ArgMax Op, where argmax is followed by a cast to int32 type.
+  // The ArgMax is fused with the Cast node and produces an int32 output.
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
     // https://apple.github.io/coremltools/source/coremltools.converters.mil.mil.ops.defs.html#coremltools.converters.mil.mil.ops.defs.iOS15.elementwise_unary.cast
@@ -73,7 +72,6 @@ Status CastOpBuilder::AddToModelBuilderImpl([[maybe_unused]] ModelBuilder& model
     AddOperationOutput(*op, *node.OutputDefs()[0], cast_to_type);
     model_builder.AddOperation(std::move(op));
   }
-#endif
 
   return Status::OK();
 }
@@ -134,7 +132,6 @@ bool CastOpBuilder::HasSupportedInputsImpl(const Node& node, [[maybe_unused]] co
     return false;
   }
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (input_params.create_mlprogram) {
     if ((input_type == ONNX_NAMESPACE::TensorProto_DataType_INT32 ||
          input_type == ONNX_NAMESPACE::TensorProto_DataType_INT64 ||
@@ -152,7 +149,6 @@ bool CastOpBuilder::HasSupportedInputsImpl(const Node& node, [[maybe_unused]] co
       return false;
     }
   }
-#endif
 
   // only support int64 coming from ArgMax (check for ArgMax is done in IsOpSupportedImpl())
   if (input_type != ONNX_NAMESPACE::TensorProto_DataType_INT64) {
@@ -64,7 +64,6 @@ Status ClipOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
   bool has_min = min != std::numeric_limits<float>::lowest();
   bool has_max = max != std::numeric_limits<float>::max();
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
 
@@ -121,9 +120,7 @@ Status ClipOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
 
     AddOperationOutput(*op, output);
     model_builder.AddOperation(std::move(op));
-  } else
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     // TODO: CoreML has a Clip layer for NeuralNetwork. Added in CoreML 4. We could potentially use that if available
     // to simplify.
     // https://apple.github.io/coremltools/mlmodel/Format/NeuralNetwork.html#cliplayerparams
@@ -26,7 +26,6 @@ class ConcatOpBuilder : public BaseOpBuilder {
 Status ConcatOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                               const Node& node,
                                               const logging::Logger& logger) const {
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;  // NOLINT
 
@@ -45,7 +44,6 @@ Status ConcatOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
     AddOperationOutput(*op, *node.OutputDefs()[0]);
     model_builder.AddOperation(std::move(op));
   } else  // NOLINT
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
   {
     std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
 
onnxruntime/core/providers/coreml/builders/impl/conv_op_builder.cc (12 changes: 2 additions & 10 deletions)
@@ -52,7 +52,6 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
 
   NodeAttrHelper helper(node);
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
 
@@ -89,9 +88,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
     AddOperationOutput(*conv_op, *node.OutputDefs()[0]);
 
     model_builder.AddOperation(std::move(conv_op));
-  } else
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
 
     auto strides = helper.Get("strides", std::vector<int64_t>{1, 1});
@@ -225,14 +222,11 @@ bool ConvOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputPara
   const auto& weight_name = input_defs[1]->Name();
   const auto* weight = input_params.graph_viewer.GetConstantInitializer(weight_name);
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (input_params.create_mlprogram) {
     // ML Program supports non-const weight, 1D, 2D and 3D.
     // keep to 1D and 2D for consistency with the NeuralNetwork implementation for now.
     // add 3D support as/when needed.
-  } else
-#endif  // defined (COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     if (!weight) {
       LOGS(logger, VERBOSE) << "The weight of Conv [" << name << "] must be a constant initializer";
       return false;
@@ -257,7 +251,6 @@ bool ConvOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputPara
 
   NodeAttrHelper helper(node);
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   // spec says same_lower is supported in CoreML 5. it lies. CoreML 6 is required otherwise you get
   // `Unexpected value for parameter pad_type[0] "same_lower" not in ("custom", "same", "valid").`
   // We _could_ manually calculate the pads, but not implementing that until we have a real use case to justify
@@ -269,7 +262,6 @@ bool ConvOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputPara
       return false;
     }
   }
-#endif
 
   // there's no equivalent to allow a manual kernel shape in CoreML.
   // it's OK if a specified kernel_shape matches kH and kW dims of the weight input.
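The `same_lower` comment retained above amounts to a CoreML version gate. A hedged sketch of the kind of check it describes, treating `coreml_version` as a field on `OpBuilderInputParams` (an assumption here, not verified from this diff):

```cpp
// Sketch under stated assumptions: ML Program models targeting CoreML < 6 must
// reject SAME_LOWER auto-padding, because CoreML 5 rejects pad_type "same_lower"
// even though the spec claims support.
if (input_params.create_mlprogram && input_params.coreml_version < 6) {
  if (helper.Get("auto_pad", "NOTSET") == "SAME_LOWER") {
    LOGS(logger, VERBOSE) << "Conv: SAME_LOWER auto_pad requires CoreML 6 or later";
    return false;
  }
}
```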
@@ -28,7 +28,6 @@ class ConvTransposeOpBuilder : public BaseOpBuilder {
 Status ConvTransposeOpBuilder::AddToModelBuilderImpl([[maybe_unused]] ModelBuilder& model_builder,
                                                      [[maybe_unused]] const Node& node,
                                                      const logging::Logger& /*logger*/) const {
-#if defined(COREML_ENABLE_MLPROGRAM)
   using namespace CoreML::Specification::MILSpec;  // NOLINT
   const auto input_defs = node.InputDefs();
   const auto output_defs = node.OutputDefs();
@@ -80,7 +79,6 @@ Status ConvTransposeOpBuilder::AddToModelBuilderImpl([[maybe_unused]] ModelBuild
   AddOperationOutput(*op, *output_defs[0]);
 
   model_builder.AddOperation(std::move(op));
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
 
   return Status::OK();
 }
@@ -33,7 +33,6 @@ Status DepthToSpaceOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
   NodeAttrHelper helper(node);
   int64_t blocksize = *helper.GetInt64("blocksize");  // required attribute
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;  // NOLINT
 
@@ -105,7 +104,6 @@ Status DepthToSpaceOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
       model_builder.AddOperation(std::move(reshape2));
     }
   } else  // NOLINT
-#endif  // if defined(COREML_ENABLE_MLPROGRAM)
   {
     const auto& output_name = output_defs[0]->Name();
     std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
@@ -33,7 +33,6 @@ void GemmOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Nod
   const auto& input_defs(node.InputDefs());
   const bool is_gemm = op == "Gemm";
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     // we have to transpose the weight input of Gemm if transB is false, and potentially override the bias shape
     if (is_gemm) {
@@ -58,9 +57,7 @@ void GemmOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Nod
        }
      }
    }
-  } else
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     // We have already embedded the weights (matrix B and C(if any)) into the coreml layer
     // No need to copy them later to reduce memory consumption
     model_builder.AddInitializerToSkip(input_defs[1]->Name());
@@ -123,7 +120,6 @@ Status GemmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
   const auto K = transB ? b1 : b0;
   const auto N = transB ? b0 : b1;
   // we already checked it and dtype must be existed.
-#if defined(COREML_ENABLE_MLPROGRAM)
   auto input_dtype = a.TypeAsProto()->tensor_type().elem_type();
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
@@ -207,9 +203,7 @@ Status GemmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
       AddOperationOutput(*matmul_op, *node.OutputDefs()[0]);
       model_builder.AddOperation(std::move(matmul_op));
     }
-  } else
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     auto* coreml_inner_product = layer->mutable_innerproduct();
 
     *layer->mutable_input()->Add() = a.Name();
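On the Gemm hunks: ONNX Gemm computes Y = alpha·op(A)·op(B) + beta·C, and the comment above notes that the weight must be transposed when `transB` is false. That matches the ML Program `linear` op taking its weight in [N, K] (pre-transposed) layout, so the build-time decision roughly looks like the following sketch (`TransposeWeightInitializer` is a hypothetical helper, not code from this PR):

```cpp
NodeAttrHelper helper(node);
const bool transB = helper.Get("transB", static_cast<int64_t>(0)) != 0;
if (!transB) {
  // B arrives as [K, N]; transpose the constant once at model-build time so it
  // can be fed to the linear op as an [N, K] weight. Hypothetical helper name.
  TransposeWeightInitializer(model_builder, input_defs[1]->Name());
}
```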
@@ -42,7 +42,6 @@ class GridSampleOpBuilder : public BaseOpBuilder {
 Status GridSampleOpBuilder::AddToModelBuilderImpl([[maybe_unused]] ModelBuilder& model_builder,
                                                   [[maybe_unused]] const Node& node,
                                                   [[maybe_unused]] const logging::Logger& logger) const {
-#if defined(COREML_ENABLE_MLPROGRAM)
   using namespace CoreML::Specification::MILSpec;  // NOLINT
   // https://apple.github.io/coremltools/source/coremltools.converters.mil.mil.ops.defs.html#coremltools.converters.mil.mil.ops.defs.iOS15.image_resizing.resample
 
@@ -80,7 +79,6 @@ Status GridSampleOpBuilder::AddToModelBuilderImpl([[maybe_unused]] ModelBuilder&
   AddOperationOutput(*op, *output_defs[0]);
 
   model_builder.AddOperation(std::move(op));
-#endif
   return Status::OK();
 }
 
@@ -49,7 +49,6 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(
   if (node.OpType() == "GroupNormalization") {
     return AddGroupNormToModelBuilderImpl(model_builder, node, logger);
   }
-#if defined(COREML_ENABLE_MLPROGRAM)
   const auto& input_defs = node.InputDefs();
   NodeAttrHelper helper(node);
   const auto& scale_tensor = *model_builder.GetConstantInitializer(input_defs[1]->Name());
@@ -94,7 +93,6 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(
     AddOperationOutput(*op, *node.OutputDefs()[0]);
     model_builder.AddOperation(std::move(op));
   }
-#endif  // (COREML_ENABLE_MLPROGRAM)
 
   return Status::OK();
 }
@@ -103,7 +101,6 @@ Status NormalizationOpBuilder::AddGroupNormToModelBuilderImpl(
     [[maybe_unused]] ModelBuilder& model_builder,
     [[maybe_unused]] const Node& node,
     [[maybe_unused]] const logging::Logger& logger) const {
-#if defined(COREML_ENABLE_MLPROGRAM)
   const auto& input_defs = node.InputDefs();
   NodeAttrHelper helper(node);
   // Coreml hasn't supported GroupNorm yet.
@@ -184,7 +181,6 @@ Status NormalizationOpBuilder::AddGroupNormToModelBuilderImpl(
     model_builder.AddOperation(std::move(mul));
     model_builder.AddOperation(std::move(add));
   }
-#endif  // (COREML_ENABLE_MLPROGRAM)
   return Status::OK();
 }
 
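For reference, the GroupNorm path above has to compose the result out of primitives CoreML already offers (its own comment notes CoreML has no native GroupNorm). The math being reproduced is the standard definition, with per-group statistics and learned per-channel scale and bias:

$$ y = \gamma \cdot \frac{x - \mu_g}{\sqrt{\sigma_g^2 + \varepsilon}} + \beta $$

which appears to be why the hunk ends by appending separate `mul` (scale) and `add` (bias) operations after the normalization itself.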
@@ -29,7 +29,6 @@ Status PoolOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
   const auto& op_type = node.OpType();
   const auto& input_defs = node.InputDefs();
 
-#if defined(COREML_ENABLE_MLPROGRAM)
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
 
@@ -91,9 +90,7 @@ Status PoolOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
     AddOperationOutput(*op, *node.OutputDefs()[0]);
     model_builder.AddOperation(std::move(op));
 
-  } else
-#endif  // defined(COREML_ENABLE_MLPROGRAM)
-  {
+  } else {
     std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
 
     auto* coreml_pool = layer->mutable_pooling();