From 8c497521b6a91b9e7ab995de40975119954b69e3 Mon Sep 17 00:00:00 2001
From: "M. Amber Hassaan"
Date: Wed, 13 Dec 2023 16:31:08 +0000
Subject: [PATCH] fix formatting

---
 src/fusion.cpp                                | 15 ++--
 src/include/miopen/fusion.hpp                 |  7 +-
 .../miopen/fusion/fusion_invoke_params.hpp    | 14 +--
 ..._ck_igemm_fwd_bias_res_add_activ_fused.cpp | 38 ++++----
 test/gtest/fused_conv_bias_res_add_activ.cpp  | 88 +++++++++----------
 5 files changed, 82 insertions(+), 80 deletions(-)

diff --git a/src/fusion.cpp b/src/fusion.cpp
index 502b3e48d2..b100343f23 100644
--- a/src/fusion.cpp
+++ b/src/fusion.cpp
@@ -104,7 +104,7 @@ miopenStatus_t ConvBiasActivFusion(Handle& handle,
     // MIOPEN_THROW(miopenStatusNotImplemented, "The addition of z vector is not yet supported");
     FusionPlanDescriptor fusePlanDesc{miopenVerticalFusion, xDesc};
     OperatorArgs fusionArgs;
-    auto convOp = std::make_shared<ConvForwardOpDescriptor>(conv_desc, wDesc);
+    auto convOp  = std::make_shared<ConvForwardOpDescriptor>(conv_desc, wDesc);
     auto zOp     = std::make_shared<TensorScaleAddOpDescriptor>(zDesc);
     auto biasOp  = std::make_shared<BiasFusionOpDescriptor>(biasDesc);
     auto activOp = std::make_shared<ActivFwdFusionOpDescriptor>(activationDesc.GetMode());
@@ -122,7 +122,7 @@ miopenStatus_t ConvBiasActivFusion(Handle& handle,
     MIOPEN_CHECK(fusePlanDesc.AddOp(activOp));
     MIOPEN_CHECK(fusePlanDesc.Compile(handle));
 
-    float alpha = 1.0f;
+    float alpha       = 1.0f;
     float beta        = 0.0f;
     float activ_alpha = activationDesc.GetAlpha();
     float activ_beta  = activationDesc.GetBeta();
@@ -530,10 +530,8 @@ miopenStatus_t ConvForwardOpDescriptor::GetOutputDesc(TensorDescriptor& output_d
         [&]() { output_desc = base_desc.GetForwardOutputTensor(input_desc, filter_desc); });
 }
 
-miopenStatus_t ConvForwardOpDescriptor::SetArgs(OperatorArgs& args,
-                                                float alpha,
-                                                float beta,
-                                                ConstData_t w)
+miopenStatus_t
+ConvForwardOpDescriptor::SetArgs(OperatorArgs& args, float alpha, float beta, ConstData_t w)
 {
     auto op_args = std::make_unique<miopen::fusion::ConvolutionOpInvokeParam>(alpha, beta, w);
     args.SetArg(GetIdx(), std::move(op_args));
@@ -696,9 +694,8 @@ miopenStatus_t TensorScaleAddOpDescriptor::GetOutputDesc(TensorDescriptor& outpu
     return miopenStatusSuccess;
 }
 
-miopenStatus_t TensorScaleAddOpDescriptor::SetArgs(OperatorArgs& args,
-                                                   float alpha,
-                                                   ConstData_t tensor_ptr)
+miopenStatus_t
+TensorScaleAddOpDescriptor::SetArgs(OperatorArgs& args, float alpha, ConstData_t tensor_ptr)
 {
     auto op_args = std::make_unique<miopen::fusion::TensorScaleAddOpInvokeParam>(alpha, tensor_ptr);
     args.SetArg(GetIdx(), std::move(op_args));
diff --git a/src/include/miopen/fusion.hpp b/src/include/miopen/fusion.hpp
index 92554522af..4e9097ac78 100644
--- a/src/include/miopen/fusion.hpp
+++ b/src/include/miopen/fusion.hpp
@@ -81,15 +81,14 @@ struct BiasFusionOpDescriptor : FusionOpDescriptor
     TensorDescriptor base_desc;
 };
 
-struct TensorScaleAddOpDescriptor: public FusionOpDescriptor {
+struct TensorScaleAddOpDescriptor : public FusionOpDescriptor
+{
    TensorScaleAddOpDescriptor(const TensorDescriptor& desc) : tensor_desc(desc) {}
     miopenStatus_t GetOutputDesc(TensorDescriptor& output_desc) const override;
     miopenStatus_t GetNetworkConfig(std::ostringstream& network_config) override;
-    miopenStatus_t
-    SetArgs(OperatorArgs& args, float alpha, ConstData_t tensor_ptr);
+    miopenStatus_t SetArgs(OperatorArgs& args, float alpha, ConstData_t tensor_ptr);
     miopenFusionOp_t kind() const override { return miopenFusionOpTensorScaleAdd; };
     TensorDescriptor tensor_desc;
-
 };
 
 struct ActivFwdFusionOpDescriptor : FusionOpDescriptor
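[Editor's note - illustration, not part of the patch] For readers new to MIOpen's operator-descriptor pattern, the sketch below shows how the TensorScaleAddOpDescriptor reformatted above slots into a fusion plan. Only the FusionPlanDescriptor, OperatorArgs, AddOp, Compile, and SetArgs calls that appear verbatim in the ConvBiasActivFusion hunk are taken from the patch; the wrapper function, its name and parameter list, and the final Execute call are assumptions made for this note.

// Sketch only, assuming MIOpen's internal headers are available.
#include <miopen/fusion.hpp>

#include <memory>

namespace miopen {

// Hypothetical helper; name and signature invented for illustration.
miopenStatus_t ConvScaleAddSketch(Handle& handle,
                                  const ConvolutionDescriptor& conv_desc,
                                  const TensorDescriptor& xDesc,
                                  ConstData_t x,
                                  const TensorDescriptor& wDesc,
                                  ConstData_t w,
                                  const TensorDescriptor& zDesc,
                                  ConstData_t z,
                                  const TensorDescriptor& yDesc,
                                  Data_t y,
                                  float alpha1,
                                  float alpha2)
{
    FusionPlanDescriptor fusePlanDesc{miopenVerticalFusion, xDesc};
    OperatorArgs fusionArgs;

    auto convOp = std::make_shared<ConvForwardOpDescriptor>(conv_desc, wDesc);
    auto zOp    = std::make_shared<TensorScaleAddOpDescriptor>(zDesc);

    // Ops execute in the order they are added: y = conv(x, w), then y += alpha2 * z.
    MIOPEN_CHECK(fusePlanDesc.AddOp(convOp));
    MIOPEN_CHECK(fusePlanDesc.AddOp(zOp));
    MIOPEN_CHECK(fusePlanDesc.Compile(handle));

    // SetArgs signatures exactly as reformatted in this patch.
    MIOPEN_CHECK(convOp->SetArgs(fusionArgs, /*alpha=*/alpha1, /*beta=*/0.0f, w));
    MIOPEN_CHECK(zOp->SetArgs(fusionArgs, /*alpha=*/alpha2, z));

    // Assumed to mirror how ConvBiasActivFusion runs its compiled plan.
    return fusePlanDesc.Execute(handle, xDesc, x, yDesc, y, fusionArgs);
}

} // namespace miopen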
diff --git a/src/include/miopen/fusion/fusion_invoke_params.hpp b/src/include/miopen/fusion/fusion_invoke_params.hpp
index 6a7e3f62b4..255bc373cd 100644
--- a/src/include/miopen/fusion/fusion_invoke_params.hpp
+++ b/src/include/miopen/fusion/fusion_invoke_params.hpp
@@ -40,12 +40,12 @@ struct FusionOpInvokeParamBase
 
 struct ConvolutionOpInvokeParam : FusionOpInvokeParamBase
 {
-    ConvolutionOpInvokeParam(float _alpha, float _beta, ConstData_t w) :
-        alpha(_alpha),
-        beta(_beta),
-        weights(w) {}
-    float alpha = 1.0f; // scales new result of convolution
-    float beta = 0.0f; // scales old val of convolution output tensor
+    ConvolutionOpInvokeParam(float _alpha, float _beta, ConstData_t w)
+        : alpha(_alpha), beta(_beta), weights(w)
+    {
+    }
+    float alpha         = 1.0f; // scales new result of convolution
+    float beta          = 0.0f; // scales old val of convolution output tensor
     ConstData_t weights = nullptr;
 };
@@ -58,7 +58,7 @@ struct BiasOpInvokeParam : FusionOpInvokeParamBase
 
 struct TensorScaleAddOpInvokeParam : public FusionOpInvokeParamBase
 {
     TensorScaleAddOpInvokeParam(float a, ConstData_t tp) : alpha(a), tensor_ptr(tp) {}
-    float alpha = 1.0f;
+    float alpha            = 1.0f;
     ConstData_t tensor_ptr = nullptr;
 };
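[Editor's note - illustration, not part of the patch] The structs in this header are plain, positional argument carriers: the fusion plan stores one FusionOpInvokeParamBase per operator, and a solver recovers the concrete type with a dynamic_cast on the op's index, which is exactly what MakeArgPtr does in the next file. The self-contained sketch below re-creates that pattern with stand-in types so it compiles and runs on its own; ConstData_t is replaced by const void*, and the ownership scheme shown here (unique_ptr) is an assumption for the sketch, not a claim about MIOpen's container.

#include <cassert>
#include <memory>
#include <vector>

// Stand-ins for the structs in fusion_invoke_params.hpp (illustration only).
struct FusionOpInvokeParamBase
{
    virtual ~FusionOpInvokeParamBase() = default;
};

struct ConvolutionOpInvokeParam : FusionOpInvokeParamBase
{
    ConvolutionOpInvokeParam(float a, float b, const void* w) : alpha(a), beta(b), weights(w) {}
    float alpha         = 1.0f; // scales new result of convolution
    float beta          = 0.0f; // scales old value of the output tensor
    const void* weights = nullptr;
};

struct TensorScaleAddOpInvokeParam : FusionOpInvokeParamBase
{
    TensorScaleAddOpInvokeParam(float a, const void* tp) : alpha(a), tensor_ptr(tp) {}
    float alpha            = 1.0f;
    const void* tensor_ptr = nullptr;
};

int main()
{
    // The plan owns one params object per op, in the order the ops were added...
    std::vector<std::unique_ptr<FusionOpInvokeParamBase>> params;
    params.push_back(std::make_unique<ConvolutionOpInvokeParam>(1.0f, 0.0f, nullptr));
    params.push_back(std::make_unique<TensorScaleAddOpInvokeParam>(0.5f, nullptr));

    // ...and a solver recovers each concrete type positionally via dynamic_cast.
    auto* conv_param = dynamic_cast<ConvolutionOpInvokeParam*>(params[0].get());
    assert(conv_param && conv_param->beta == 0.0f);

    auto* z_param = dynamic_cast<TensorScaleAddOpInvokeParam*>(params[1].get());
    assert(z_param && z_param->alpha == 0.5f);
    return 0;
}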
diff --git a/src/solver/conv_ck_igemm_fwd_bias_res_add_activ_fused.cpp b/src/solver/conv_ck_igemm_fwd_bias_res_add_activ_fused.cpp
index 6746bf1531..5dad209f5b 100644
--- a/src/solver/conv_ck_igemm_fwd_bias_res_add_activ_fused.cpp
+++ b/src/solver/conv_ck_igemm_fwd_bias_res_add_activ_fused.cpp
@@ -58,10 +58,10 @@ using DeviceOp = ck::tensor_operation::device::instance::DeviceOperationInstance
         ck::tensor_layout::convolution::GKZYXC,
         ck::Tuple<CK_OutLayout, ck::tensor_layout::convolution::G_K>,
         CK_OutLayout,
-        DataType, // in data type
-        DataType, // wei data type
+        DataType,                      // in data type
+        DataType,                      // wei data type
         ck::Tuple<DataType, DataType>, // z & bias tensors data type
-        DataType, // out data type
+        DataType,                      // out data type
         ck::tensor_operation::element_wise::PassThrough,
         ck::tensor_operation::element_wise::PassThrough,
         ck::tensor_operation::element_wise::
@@ -90,11 +90,11 @@ struct CKArgs
         Do = ProblemInterpreter::GetOutputDepthDo(problem);
         Z  = ProblemInterpreter::GetFilterDepthZ(problem);
 
-        in_lens = {G, N, C, Di, Hi, Wi};
-        out_lens = {G, N, K, Do, Ho, Wo};
-        wei_lens = {G, K, C, Z, Y, X};
-        bias_lens = {G, 1, K, 1, 1, 1};
-        bias_strides = {K, 0, 1, 0, 0, 0};
+        in_lens      = {G, N, C, Di, Hi, Wi};
+        out_lens     = {G, N, K, Do, Ho, Wo};
+        wei_lens     = {G, K, C, Z, Y, X};
+        bias_lens    = {G, 1, K, 1, 1, 1};
+        bias_strides = {K, 0, 1, 0, 0, 0};
 
         // miopen filter_stride to CK filter_stride
         auto miopen_in_strides = problem.GetIn().GetStrides();
@@ -162,24 +162,32 @@ struct CKArgs
                     const miopen::fusion::FusionInvokeParams& data_ctx) const
     {
 
-        auto* conv_param =
+        auto* conv_param =
             dynamic_cast<miopen::fusion::ConvolutionOpInvokeParam*>(data_ctx.op_args.params[0]);
         assert(conv_param);
 
-        auto* z_param = dynamic_cast<miopen::fusion::TensorScaleAddOpInvokeParam*>(data_ctx.op_args.params[1]);
+        auto* z_param =
+            dynamic_cast<miopen::fusion::TensorScaleAddOpInvokeParam*>(data_ctx.op_args.params[1]);
         assert(z_param);
 
-        auto* bias_param = dynamic_cast<miopen::fusion::BiasOpInvokeParam*>(data_ctx.op_args.params[2]);
+        auto* bias_param =
+            dynamic_cast<miopen::fusion::BiasOpInvokeParam*>(data_ctx.op_args.params[2]);
         assert(bias_param);
 
         /// \todo: Support general activation functions.
         /// only relu activation supported and hardcoded for now
-        [[maybe_unused]] auto* activ_param = dynamic_cast<miopen::fusion::ActivationOpInvokeParam*>(data_ctx.op_args.params[3]);
+        [[maybe_unused]] auto* activ_param =
+            dynamic_cast<miopen::fusion::ActivationOpInvokeParam*>(data_ctx.op_args.params[3]);
         assert(activ_param);
 
-        return MakeArgPtr(op_ptr, data_ctx.in, conv_param->weights, data_ctx.out,
-                          z_param->tensor_ptr, bias_param->bdata,
-                          conv_param->alpha, z_param->alpha);
+        return MakeArgPtr(op_ptr,
+                          data_ctx.in,
+                          conv_param->weights,
+                          data_ctx.out,
+                          z_param->tensor_ptr,
+                          bias_param->bdata,
+                          conv_param->alpha,
+                          z_param->alpha);
     }
 
 #if 0
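[Editor's note - illustration, not part of the patch] The bias_lens = {G, 1, K, 1, 1, 1} / bias_strides = {K, 0, 1, 0, 0, 0} pair in the CKArgs hunk is how a per-channel bias gets broadcast over the whole output: a zero stride maps every index along that dimension to the same element, so only G*K bias values back the full G x N x K x Do x Ho x Wo view. A self-contained sketch of the offset arithmetic (values chosen arbitrarily):

#include <array>
#include <cstdio>

// offset = sum(idx[i] * strides[i]); zero strides ignore their coordinate.
static long Offset(const std::array<long, 6>& idx, const std::array<long, 6>& strides)
{
    long off = 0;
    for(int i = 0; i < 6; ++i)
        off += idx[i] * strides[i];
    return off;
}

int main()
{
    const long K = 8;
    const std::array<long, 6> bias_strides = {K, 0, 1, 0, 0, 0}; // as in CKArgs above

    // Same (g, k) but different n/do/ho/wo coordinates -> same bias element.
    std::printf("%ld\n", Offset({1, 0, 3, 0, 0, 0}, bias_strides)); // 1*8 + 3 = 11
    std::printf("%ld\n", Offset({1, 7, 3, 9, 4, 2}, bias_strides)); // still 11
    return 0;
}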
diff --git a/test/gtest/fused_conv_bias_res_add_activ.cpp b/test/gtest/fused_conv_bias_res_add_activ.cpp
index 6e62d20eb2..32b4415926 100644
--- a/test/gtest/fused_conv_bias_res_add_activ.cpp
+++ b/test/gtest/fused_conv_bias_res_add_activ.cpp
@@ -59,8 +59,11 @@ inline int SetTensorLayout(miopen::TensorDescriptor& desc)
 }
 
 template <typename T>
 struct ConvFwdBiasResAddFixture
-    : public ::testing::TestWithParam<
-          std::tuple<miopenConvFwdAlgorithm_t, ConvTestCase, float, float, miopenTensorLayout_t>>
+    : public ::testing::TestWithParam<std::tuple<miopenConvFwdAlgorithm_t,
+                                                 ConvTestCase,
+                                                 float,
+                                                 float,
+                                                 miopenTensorLayout_t>>
 {
 protected:
@@ -89,43 +92,41 @@ struct ConvFwdBiasResAddFixture
         SetTensorLayout(z.desc);
         z.generate(gen_value);
 
-        bias = tensor<T> {
-            tensor_layout,
-            {1, 1, 1, 1, conv_config.k}, // NDHWK for lengths
-            {0, 1, 0, 0, 0}}; // NKDHW order for strides
+        bias = tensor<T>{tensor_layout,
+                         {1, 1, 1, 1, conv_config.k}, // NDHWK for lengths
+                         {0, 1, 0, 0, 0}};            // NKDHW order for strides
         bias.generate(gen_value);
 
         auto& handle = get_handle();
-        in_dev = handle.Write(input.data);
-        wei_dev = handle.Write(weights.data);
-        out_dev = handle.Write(output.data);
-        z_dev = handle.Write(z.data);
-        bias_dev = handle.Write(bias.data);
+        in_dev   = handle.Write(input.data);
+        wei_dev  = handle.Write(weights.data);
+        out_dev  = handle.Write(output.data);
+        z_dev    = handle.Write(z.data);
+        bias_dev = handle.Write(bias.data);
 
         miopenCreateActivationDescriptor(&activ_desc);
         miopenSetActivationDescriptor(activ_desc, miopenActivationRELU, 1.0f, 1.0f, 1.0f);
     }
 
     void TearDown() override
     {
-
         miopenDestroyActivationDescriptor(activ_desc);
 
         auto&& handle = get_handle();
 
-        ref_out = tensor<T>{tensor_layout, output_desc.GetLengths()};
-        ref_out = ref_conv_fwd(input, weights, output, conv_desc);
+        ref_out = tensor<T>{tensor_layout, output_desc.GetLengths()};
+        ref_out = ref_conv_fwd(input, weights, output, conv_desc);
 
         // implement equation out = act(conv(in) * alpha1 + z * alpha2 + bias);
         ref_out.par_for_each([&](auto n, auto k, auto... dhw) {
-            auto& o = ref_out(n, k, dhw...);
-
-            o *= alpha1;
-            o += alpha2 * z(n, k, dhw...) + bias(n, k, dhw...);
-            o = (o > T{0}) ? o : T{0}; // TODO: hardcoded relu; use
-                                       // activationHostInfer
-        });
+            auto& o = ref_out(n, k, dhw...);
+            o *= alpha1;
+            o += alpha2 * z(n, k, dhw...) + bias(n, k, dhw...);
+            o = (o > T{0}) ? o : T{0}; // TODO: hardcoded relu; use
+                                       // activationHostInfer
+        });
 
         output.data = handle.Read<T>(out_dev, output.data.size());
 
         EXPECT_FALSE(miopen::range_zero(ref_out)) << "Cpu data is all zeros";
@@ -164,32 +165,29 @@ struct ConvFwdBiasResAddFixture
     miopenActivationDescriptor activ_desc;
 };
 
-TEST_P(ConvFwdBiasResAddActivTest, ConvFwdBiasResAddFixture) {
-    auto status = miopenConvolutionBiasActivationForward(
-        &alpha1,
-        &input.desc,
-        in_dev.get(),
-        &weights.desc,
-        wei_dev.get(),
-        conv_desc,
-        algo,
-        nullptr, // workspace
-        0ull,    // workspace size
-        &alpha2,
-        &z.desc,
-        z_dev.get(),
-        &bias.desc,
-        bias_dev.get(),
-        activ_desc,
-        &output.desc,
-        out_dev.get());
-
-    EXPECT_EQ(status, miopenStatusSuccess);
+TEST_P(ConvFwdBiasResAddActivTest, ConvFwdBiasResAddFixture)
+{
+    auto status = miopenConvolutionBiasActivationForward(&alpha1,
+                                                         &input.desc,
+                                                         in_dev.get(),
+                                                         &weights.desc,
+                                                         wei_dev.get(),
+                                                         conv_desc,
+                                                         algo,
+                                                         nullptr, // workspace
+                                                         0ull,    // workspace size
+                                                         &alpha2,
+                                                         &z.desc,
+                                                         z_dev.get(),
+                                                         &bias.desc,
+                                                         bias_dev.get(),
+                                                         activ_desc,
+                                                         &output.desc,
+                                                         out_dev.get());
+
+    EXPECT_EQ(status, miopenStatusSuccess);
 }
-
-
-
 INSTANTIATE_TEST_SUITE_P(ConvFwdBiasActivAPI,
                          ConvBwdSolverTest3D,
                          testing::Combine(testing::Values(miopenConvolutionFwdAlgoImplicitGEMM),
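[Editor's note - illustration, not part of the patch] The TearDown hunk above validates out = act(conv(in) * alpha1 + z * alpha2 + bias) with the activation hardcoded to ReLU. The self-contained sketch below is that epilogue in scalar form; the names are invented for this note, conv_out stands for what ref_conv_fwd produces, and bias is assumed already broadcast to the output shape:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// out[i] = relu(alpha1 * conv_out[i] + alpha2 * z[i] + bias[i])
std::vector<float> RefEpilogue(const std::vector<float>& conv_out,
                               const std::vector<float>& z,
                               const std::vector<float>& bias,
                               float alpha1,
                               float alpha2)
{
    assert(conv_out.size() == z.size() && z.size() == bias.size());
    std::vector<float> out(conv_out.size());
    for(std::size_t i = 0; i < out.size(); ++i)
        out[i] = std::max(alpha1 * conv_out[i] + alpha2 * z[i] + bias[i], 0.0f);
    return out;
}

int main()
{
    // One element by hand: 2*1.5 + 0.5*(-2) + 0.25 = 2.25, and ReLU keeps it.
    const auto out = RefEpilogue({1.5f}, {-2.0f}, {0.25f}, 2.0f, 0.5f);
    assert(out.size() == 1 && out[0] == 2.25f);
    return 0;
}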