From 2459e942389ea4eee0d6e7b32a0f3ed1b31895df Mon Sep 17 00:00:00 2001
From: fsx950223
Date: Mon, 7 Oct 2019 05:40:58 +0000
Subject: [PATCH] format code

---
 .../activations/activations_test.py         |  2 +-
 tensorflow_addons/activations/rrelu.py      |  5 ++-
 tensorflow_addons/activations/rrelu_test.py |  6 +--
 .../activations/cc/kernels/rrelu_op.cc      |  8 ++--
 .../activations/cc/kernels/rrelu_op.h       | 37 +++++++++----------
 5 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/tensorflow_addons/activations/activations_test.py b/tensorflow_addons/activations/activations_test.py
index 31a4b82196..32ed113c98 100644
--- a/tensorflow_addons/activations/activations_test.py
+++ b/tensorflow_addons/activations/activations_test.py
@@ -26,7 +26,7 @@ class ActivationsTest(tf.test.TestCase):
 
     ALL_ACTIVATIONS = [
-        "gelu", "hardshrink", "lisht", "sparsemax", "tanhshrink"
+        "gelu", "hardshrink", "lisht", "rrelu", "sparsemax", "tanhshrink"
     ]
 
     def test_serialization(self):
diff --git a/tensorflow_addons/activations/rrelu.py b/tensorflow_addons/activations/rrelu.py
index 0f96c6c017..617c9474d6 100644
--- a/tensorflow_addons/activations/rrelu.py
+++ b/tensorflow_addons/activations/rrelu.py
@@ -53,4 +53,7 @@ def rrelu(x, lower=0.125, upper=0.3333333333333333, training=None):
 @tf.RegisterGradient("Addons>Rrelu")
 def _rrelu_grad(op, grad):
     return _activation_ops_so.addons_rrelu_grad(grad, op.inputs[0],
-                                                op.outputs[1],op.get_attr("lower"),op.get_attr("upper"),op.get_attr("training"))
+                                                op.outputs[1],
+                                                op.get_attr("lower"),
+                                                op.get_attr("upper"),
+                                                op.get_attr("training"))
diff --git a/tensorflow_addons/activations/rrelu_test.py b/tensorflow_addons/activations/rrelu_test.py
index 7db04a11ec..ffc164522f 100644
--- a/tensorflow_addons/activations/rrelu_test.py
+++ b/tensorflow_addons/activations/rrelu_test.py
@@ -25,13 +25,13 @@ import tensorflow as tf
 from tensorflow_addons.activations import rrelu
 from tensorflow_addons.utils import test_utils
-import random
+import random
 
-
-SEED=111111
+SEED = 111111
 tf.random.set_seed(SEED)
 random.seed(SEED)
 
+
 @test_utils.run_all_in_graph_and_eager_modes
 class RreluTest(tf.test.TestCase, parameterized.TestCase):
     @parameterized.named_parameters(("float16", np.float16),
diff --git a/tensorflow_addons/custom_ops/activations/cc/kernels/rrelu_op.cc b/tensorflow_addons/custom_ops/activations/cc/kernels/rrelu_op.cc
index c8ea4b4984..394f20e248 100644
--- a/tensorflow_addons/custom_ops/activations/cc/kernels/rrelu_op.cc
+++ b/tensorflow_addons/custom_ops/activations/cc/kernels/rrelu_op.cc
@@ -61,11 +61,11 @@ TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC);
 #undef DECLARE_GPU_SPEC
 }  // namespace functor
 
-#define REGISTER_RRELU_GPU_KERNELS(T) \
-  REGISTER_KERNEL_BUILDER( \
+#define REGISTER_RRELU_GPU_KERNELS(T)                                     \
+  REGISTER_KERNEL_BUILDER(                                                \
       Name("Addons>Rrelu").Device(DEVICE_GPU).TypeConstraint<T>("T"),     \
-      RreluOp<GPUDevice, T>); \
-  REGISTER_KERNEL_BUILDER( \
+      RreluOp<GPUDevice, T>);                                             \
+  REGISTER_KERNEL_BUILDER(                                                \
       Name("Addons>RreluGrad").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
       RreluGradOp<GPUDevice, T>);
 
diff --git a/tensorflow_addons/custom_ops/activations/cc/kernels/rrelu_op.h b/tensorflow_addons/custom_ops/activations/cc/kernels/rrelu_op.h
index a3a7bbe506..432254a976 100644
--- a/tensorflow_addons/custom_ops/activations/cc/kernels/rrelu_op.h
+++ b/tensorflow_addons/custom_ops/activations/cc/kernels/rrelu_op.h
@@ -33,13 +33,13 @@ struct Rrelu {
   void operator()(const Device& d, typename TTypes<T>::ConstTensor features,
                   T lower, T upper, bool training,
                   typename TTypes<T>::Tensor activations,
-                  typename TTypes<T>::Tensor alpha){
+                  typename TTypes<T>::Tensor alpha) {
     if (training) {
       alpha.device(d) = alpha.constant(lower) +
-                        (alpha.random() + alpha.constant(static_cast<T>(1))) *
-                        alpha.constant((upper - lower) / static_cast<T>(2));
-      activations.device(d) = (features >= static_cast<T>(0))
-                                  .select(features, alpha * features);
+          (alpha.random() + alpha.constant(static_cast<T>(1))) *
+              alpha.constant((upper - lower) / static_cast<T>(2));
+      activations.device(d) =
+          (features >= static_cast<T>(0)).select(features, alpha * features);
     } else {
       activations.device(d) =
           (features >= static_cast<T>(0))
@@ -54,7 +54,7 @@ struct RreluGrad {
   void operator()(const Device& d, typename TTypes<T>::ConstTensor gradients,
                   typename TTypes<T>::ConstTensor features,
                   typename TTypes<T>::ConstTensor alpha, T lower, T upper,
-                  bool training, typename TTypes<T>::Tensor backprops){
+                  bool training, typename TTypes<T>::Tensor backprops) {
     if (training) {
       backprops.device(d) =
           gradients *
@@ -82,12 +82,10 @@ class RreluOp : public OpKernel {
     OP_REQUIRES_OK(context, context->GetAttr("training", &training_));
     lower_ = static_cast<T>(lower);
     OP_REQUIRES(context, lower_ >= static_cast<T>(0),
-                errors::InvalidArgument("Need lower >= 0, got ",
-                                        lower_));
+                errors::InvalidArgument("Need lower >= 0, got ", lower_));
     upper_ = static_cast<T>(upper);
     OP_REQUIRES(context, upper_ < static_cast<T>(1),
-                errors::InvalidArgument("Need upper < 1, got ",
-                                        upper_));
+                errors::InvalidArgument("Need upper < 1, got ", upper_));
     OP_REQUIRES(
         context, lower_ <= upper_,
         errors::InvalidArgument("lower must be less than or equal to upper."));
@@ -101,9 +99,9 @@ class RreluOp : public OpKernel {
     OP_REQUIRES_OK(context, context->allocate_output(1, input_tensor.shape(),
                                                      &alpha_tensor));
     // functor::Rrelu<Device, T> functor;
-    functor::Rrelu<Device, T>()(context->eigen_device<Device>(), input_tensor.flat<T>(), lower_,
-                                upper_, training_, output_tensor->flat<T>(),
-                                alpha_tensor->flat<T>());
+    functor::Rrelu<Device, T>()(
+        context->eigen_device<Device>(), input_tensor.flat<T>(), lower_, upper_,
+        training_, output_tensor->flat<T>(), alpha_tensor->flat<T>());
   }
 
  private:
@@ -122,12 +120,10 @@ class RreluGradOp : public OpKernel {
     OP_REQUIRES_OK(context, context->GetAttr("training", &training_));
     lower_ = static_cast<T>(lower);
     OP_REQUIRES(context, lower_ >= static_cast<T>(0),
-                errors::InvalidArgument("Need lower >= 0, got ",
-                                        lower_));
+                errors::InvalidArgument("Need lower >= 0, got ", lower_));
     upper_ = static_cast<T>(upper);
     OP_REQUIRES(context, upper_ < static_cast<T>(1),
-                errors::InvalidArgument("Need upper < 1, got ",
-                                        upper_));
+                errors::InvalidArgument("Need upper < 1, got ", upper_));
     OP_REQUIRES(
         context, lower_ <= upper_,
         errors::InvalidArgument("lower must be less than or equal to upper."));
@@ -140,9 +136,10 @@ class RreluGradOp : public OpKernel {
     OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                      &output_tensor));
     // functor::RreluGrad<Device, T> functor;
-    functor::RreluGrad<Device, T>()(context->eigen_device<Device>(), gradients.flat<T>(),
-                                    input_tensor.flat<T>(), alpha_tensor.flat<T>(), lower_, upper_,
-                                    training_, output_tensor->flat<T>());
+    functor::RreluGrad<Device, T>()(context->eigen_device<Device>(),
+                                    gradients.flat<T>(), input_tensor.flat<T>(),
+                                    alpha_tensor.flat<T>(), lower_, upper_,
+                                    training_, output_tensor->flat<T>());
   }
 
  private:
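
Reviewer note, not part of the patch: the sketch below is a minimal NumPy illustration of the semantics the Addons>Rrelu and Addons>RreluGrad kernels in rrelu_op.h implement. The helper names rrelu_reference and rrelu_grad_reference are hypothetical, and the inference-time slope of (lower + upper) / 2 is an assumption based on standard RReLU, since that branch is cut off by the hunk boundary above. In training mode the kernel draws alpha uniformly from [lower, upper] (the expression alpha.constant(lower) + (alpha.random() + 1) * (upper - lower) / 2 maps a symmetric random draw into that interval), writes alpha to a second output, and the gradient kernel reuses it.

import numpy as np


def rrelu_reference(x, lower=0.125, upper=1.0 / 3.0, training=False, rng=None):
    # Training: alpha is drawn uniformly from [lower, upper] per element.
    # Inference: a fixed mean slope (lower + upper) / 2 is assumed.
    rng = rng or np.random.default_rng(111111)  # SEED used in rrelu_test.py
    if training:
        alpha = rng.uniform(lower, upper, size=np.shape(x))
    else:
        alpha = np.full(np.shape(x), (lower + upper) / 2.0)
    y = np.where(x >= 0, x, alpha * x)
    return y, alpha  # alpha is returned so the backward pass can reuse it


def rrelu_grad_reference(grad, x, alpha):
    # Gradients pass through unchanged where x >= 0 and are scaled by the
    # same alpha as the forward pass where x < 0.
    return np.where(x >= 0, grad, alpha * grad)


x = np.array([-2.0, -0.5, 0.0, 1.5])
y, alpha = rrelu_reference(x, training=True)
dx = rrelu_grad_reference(np.ones_like(x), x, alpha)

The Python API touched by this patch is used as in rrelu_test.py: from tensorflow_addons.activations import rrelu, then rrelu(x, training=False), with the defaults lower=0.125 and upper=0.3333333333333333 shown in rrelu.py.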