diff --git a/tensorflow_addons/conftest.py b/tensorflow_addons/conftest.py
index 8aa24adfdf..80e8aec9ac 100644
--- a/tensorflow_addons/conftest.py
+++ b/tensorflow_addons/conftest.py
@@ -14,6 +14,20 @@
     pytest_collection_modifyitems,
 )
 
+import numpy as np
+import pytest
+
+import tensorflow as tf
+import tensorflow_addons as tfa
+
+
 # fixtures present in this file will be available
 # when running tests and can be referenced with strings
 # https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions
+
+
+@pytest.fixture(autouse=True)
+def add_np(doctest_namespace):
+    doctest_namespace["np"] = np
+    doctest_namespace["tf"] = tf
+    doctest_namespace["tfa"] = tfa
diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py
index 811d320f7c..b950a78bc4 100644
--- a/tensorflow_addons/layers/wrappers.py
+++ b/tensorflow_addons/layers/wrappers.py
@@ -16,6 +16,7 @@
 import logging
 
 import tensorflow as tf
+
 from typeguard import typechecked
 
 
@@ -30,21 +31,25 @@ class WeightNormalization(tf.keras.layers.Wrapper):
     Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
     Tim Salimans, Diederik P. Kingma (2016)
     WeightNormalization wrapper works for keras and tf layers.
-    ```python
-    net = WeightNormalization(
-        tf.keras.layers.Conv2D(2, 2, activation='relu'),
-        input_shape=(32, 32, 3),
-        data_init=True)(x)
-    net = WeightNormalization(
-        tf.keras.layers.Conv2D(16, 5, activation='relu'),
-        data_init=True)(net)
-    net = WeightNormalization(
-        tf.keras.layers.Dense(120, activation='relu'),
-        data_init=True)(net)
-    net = WeightNormalization(
-        tf.keras.layers.Dense(n_classes),
-        data_init=True)(net)
-    ```
+
+    Usage:
+
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Conv2D(2, 2, activation='relu'),
+    ...     input_shape=(32, 32, 3),
+    ...     data_init=True)(np.random.rand(32, 32, 3, 1).astype('f'))
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Conv2D(16, 2, activation='relu'),
+    ...     data_init=True)(net)
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Dense(120, activation='relu'),
+    ...     data_init=True)(net)
+    >>> net = tfa.layers.WeightNormalization(
+    ...     tf.keras.layers.Dense(2),
+    ...     data_init=True)(net)
+    >>> net.shape
+    TensorShape([32, 30, 1, 2])
+
     Arguments:
       layer: a layer instance.
       data_init: If `True` use data dependent variable initialization
diff --git a/tensorflow_addons/losses/focal_loss.py b/tensorflow_addons/losses/focal_loss.py
index 47c785a465..dda4587a29 100644
--- a/tensorflow_addons/losses/focal_loss.py
+++ b/tensorflow_addons/losses/focal_loss.py
@@ -15,6 +15,7 @@
 """Implements Focal loss."""
 
 import tensorflow as tf
+
 import tensorflow.keras.backend as K
 
 from tensorflow_addons.utils.keras_utils import LossFunctionWrapper
@@ -37,15 +38,11 @@ class SigmoidFocalCrossEntropy(LossFunctionWrapper):
 
     Usage:
 
-    ```python
-    fl = tfa.losses.SigmoidFocalCrossEntropy()
-    loss = fl(
-      y_true = [[1.0], [1.0], [0.0]],
-      y_pred = [[0.97], [0.91], [0.03]])
-    print('Loss: ', loss.numpy())  # Loss: [6.8532745e-06,
-                                            1.9097870e-04,
-                                            2.0559824e-05]
-    ```
+    >>> fl = tfa.losses.SigmoidFocalCrossEntropy()
+    >>> loss = fl(y_true=[[1.0], [1.0], [0.0]], y_pred=[[0.97], [0.91], [0.03]])
+    >>> loss
+    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([6.8532745e-06, 1.9097870e-04, 2.0559824e-05], dtype=float32)>
+
 
     Usage with tf.keras API:
 
     ```python
diff --git a/tensorflow_addons/losses/giou_loss.py b/tensorflow_addons/losses/giou_loss.py
index 685d71adf7..8948381353 100644
--- a/tensorflow_addons/losses/giou_loss.py
+++ b/tensorflow_addons/losses/giou_loss.py
@@ -33,13 +33,13 @@ class GIoULoss(LossFunctionWrapper):
 
     Usage:
 
-    ```python
-    gl = tfa.losses.GIoULoss()
-    boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
-    boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
-    loss = gl(boxes1, boxes2)
-    print('Loss: ', loss.numpy())  # Loss: [1.07500000298023224, 1.9333333373069763]
-    ```
+    >>> gl = tfa.losses.GIoULoss()
+    >>> boxes1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]])
+    >>> boxes2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0]])
+    >>> loss = gl(boxes1, boxes2)
+    >>> loss
+    <tf.Tensor: shape=(), dtype=float32, numpy=1.5041667>
+
 
     Usage with tf.keras API:
 
     ```python
diff --git a/tensorflow_addons/losses/quantiles.py b/tensorflow_addons/losses/quantiles.py
index e758f62611..ef3aca0a39 100644
--- a/tensorflow_addons/losses/quantiles.py
+++ b/tensorflow_addons/losses/quantiles.py
@@ -84,15 +84,11 @@ class PinballLoss(LossFunctionWrapper):
     See: https://en.wikipedia.org/wiki/Quantile_regression
 
     Usage:
-    ```python
-    pinball = tfa.losses.PinballLoss(tau=.1)
-    loss = pinball([0., 0., 1., 1.], [1., 1., 1., 0.])
-
-    # loss = max(0.1 * (y_true - y_pred), (0.1 - 1) * (y_true - y_pred))
-    #      = (0.9 + 0.9 + 0 + 0.1) / 4
-    print('Loss: ', loss.numpy())  # Loss: 0.475
-    ```
+    >>> pinball = tfa.losses.PinballLoss(tau=.1)
+    >>> loss = pinball([0., 0., 1., 1.], [1., 1., 1., 0.])
+    >>> loss
+    <tf.Tensor: shape=(), dtype=float32, numpy=0.475>
 
     Usage with the `compile` API:
diff --git a/tensorflow_addons/metrics/cohens_kappa.py b/tensorflow_addons/metrics/cohens_kappa.py
index cb473530b7..504b42a24b 100644
--- a/tensorflow_addons/metrics/cohens_kappa.py
+++ b/tensorflow_addons/metrics/cohens_kappa.py
@@ -15,6 +15,7 @@
 """Implements Cohen's Kappa."""
 
 import tensorflow as tf
+
 import numpy as np
 import tensorflow.keras.backend as K
 from tensorflow.keras.metrics import Metric
@@ -27,39 +28,46 @@
 @tf.keras.utils.register_keras_serializable(package="Addons")
 class CohenKappa(Metric):
     """Computes Kappa score between two raters.
-
     The score lies in the range [-1, 1]. A score of -1 represents
     complete disagreement between two raters whereas a score
     of 1 represents complete agreement between the two raters.
     A score of 0 means agreement by chance.
-
     Note: As of now, this implementation considers all labels
     while calculating the Cohen's Kappa score.
 
     Usage:
 
-    ```python
-    actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
-    preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
-    weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
-
-    m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
-    m.update_state(actuals, preds)
-    print('Final result: ', m.result().numpy()) # Result: 0.61904764
-
-    # To use this with weights, sample_weight argument can be used.
-    m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
-    m.update_state(actuals, preds, sample_weight=weights)
-    print('Final result: ', m.result().numpy()) # Result: 0.37209308
-    ```
+    >>> actuals = np.array([4, 4, 3, 4, 2, 4, 1, 1], dtype=np.int32)
+    >>> preds = np.array([4, 4, 3, 4, 4, 2, 1, 1], dtype=np.int32)
+    >>> weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)
+    >>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
+    >>> m.update_state(actuals, preds)
+    <tf.Variable 'UnreadVariable' shape=(5, 5) dtype=float32, numpy=
+    array([[0., 0., 0., 0., 0.],
+           [0., 2., 0., 0., 0.],
+           [0., 0., 0., 0., 1.],
+           [0., 0., 0., 1., 0.],
+           [0., 0., 1., 0., 3.]], dtype=float32)>
+    >>> m.result().numpy()
+    0.61904764
+    >>> m = tfa.metrics.CohenKappa(num_classes=5, sparse_labels=True)
+    >>> m.update_state(actuals, preds, sample_weight=weights)
+    <tf.Variable 'UnreadVariable' shape=(5, 5) dtype=float32, numpy=
+    array([[ 0.,  0.,  0.,  0.,  0.],
+           [ 0.,  6.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0., 10.],
+           [ 0.,  0.,  0.,  2.,  0.],
+           [ 0.,  0.,  2.,  0.,  7.]], dtype=float32)>
+    >>> m.result().numpy()
+    0.37209308
 
     Usage with tf.keras API:
 
     ```python
     model = tf.keras.models.Model(inputs, outputs)
     model.add_metric(tfa.metrics.CohenKappa(num_classes=5)(outputs))
     model.compile('sgd', loss='mse')
     ```
     """
 
     @typechecked
diff --git a/tensorflow_addons/metrics/matthews_correlation_coefficient.py b/tensorflow_addons/metrics/matthews_correlation_coefficient.py
index e6afa53f61..d54c2350aa 100644
--- a/tensorflow_addons/metrics/matthews_correlation_coefficient.py
+++ b/tensorflow_addons/metrics/matthews_correlation_coefficient.py
@@ -42,18 +42,13 @@ class MatthewsCorrelationCoefficient(tf.keras.metrics.Metric):
     ((TP + FP) * (TP + FN) * (TN + FP ) * (TN + FN))^(1/2)
 
     Usage:
-    ```python
-    actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]],
-        dtype=tf.float32)
-    preds = tf.constant([[1.0], [0.0], [1.0], [1.0]],
-        dtype=tf.float32)
-    # Matthews correlation coefficient
-    mcc = MatthewsCorrelationCoefficient(num_classes=1)
-    mcc.update_state(actuals, preds)
-    print('Matthews correlation coefficient is:',
-        mcc.result().numpy())
-    # Matthews correlation coefficient is : -0.33333334
-    ```
+
+    >>> actuals = tf.constant([[1.0], [1.0], [1.0], [0.0]], dtype=tf.float32)
+    >>> preds = tf.constant([[1.0], [0.0], [1.0], [1.0]], dtype=tf.float32)
+    >>> mcc = tfa.metrics.MatthewsCorrelationCoefficient(num_classes=1)
+    >>> mcc.update_state(actuals, preds)
+    >>> mcc.result()
+    <tf.Tensor: shape=(1,), dtype=float32, numpy=array([-0.33333334], dtype=float32)>
     """
 
     @typechecked
diff --git a/tensorflow_addons/metrics/multilabel_confusion_matrix.py b/tensorflow_addons/metrics/multilabel_confusion_matrix.py
index d13a355b33..291e6a091a 100644
--- a/tensorflow_addons/metrics/multilabel_confusion_matrix.py
+++ b/tensorflow_addons/metrics/multilabel_confusion_matrix.py
@@ -16,10 +16,11 @@
 
 import warnings
 
+import numpy as np
 import tensorflow as tf
+
 from tensorflow.keras import backend as K
 from tensorflow.keras.metrics import Metric
-import numpy as np
 from typeguard import typechecked
 
 from tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike
@@ -46,30 +47,36 @@ class MultiLabelConfusionMatrix(Metric):
     - false negatives for class i in M(1,0)
     - true positives for class i in M(1,1)
 
-    ```python
-    # multilabel confusion matrix
-    y_true = tf.constant([[1, 0, 1], [0, 1, 0]],
-        dtype=tf.int32)
-    y_pred = tf.constant([[1, 0, 0],[0, 1, 1]],
-        dtype=tf.int32)
-    output = MultiLabelConfusionMatrix(num_classes=3)
-    output.update_state(y_true, y_pred)
-    print('Confusion matrix:', output.result().numpy())
-
-    # Confusion matrix: [[[1 0] [0 1]] [[1 0] [0 1]]
-        [[0 1] [1 0]]]
-
-    # if multiclass input is provided
-    y_true = tf.constant([[1, 0, 0], [0, 1, 0]],
-        dtype=tf.int32)
-    y_pred = tf.constant([[1, 0, 0],[0, 0, 1]],
-        dtype=tf.int32)
-    output = MultiLabelConfusionMatrix(num_classes=3)
-    output.update_state(y_true, y_pred)
-    print('Confusion matrix:', output.result().numpy())
-
-    # Confusion matrix: [[[1 0] [0 1]] [[1 0] [1 0]] [[1 1] [0 0]]]
-    ```
+    Usage:
+
+    >>> y_true = tf.constant([[1, 0, 1], [0, 1, 0]], dtype=tf.int32)
+    >>> y_pred = tf.constant([[1, 0, 0],[0, 1, 1]], dtype=tf.int32)
+    >>> output1 = tfa.metrics.MultiLabelConfusionMatrix(num_classes=3)
+    >>> output1.update_state(y_true, y_pred)
+    >>> output1.result()
+    <tf.Tensor: shape=(3, 2, 2), dtype=float32, numpy=
+    array([[[1., 0.],
+            [0., 1.]],
+    <BLANKLINE>
+           [[1., 0.],
+            [0., 1.]],
+    <BLANKLINE>
+           [[0., 1.],
+            [1., 0.]]], dtype=float32)>
+    >>> y_true = tf.constant([[1, 0, 0], [0, 1, 0]], dtype=tf.int32)
+    >>> y_pred = tf.constant([[1, 0, 0],[0, 0, 1]], dtype=tf.int32)
+    >>> output2 = tfa.metrics.MultiLabelConfusionMatrix(num_classes=3)
+    >>> output2.update_state(y_true, y_pred)
+    >>> output2.result()
+    <tf.Tensor: shape=(3, 2, 2), dtype=float32, numpy=
+    array([[[1., 0.],
+            [0., 1.]],
+    <BLANKLINE>
+           [[1., 0.],
+            [1., 0.]],
+    <BLANKLINE>
+           [[1., 1.],
+            [0., 0.]]], dtype=float32)>
     """
 
     @typechecked
diff --git a/tensorflow_addons/metrics/r_square.py b/tensorflow_addons/metrics/r_square.py
index 8ae78fc543..8e660edcec 100644
--- a/tensorflow_addons/metrics/r_square.py
+++ b/tensorflow_addons/metrics/r_square.py
@@ -16,6 +16,7 @@
 from typing import Tuple
 
 import tensorflow as tf
+
 from tensorflow.keras import backend as K
 from tensorflow.keras.metrics import Metric
 from tensorflow.python.ops import weights_broadcast_ops
@@ -61,13 +62,13 @@ class RSquare(Metric):
     of the same metric.
 
     Usage:
-    ```python
-    actuals = tf.constant([1, 4, 3], dtype=tf.float32)
-    preds = tf.constant([2, 4, 4], dtype=tf.float32)
-    result = tf.keras.metrics.RSquare()
-    result.update_state(actuals, preds)
-    print('R^2 score is: ', r1.result().numpy())  # 0.57142866
-    ```
+
+    >>> actuals = tf.constant([1, 4, 3], dtype=tf.float32)
+    >>> preds = tf.constant([2, 4, 4], dtype=tf.float32)
+    >>> ans = tfa.metrics.RSquare()
+    >>> ans.update_state(actuals, preds)
+    >>> ans.result()
+    <tf.Tensor: shape=(), dtype=float32, numpy=0.57142866>
     """
 
     @typechecked
diff --git a/tensorflow_addons/optimizers/lookahead.py b/tensorflow_addons/optimizers/lookahead.py
index 8f96dc9c62..fe043d7aaa 100644
--- a/tensorflow_addons/optimizers/lookahead.py
+++ b/tensorflow_addons/optimizers/lookahead.py
@@ -34,10 +34,8 @@ class Lookahead(tf.keras.optimizers.Optimizer):
 
     Example of usage:
 
-    ```python
-    opt = tf.keras.optimizers.SGD(learning_rate)
-    opt = tfa.optimizers.Lookahead(opt)
-    ```
+    >>> opt = tf.keras.optimizers.SGD(learning_rate=0.01)
+    >>> opt = tfa.optimizers.Lookahead(opt)
     """
 
     @typechecked
diff --git a/tensorflow_addons/optimizers/moving_average.py b/tensorflow_addons/optimizers/moving_average.py
index 7e92186ad9..cc8df1b6c5 100644
--- a/tensorflow_addons/optimizers/moving_average.py
+++ b/tensorflow_addons/optimizers/moving_average.py
@@ -34,11 +34,9 @@ class MovingAverage(AveragedOptimizerWrapper):
 
     Example of usage:
 
-    ```python
-    opt = tf.keras.optimizers.SGD(learning_rate)
-    opt = tfa.optimizers.MovingAverage(opt)
+    >>> opt = tf.keras.optimizers.SGD(learning_rate=0.01)
+    >>> opt = tfa.optimizers.MovingAverage(opt)
 
-    ```
     """
 
     @typechecked
diff --git a/tensorflow_addons/optimizers/weight_decay_optimizers.py b/tensorflow_addons/optimizers/weight_decay_optimizers.py
index a30dfa67b8..614a78df2b 100644
--- a/tensorflow_addons/optimizers/weight_decay_optimizers.py
+++ b/tensorflow_addons/optimizers/weight_decay_optimizers.py
@@ -15,6 +15,7 @@
 """Base class to make optimizers weight decay ready."""
 
 import tensorflow as tf
+
 from tensorflow_addons.utils.types import FloatTensorLike
 
 from typeguard import typechecked
@@ -56,18 +57,14 @@ def __init__(self, weight_decay, *args, **kwargs):
     Note: when applying a decay to the learning rate, be sure to manually
     apply the decay to the `weight_decay` as well. For example:
 
-    ```python
-    step = tf.Variable(0, trainable=False)
-    schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
-        [10000, 15000], [1e-0, 1e-1, 1e-2])
-    # lr and wd can be a function or a tensor
-    lr = 1e-1 * schedule(step)
-    wd = lambda: 1e-4 * schedule(step)
-
-    # ...
+    Usage:
 
-    optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
-    ```
+    >>> step = tf.Variable(0, trainable=False)
+    >>> schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+    ...     [10000, 15000], [1e-0, 1e-1, 1e-2])
+    >>> lr = 1e-1 * schedule(step)
+    >>> wd = lambda: 1e-4 * schedule(step)
+    >>> optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
     """
 
     @typechecked
@@ -258,18 +255,15 @@ def extend_with_decoupled_weight_decay(
     Note: when applying a decay to the learning rate, be sure to manually
     apply the decay to the `weight_decay` as well. For example:
 
-    ```python
-    step = tf.Variable(0, trainable=False)
-    schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
-        [10000, 15000], [1e-0, 1e-1, 1e-2])
-    # lr and wd can be a function or a tensor
-    lr = 1e-1 * schedule(step)
-    wd = lambda: 1e-4 * schedule(step)
+    Usage:
 
-    # ...
+    >>> step = tf.Variable(0, trainable=False)
+    >>> schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+    ...     [10000, 15000], [1e-0, 1e-1, 1e-2])
+    >>> lr = 1e-1 * schedule(step)
+    >>> wd = lambda: 1e-4 * schedule(step)
+    >>> optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
 
-    optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
-    ```
 
     Note: you might want to register your own custom optimizer using
     `tf.keras.utils.get_custom_objects()`.
@@ -335,19 +329,15 @@ class SGDW(DecoupledWeightDecayExtension, tf.keras.optimizers.SGD):
     Note: when applying a decay to the learning rate, be sure to manually
     apply the decay to the `weight_decay` as well. For example:
 
-    ```python
-    step = tf.Variable(0, trainable=False)
-    schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
-        [10000, 15000], [1e-0, 1e-1, 1e-2])
-    # lr and wd can be a function or a tensor
-    lr = 1e-1 * schedule(step)
-    wd = lambda: 1e-4 * schedule(step)
-
-    # ...
+    Usage:
 
-    optimizer = tfa.optimizers.SGDW(
-        learning_rate=lr, weight_decay=wd, momentum=0.9)
-    ```
+    >>> step = tf.Variable(0, trainable=False)
+    >>> schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+    ...     [10000, 15000], [1e-0, 1e-1, 1e-2])
+    >>> lr = 1e-1 * schedule(step)
+    >>> wd = lambda: 1e-4 * schedule(step)
+    >>> optimizer = tfa.optimizers.SGDW(
+    ...     learning_rate=lr, weight_decay=wd, momentum=0.9)
     """
 
     @typechecked
@@ -414,18 +404,14 @@ class AdamW(DecoupledWeightDecayExtension, tf.keras.optimizers.Adam):
     Note: when applying a decay to the learning rate, be sure to manually
     apply the decay to the `weight_decay` as well. For example:
 
-    ```python
-    step = tf.Variable(0, trainable=False)
-    schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
-        [10000, 15000], [1e-0, 1e-1, 1e-2])
-    # lr and wd can be a function or a tensor
-    lr = 1e-1 * schedule(step)
-    wd = lambda: 1e-4 * schedule(step)
-
-    # ...
+    Usage:
 
-    optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
-    ```
+    >>> step = tf.Variable(0, trainable=False)
+    >>> schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
+    ...     [10000, 15000], [1e-0, 1e-1, 1e-2])
+    >>> lr = 1e-1 * schedule(step)
+    >>> wd = lambda: 1e-4 * schedule(step)
+    >>> optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=wd)
     """
 
     @typechecked
diff --git a/tools/docker/sanity_check.Dockerfile b/tools/docker/sanity_check.Dockerfile
index 6ccaf1eec7..4b2a4127bf 100644
--- a/tools/docker/sanity_check.Dockerfile
+++ b/tools/docker/sanity_check.Dockerfile
@@ -118,7 +118,8 @@ RUN python configure.py
 RUN --mount=type=cache,id=cache_bazel,target=/root/.cache/bazel \
     bash tools/install_so_files.sh
 RUN pip install --no-deps -e .
-RUN pytest -v -n auto ./tensorflow_addons/activations
+RUN pytest -v -n auto ./tensorflow_addons/activations \
+    --doctest-modules tensorflow_addons/ --ignore-glob=*_test.py
 RUN touch /ok.txt
 
 # -------------------------------
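Reviewer note (not part of the diff): the sketch below shows one way to run a single module's doctests outside the Docker image, with the same `np`/`tf`/`tfa` globals that the `add_np` fixture in `conftest.py` injects through pytest's `doctest_namespace`. Only the module path, the fixture name, and the pytest flags come from this patch; the rest (use of `doctest.testmod`, the `importlib` lookup, the printed counts) is illustrative.

```python
# Minimal sketch: check the converted docstrings in one module, assuming
# tensorflow, tensorflow_addons, and numpy are installed. importlib is used
# because `tfa.losses.giou_loss` resolves to the function of the same name,
# not the module.
import doctest
import importlib

import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa

module = importlib.import_module("tensorflow_addons.losses.giou_loss")
results = doctest.testmod(
    module,
    # Mirrors what the add_np fixture provides when running under pytest.
    globs={"np": np, "tf": tf, "tfa": tfa},
    verbose=False,
)
print(results)  # e.g. TestResults(failed=0, attempted=...)
```

Under pytest the equivalent check is simply the `--doctest-modules` invocation added to `sanity_check.Dockerfile` above, restricted to the module of interest.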