From d6d47b0ae4b6e6b8962c52a43f3639b54f48103a Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Sun, 14 Mar 2021 23:55:20 +0100
Subject: [PATCH 1/9] prune accuracy

---
 .../metrics/classification/accuracy.py        | 125 ++-----------
 .../metrics/functional/accuracy.py            | 111 ++---------
 tests/deprecated_api/test_remove_1-5.py       |   1 +
 tests/metrics/classification/test_accuracy.py | 176 ------------------
 4 files changed, 30 insertions(+), 383 deletions(-)
 delete mode 100644 tests/metrics/classification/test_accuracy.py

diff --git a/pytorch_lightning/metrics/classification/accuracy.py b/pytorch_lightning/metrics/classification/accuracy.py
index 367c9b029d841..9e8ef73df3791 100644
--- a/pytorch_lightning/metrics/classification/accuracy.py
+++ b/pytorch_lightning/metrics/classification/accuracy.py
@@ -14,89 +14,16 @@
 from typing import Any, Callable, Optional
 
 import torch
-from torchmetrics import Metric
+from torchmetrics import Accuracy as _Accuracy
 
-from pytorch_lightning.metrics.functional.accuracy import _accuracy_compute, _accuracy_update
+from pytorch_lightning.utilities import rank_zero_warn
 
 
-class Accuracy(Metric):
+class Accuracy(_Accuracy):
     r"""
-    Computes `Accuracy <https://en.wikipedia.org/wiki/Accuracy_and_precision>`__:
-
-    .. math::
-        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
-
-    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a
-    tensor of predictions.
-
-    For multi-class and multi-dimensional multi-class data with probability predictions, the
-    parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the
-    top-K highest probability items are considered to find the correct label.
-
-    For multi-label and multi-dimensional multi-class inputs, this metric computes the "global"
-    accuracy by default, which counts all labels or sub-samples separately. This can be
-    changed to subset accuracy (which requires all labels or sub-samples in the sample to
-    be correctly predicted) by setting ``subset_accuracy=True``.
-
-    Args:
-        threshold:
-            Threshold probability value for transforming probability predictions to binary
-            (0,1) predictions, in the case of binary or multi-label inputs.
-        top_k:
-            Number of highest probability predictions considered to find the correct label, relevant
-            only for (multi-dimensional) multi-class inputs with probability predictions. The
-            default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left at default (``None``) for all other types of inputs.
-        subset_accuracy:
-            Whether to compute subset accuracy for multi-label and multi-dimensional
-            multi-class inputs (has no effect for other input types).
-
-            - For multi-label inputs, if the parameter is set to ``True``, then all labels for
-              each sample must be correctly predicted for the sample to count as correct. If it
-              is set to ``False``, then all labels are counted separately - this is equivalent to
-              flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``).
-
-            - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all
-              sub-sample (on the extra axis) must be correct for the sample to be counted as correct.
-              If it is set to ``False``, then all sub-samples are counter separately - this is equivalent,
-              in the case of label predictions, to flattening the inputs beforehand (i.e.
-              ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter
-              still applies in both cases, if set.
-
-        compute_on_step:
-            Forward only calls ``update()`` and return ``None`` if this is set to ``False``.
-        dist_sync_on_step:
-            Synchronize metric state across processes at each ``forward()``
-            before returning the value at the step
-        process_group:
-            Specify the process group on which synchronization is called.
-            default: ``None`` (which selects the entire world)
-        dist_sync_fn:
-            Callback that performs the allgather operation on the metric state. When ``None``, DDP
-            will be used to perform the allgather
-
-    Raises:
-        ValueError:
-            If ``threshold`` is not between ``0`` and ``1``.
-        ValueError:
-            If ``top_k`` is not an ``integer`` larger than ``0``.
-
-    Example:
-
-        >>> from pytorch_lightning.metrics import Accuracy
-        >>> target = torch.tensor([0, 1, 2, 3])
-        >>> preds = torch.tensor([0, 2, 1, 3])
-        >>> accuracy = Accuracy()
-        >>> accuracy(preds, target)
-        tensor(0.5000)
-
-        >>> target = torch.tensor([0, 1, 2])
-        >>> preds = torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
-        >>> accuracy = Accuracy(top_k=2)
-        >>> accuracy(preds, target)
-        tensor(0.6667)
+    This implementation refers to :class:`~torchmetrics.Accuracy`.
 
+    .. warning:: This metric is deprecated, use ``torchmetrics.Accuracy``. Will be removed in v1.5.0.
     """
 
     def __init__(
@@ -109,44 +36,16 @@ def __init__(
         process_group: Optional[Any] = None,
         dist_sync_fn: Callable = None,
     ):
+        rank_zero_warn(
+            "This `Accuracy` was deprecated in v1.3.0 in favor of `torchmetrics.Accuracy`."
+            " It will be removed in v1.5.0", DeprecationWarning
+        )
         super().__init__(
+            threshold=threshold,
+            top_k=top_k,
+            subset_accuracy=subset_accuracy,
             compute_on_step=compute_on_step,
             dist_sync_on_step=dist_sync_on_step,
             process_group=process_group,
             dist_sync_fn=dist_sync_fn,
         )
-
-        self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")
-        self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
-
-        if not 0 < threshold < 1:
-            raise ValueError(f"The `threshold` should be a float in the (0,1) interval, got {threshold}")
-
-        if top_k is not None and (not isinstance(top_k, int) or top_k <= 0):
-            raise ValueError(f"The `top_k` should be an integer larger than 0, got {top_k}")
-
-        self.threshold = threshold
-        self.top_k = top_k
-        self.subset_accuracy = subset_accuracy
-
-    def update(self, preds: torch.Tensor, target: torch.Tensor):
-        """
-        Update state with predictions and targets.
-
-        Args:
-            preds: Predictions from model (probabilities, or labels)
-            target: Ground truth labels
-        """
-
-        correct, total = _accuracy_update(
-            preds, target, threshold=self.threshold, top_k=self.top_k, subset_accuracy=self.subset_accuracy
-        )
-
-        self.correct += correct
-        self.total += total
-
-    def compute(self) -> torch.Tensor:
-        """
-        Computes accuracy based on inputs passed in to ``update`` previously.
-        """
-        return _accuracy_compute(self.correct, self.total)
diff --git a/pytorch_lightning/metrics/functional/accuracy.py b/pytorch_lightning/metrics/functional/accuracy.py
index 53a47611cd49a..7da94ad075d8a 100644
--- a/pytorch_lightning/metrics/functional/accuracy.py
+++ b/pytorch_lightning/metrics/functional/accuracy.py
@@ -11,41 +11,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Optional, Tuple
+from typing import Optional
 
 import torch
-from torchmetrics.classification.checks import _input_format_classification
-from torchmetrics.utilities.enums import DataType
+from torchmetrics.functional import accuracy as _accuracy
 
-
-def _accuracy_update(
-    preds: torch.Tensor, target: torch.Tensor, threshold: float, top_k: Optional[int], subset_accuracy: bool
-) -> Tuple[torch.Tensor, torch.Tensor]:
-
-    preds, target, mode = _input_format_classification(preds, target, threshold=threshold, top_k=top_k)
-
-    if mode == DataType.MULTILABEL and top_k:
-        raise ValueError("You can not use the `top_k` parameter to calculate accuracy for multi-label inputs.")
-
-    if mode == DataType.BINARY or (mode == DataType.MULTILABEL and subset_accuracy):
-        correct = (preds == target).all(dim=1).sum()
-        total = torch.tensor(target.shape[0], device=target.device)
-    elif mode == DataType.MULTILABEL and not subset_accuracy:
-        correct = (preds == target).sum()
-        total = torch.tensor(target.numel(), device=target.device)
-    elif mode == DataType.MULTICLASS or (mode == DataType.MULTIDIM_MULTICLASS and not subset_accuracy):
-        correct = (preds * target).sum()
-        total = target.sum()
-    elif mode == DataType.MULTIDIM_MULTICLASS and subset_accuracy:
-        sample_correct = (preds * target).sum(dim=(1, 2))
-        correct = (sample_correct == target.shape[2]).sum()
-        total = torch.tensor(target.shape[0], device=target.device)
-
-    return correct, total
-
-
-def _accuracy_compute(correct: torch.Tensor, total: torch.Tensor) -> torch.Tensor:
-    return correct.float() / total
+from pytorch_lightning.utilities import rank_zero_warn
 
 
 def accuracy(
@@ -55,68 +26,20 @@ def accuracy(
     top_k: Optional[int] = None,
     subset_accuracy: bool = False,
 ) -> torch.Tensor:
-    r"""Computes `Accuracy <https://en.wikipedia.org/wiki/Accuracy_and_precision>`_:
-
-    .. math::
-        \text{Accuracy} = \frac{1}{N}\sum_i^N 1(y_i = \hat{y}_i)
-
-    Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a
-    tensor of predictions.
-
-    For multi-class and multi-dimensional multi-class data with probability predictions, the
-    parameter ``top_k`` generalizes this metric to a Top-K accuracy metric: for each sample the
-    top-K highest probability items are considered to find the correct label.
-
-    For multi-label and multi-dimensional multi-class inputs, this metric computes the "global"
-    accuracy by default, which counts all labels or sub-samples separately. This can be
-    changed to subset accuracy (which requires all labels or sub-samples in the sample to
-    be correctly predicted) by setting ``subset_accuracy=True``.
-
-    Args:
-        preds: Predictions from model (probabilities, or labels)
-        target: Ground truth labels
-        threshold:
-            Threshold probability value for transforming probability predictions to binary
-            (0,1) predictions, in the case of binary or multi-label inputs.
-        top_k:
-            Number of highest probability predictions considered to find the correct label, relevant
-            only for (multi-dimensional) multi-class inputs with probability predictions. The
-            default value (``None``) will be interpreted as 1 for these inputs.
-
-            Should be left at default (``None``) for all other types of inputs.
-        subset_accuracy:
-            Whether to compute subset accuracy for multi-label and multi-dimensional
-            multi-class inputs (has no effect for other input types).
-
-            - For multi-label inputs, if the parameter is set to ``True``, then all labels for
-              each sample must be correctly predicted for the sample to count as correct. If it
-              is set to ``False``, then all labels are counted separately - this is equivalent to
-              flattening inputs beforehand (i.e. ``preds = preds.flatten()`` and same for ``target``).
-
-            - For multi-dimensional multi-class inputs, if the parameter is set to ``True``, then all
-              sub-sample (on the extra axis) must be correct for the sample to be counted as correct.
-              If it is set to ``False``, then all sub-samples are counter separately - this is equivalent,
-              in the case of label predictions, to flattening the inputs beforehand (i.e.
-              ``preds = preds.flatten()`` and same for ``target``). Note that the ``top_k`` parameter
-              still applies in both cases, if set.
-
-    Raises:
-        ValueError:
-            If ``top_k`` parameter is set for ``multi-label`` inputs.
-
-    Example:
-
-        >>> from pytorch_lightning.metrics.functional import accuracy
-        >>> target = torch.tensor([0, 1, 2, 3])
-        >>> preds = torch.tensor([0, 2, 1, 3])
-        >>> accuracy(preds, target)
-        tensor(0.5000)
+    r"""
+    This implementation refers to :class:`~torchmetrics.functional.accuracy`.
 
-        >>> target = torch.tensor([0, 1, 2])
-        >>> preds = torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]])
-        >>> accuracy(preds, target, top_k=2)
-        tensor(0.6667)
+    .. warning:: This metric is deprecated, use ``torchmetrics.functional.accuracy``. Will be removed in v1.5.0.
     """
-    correct, total = _accuracy_update(preds, target, threshold, top_k, subset_accuracy)
-    return _accuracy_compute(correct, total)
+
+    rank_zero_warn(
+        "This `accuracy` was deprecated in v1.3.0 in favor of `torchmetrics.functional.accuracy`."
+        " It will be removed in v1.5.0", DeprecationWarning
+    )
+    return _accuracy(
+        preds=preds,
+        target=target,
+        threshold=threshold,
+        top_k=top_k,
+        subset_accuracy=subset_accuracy,
+    )
diff --git a/tests/deprecated_api/test_remove_1-5.py b/tests/deprecated_api/test_remove_1-5.py
index f449a37e33c25..06557bf573735 100644
--- a/tests/deprecated_api/test_remove_1-5.py
+++ b/tests/deprecated_api/test_remove_1-5.py
@@ -15,6 +15,7 @@
 from unittest import mock
 
 import pytest
+import torch
 from torch import optim
 
 from pytorch_lightning import Callback, Trainer
diff --git a/tests/metrics/classification/test_accuracy.py b/tests/metrics/classification/test_accuracy.py
deleted file mode 100644
index 63a4870ed422e..0000000000000
--- a/tests/metrics/classification/test_accuracy.py
+++ /dev/null
@@ -1,176 +0,0 @@
-from functools import partial
-
-import numpy as np
-import pytest
-import torch
-from sklearn.metrics import accuracy_score as sk_accuracy
-from torchmetrics.classification.checks import _input_format_classification
-from torchmetrics.utilities.enums import DataType
-
-from pytorch_lightning.metrics import Accuracy
-from pytorch_lightning.metrics.functional import accuracy
-from tests.metrics.classification.inputs import _input_binary, _input_binary_prob
-from tests.metrics.classification.inputs import _input_multiclass as _input_mcls
-from tests.metrics.classification.inputs import _input_multiclass_prob as _input_mcls_prob
-from tests.metrics.classification.inputs import _input_multidim_multiclass as _input_mdmc
-from tests.metrics.classification.inputs import _input_multidim_multiclass_prob as _input_mdmc_prob
-from tests.metrics.classification.inputs import _input_multilabel as _input_mlb
-from tests.metrics.classification.inputs import _input_multilabel_multidim as _input_mlmd
-from tests.metrics.classification.inputs import _input_multilabel_multidim_prob as _input_mlmd_prob
-from tests.metrics.classification.inputs import _input_multilabel_prob as _input_mlb_prob
-from tests.metrics.utils import MetricTester, THRESHOLD
-
-torch.manual_seed(42)
-
-
-def _sk_accuracy(preds, target, subset_accuracy):
-    sk_preds, sk_target, mode = _input_format_classification(preds, target, threshold=THRESHOLD)
-    sk_preds, sk_target = sk_preds.numpy(), sk_target.numpy()
-
-    if mode == DataType.MULTIDIM_MULTICLASS and not subset_accuracy:
-        sk_preds, sk_target = np.transpose(sk_preds, (0, 2, 1)), np.transpose(sk_target, (0, 2, 1))
-        sk_preds, sk_target = sk_preds.reshape(-1, sk_preds.shape[2]), sk_target.reshape(-1, sk_target.shape[2])
-    elif mode == DataType.MULTIDIM_MULTICLASS and subset_accuracy:
-        return np.all(sk_preds == sk_target, axis=(1, 2)).mean()
-    elif mode == DataType.MULTILABEL and not subset_accuracy:
-        sk_preds, sk_target = sk_preds.reshape(-1), sk_target.reshape(-1)
-
-    return sk_accuracy(y_true=sk_target, y_pred=sk_preds)
-
-
-@pytest.mark.parametrize(
-    "preds, target, subset_accuracy",
-    [
-        (_input_binary_prob.preds, _input_binary_prob.target, False),
-        (_input_binary.preds, _input_binary.target, False),
-        (_input_mlb_prob.preds, _input_mlb_prob.target, True),
-        (_input_mlb_prob.preds, _input_mlb_prob.target, False),
-        (_input_mlb.preds, _input_mlb.target, True),
-        (_input_mlb.preds, _input_mlb.target, False),
-        (_input_mcls_prob.preds, _input_mcls_prob.target, False),
-        (_input_mcls.preds, _input_mcls.target, False),
-        (_input_mdmc_prob.preds, _input_mdmc_prob.target, False),
-        (_input_mdmc_prob.preds, _input_mdmc_prob.target, True),
-        (_input_mdmc.preds, _input_mdmc.target, False),
-        (_input_mdmc.preds, _input_mdmc.target, True),
-        (_input_mlmd_prob.preds, _input_mlmd_prob.target, True),
-        (_input_mlmd_prob.preds, _input_mlmd_prob.target, False),
-        (_input_mlmd.preds, _input_mlmd.target, True),
-        (_input_mlmd.preds, _input_mlmd.target, False),
-    ],
-)
-class TestAccuracies(MetricTester):
-
-    @pytest.mark.parametrize("ddp", [False, True])
-    @pytest.mark.parametrize("dist_sync_on_step", [False, True])
-    def test_accuracy_class(self, ddp, dist_sync_on_step, preds, target, subset_accuracy):
-        self.run_class_metric_test(
-            ddp=ddp,
-            preds=preds,
-            target=target,
-            metric_class=Accuracy,
-            sk_metric=partial(_sk_accuracy, subset_accuracy=subset_accuracy),
-            dist_sync_on_step=dist_sync_on_step,
-            metric_args={
-                "threshold": THRESHOLD,
-                "subset_accuracy": subset_accuracy
-            },
-        )
-
-    def test_accuracy_fn(self, preds, target, subset_accuracy):
-        self.run_functional_metric_test(
-            preds,
-            target,
-            metric_functional=accuracy,
-            sk_metric=partial(_sk_accuracy, subset_accuracy=subset_accuracy),
-            metric_args={
-                "threshold": THRESHOLD,
-                "subset_accuracy": subset_accuracy
-            },
-        )
-
-
-_l1to4 = [0.1, 0.2, 0.3, 0.4]
-_l1to4t3 = np.array([_l1to4, _l1to4, _l1to4])
-_l1to4t3_mcls = [_l1to4t3.T, _l1to4t3.T, _l1to4t3.T]
-
-# The preds in these examples always put highest probability on class 3, second highest on class 2,
-# third highest on class 1, and lowest on class 0
-_topk_preds_mcls = torch.tensor([_l1to4t3, _l1to4t3]).float()
-_topk_target_mcls = torch.tensor([[1, 2, 3], [2, 1, 0]])
-
-# This is like for MC case, but one sample in each batch is sabotaged with 0 class prediction :)
-_topk_preds_mdmc = torch.tensor([_l1to4t3_mcls, _l1to4t3_mcls]).float()
-_topk_target_mdmc = torch.tensor([[[1, 1, 0], [2, 2, 2], [3, 3, 3]], [[2, 2, 0], [1, 1, 1], [0, 0, 0]]])
-
-
-# Replace with a proper sk_metric test once sklearn 0.24 hits :)
-@pytest.mark.parametrize(
-    "preds, target, exp_result, k, subset_accuracy",
-    [
-        (_topk_preds_mcls, _topk_target_mcls, 1 / 6, 1, False),
-        (_topk_preds_mcls, _topk_target_mcls, 3 / 6, 2, False),
-        (_topk_preds_mcls, _topk_target_mcls, 5 / 6, 3, False),
-        (_topk_preds_mcls, _topk_target_mcls, 1 / 6, 1, True),
-        (_topk_preds_mcls, _topk_target_mcls, 3 / 6, 2, True),
-        (_topk_preds_mcls, _topk_target_mcls, 5 / 6, 3, True),
-        (_topk_preds_mdmc, _topk_target_mdmc, 1 / 6, 1, False),
-        (_topk_preds_mdmc, _topk_target_mdmc, 8 / 18, 2, False),
-        (_topk_preds_mdmc, _topk_target_mdmc, 13 / 18, 3, False),
-        (_topk_preds_mdmc, _topk_target_mdmc, 1 / 6, 1, True),
-        (_topk_preds_mdmc, _topk_target_mdmc, 2 / 6, 2, True),
-        (_topk_preds_mdmc, _topk_target_mdmc, 3 / 6, 3, True),
-    ],
-)
-def test_topk_accuracy(preds, target, exp_result, k, subset_accuracy):
-    topk = Accuracy(top_k=k, subset_accuracy=subset_accuracy)
-
-    for batch in range(preds.shape[0]):
-        topk(preds[batch], target[batch])
-
-    assert topk.compute() == exp_result
-
-    # Test functional
-    total_samples = target.shape[0] * target.shape[1]
-
-    preds = preds.view(total_samples, 4, -1)
-    target = target.view(total_samples, -1)
-
-    assert accuracy(preds, target, top_k=k, subset_accuracy=subset_accuracy) == exp_result
-
-
-# Only MC and MDMC with probs input type should be accepted for top_k
-@pytest.mark.parametrize(
-    "preds, target",
-    [
-        (_input_binary_prob.preds, _input_binary_prob.target),
-        (_input_binary.preds, _input_binary.target),
-        (_input_mlb_prob.preds, _input_mlb_prob.target),
-        (_input_mlb.preds, _input_mlb.target),
-        (_input_mcls.preds, _input_mcls.target),
-        (_input_mdmc.preds, _input_mdmc.target),
-        (_input_mlmd_prob.preds, _input_mlmd_prob.target),
-        (_input_mlmd.preds, _input_mlmd.target),
-    ],
-)
-def test_topk_accuracy_wrong_input_types(preds, target):
-    topk = Accuracy(top_k=1)
-
-    with pytest.raises(ValueError):
-        topk(preds[0], target[0])
-
-    with pytest.raises(ValueError):
-        accuracy(preds[0], target[0], top_k=1)
-
-
-@pytest.mark.parametrize("top_k, threshold", [(0, 0.5), (None, 1.5)])
-def test_wrong_params(top_k, threshold):
-    preds, target = _input_mcls_prob.preds, _input_mcls_prob.target
-
-    with pytest.raises(ValueError):
-        acc = Accuracy(threshold=threshold, top_k=top_k)
-        acc(preds, target)
-        acc.compute()
-
-    with pytest.raises(ValueError):
-        accuracy(preds, target, threshold=threshold, top_k=top_k)

From ed4909c60237c964517735899fa59eaeb4880b1c Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Sun, 14 Mar 2021 23:58:21 +0100
Subject: [PATCH 2/9] chlog

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f60d13f493dcb..cc78de0f9c0c1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -75,6 +75,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
     [#6547](https://github.com/PyTorchLightning/pytorch-lightning/pull/6547),
 
+    [#6515](https://github.com/PyTorchLightning/pytorch-lightning/pull/6515),
+
 )

From c0ff43661054399ade5e04da2bb5f94c0724defc Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Mon, 15 Mar 2021 15:49:11 +0100
Subject: [PATCH 3/9] flake8

---
 pytorch_lightning/metrics/classification/accuracy.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pytorch_lightning/metrics/classification/accuracy.py b/pytorch_lightning/metrics/classification/accuracy.py
index 9e8ef73df3791..08b373451a76b 100644
--- a/pytorch_lightning/metrics/classification/accuracy.py
+++ b/pytorch_lightning/metrics/classification/accuracy.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 from typing import Any, Callable, Optional
 
-import torch
 from torchmetrics import Accuracy as _Accuracy
 
 from pytorch_lightning.utilities import rank_zero_warn

From 11b73b925c76be803984b65e94b61dfe23af148d Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Mon, 15 Mar 2021 17:22:21 +0100
Subject: [PATCH 4/9] Apply suggestions from code review

Co-authored-by: Nicki Skafte

---
 pytorch_lightning/metrics/functional/accuracy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pytorch_lightning/metrics/functional/accuracy.py b/pytorch_lightning/metrics/functional/accuracy.py
index 7da94ad075d8a..00568ac644186 100644
--- a/pytorch_lightning/metrics/functional/accuracy.py
+++ b/pytorch_lightning/metrics/functional/accuracy.py
@@ -29,7 +29,7 @@ def accuracy(
     r"""
     This implementation refers to :class:`~torchmetrics.functional.accuracy`.
 
-    .. warning:: This metric is deprecated, use ``torchmetrics.functional.accuracy``. Will be removed in v1.5.0.
+    .. warning:: This metric is deprecated since v1.3.0, use ``torchmetrics.functional.accuracy``. Will be removed in v1.5.0.
     """
 
     rank_zero_warn(

From 7254ce0ba7decc00e7c737deca43af8fe0eafb98 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Tue, 16 Mar 2021 16:05:21 +0100
Subject: [PATCH 5/9] wrap

---
 .../metrics/classification/accuracy.py | 27 ++++++-------------
 .../metrics/functional/accuracy.py     | 22 ++++-----------
 2 files changed, 13 insertions(+), 36 deletions(-)

diff --git a/pytorch_lightning/metrics/classification/accuracy.py b/pytorch_lightning/metrics/classification/accuracy.py
index 08b373451a76b..b9d0a45e6fd33 100644
--- a/pytorch_lightning/metrics/classification/accuracy.py
+++ b/pytorch_lightning/metrics/classification/accuracy.py
@@ -15,16 +15,12 @@
 
 from torchmetrics import Accuracy as _Accuracy
 
-from pytorch_lightning.utilities import rank_zero_warn
+from pytorch_lightning.utilities.deprecation import deprecated
 
 
 class Accuracy(_Accuracy):
-    r"""
-    This implementation refers to :class:`~torchmetrics.Accuracy`.
-
-    .. warning:: This metric is deprecated, use ``torchmetrics.Accuracy``. Will be removed in v1.5.0.
-    """
 
+    @deprecated(target=_Accuracy, ver_deprecate="1.3.0", ver_remove="1.5.0")
     def __init__(
         self,
         threshold: float = 0.5,
@@ -35,16 +31,9 @@ def __init__(
         process_group: Optional[Any] = None,
         dist_sync_fn: Callable = None,
     ):
-        rank_zero_warn(
-            "This `Accuracy` was deprecated in v1.3.0 in favor of `torchmetrics.Accuracy`."
-            " It will be removed in v1.5.0", DeprecationWarning
-        )
-        super().__init__(
-            threshold=threshold,
-            top_k=top_k,
-            subset_accuracy=subset_accuracy,
-            compute_on_step=compute_on_step,
-            dist_sync_on_step=dist_sync_on_step,
-            process_group=process_group,
-            dist_sync_fn=dist_sync_fn,
-        )
+        """
+        This implementation refers to :class:`~torchmetrics.Accuracy`.
+
+        .. deprecated::
+            Use :class:`~torchmetrics.Accuracy`. Will be removed in v1.5.0.
+        """
diff --git a/pytorch_lightning/metrics/functional/accuracy.py b/pytorch_lightning/metrics/functional/accuracy.py
index 00568ac644186..601442cd01202 100644
--- a/pytorch_lightning/metrics/functional/accuracy.py
+++ b/pytorch_lightning/metrics/functional/accuracy.py
@@ -16,9 +16,10 @@
 import torch
 from torchmetrics.functional import accuracy as _accuracy
 
-from pytorch_lightning.utilities import rank_zero_warn
+from pytorch_lightning.utilities.deprecation import deprecated
 
 
+@deprecated(target=_accuracy, ver_deprecate="1.3.0", ver_remove="1.5.0")
 def accuracy(
     preds: torch.Tensor,
     target: torch.Tensor,
@@ -26,20 +27,7 @@ def accuracy(
     top_k: Optional[int] = None,
     subset_accuracy: bool = False,
 ) -> torch.Tensor:
-    r"""
-    This implementation refers to :class:`~torchmetrics.functional.accuracy`.
-
-    .. warning:: This metric is deprecated since v1.3.0, use ``torchmetrics.functional.accuracy``. Will be removed in v1.5.0.
     """
-
-    rank_zero_warn(
-        "This `accuracy` was deprecated in v1.3.0 in favor of `torchmetrics.functional.accuracy`."
-        " It will be removed in v1.5.0", DeprecationWarning
-    )
-    return _accuracy(
-        preds=preds,
-        target=target,
-        threshold=threshold,
-        top_k=top_k,
-        subset_accuracy=subset_accuracy,
-    )
+    .. deprecated::
+        Use :func:`torchmetrics.functional.accuracy`. Will be removed in v1.5.0.
+    """

From 10caa2cf55191f8807969c09d71256570e47da00 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Tue, 16 Mar 2021 16:10:03 +0100
Subject: [PATCH 6/9] test

---
 tests/deprecated_api/test_remove_1-5.py         |  1 -
 tests/deprecated_api/test_remove_1-5_metrics.py | 16 +++++++++++++---
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/tests/deprecated_api/test_remove_1-5.py b/tests/deprecated_api/test_remove_1-5.py
index 06557bf573735..f449a37e33c25 100644
--- a/tests/deprecated_api/test_remove_1-5.py
+++ b/tests/deprecated_api/test_remove_1-5.py
@@ -15,7 +15,6 @@
 from unittest import mock
 
 import pytest
-import torch
 from torch import optim
 
 from pytorch_lightning import Callback, Trainer
diff --git a/tests/deprecated_api/test_remove_1-5_metrics.py b/tests/deprecated_api/test_remove_1-5_metrics.py
index 3428c0b761e93..a7f86661f618b 100644
--- a/tests/deprecated_api/test_remove_1-5_metrics.py
+++ b/tests/deprecated_api/test_remove_1-5_metrics.py
@@ -20,7 +20,7 @@
 from pytorch_lightning.metrics.utils import get_num_classes, select_topk, to_categorical, to_onehot
 
 
-def test_v1_5_0_metrics_utils():
+def test_v1_5_metrics_utils():
     x = torch.tensor([1, 2, 3])
     with pytest.deprecated_call(match="It will be removed in v1.5.0"):
         assert torch.equal(to_onehot(x), torch.Tensor([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]).to(int))
@@ -37,7 +37,7 @@ def test_v1_5_0_metrics_utils():
     assert torch.equal(to_categorical(x), torch.Tensor([1, 0]).to(int))
 
 
-def test_v1_5_0_metrics_collection():
+def test_v1_5_metrics_collection():
     target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2])
     preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2])
     with pytest.deprecated_call(
         match="`pytorch_lightning.metrics.metric.MetricCollection` was deprecated since v1.3.0 in favor"
         " of `torchmetrics.collections.MetricCollection`. It will be removed in v1.5.0."
     ):
         metrics = MetricCollection([Accuracy()])
-    assert metrics(preds, target) == {'Accuracy': torch.Tensor([0.1250])[0]}
+    assert metrics(preds, target) == {'Accuracy': torch.tensor(0.1250)}
+
+
+def test_v1_5_metric_accuracy():
+    from pytorch_lightning.metrics.functional.accuracy import accuracy
+    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
+        assert accuracy(preds=torch.tensor([0, 1]), target=torch.tensor([0, 1])) == torch.tensor(1.)
+
+    from pytorch_lightning.metrics import Accuracy
+    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
+        Accuracy()

From 5be1ec13c9d0b081bd587cef3a3b497ee5d60679 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Tue, 16 Mar 2021 16:11:18 +0100
Subject: [PATCH 7/9] test

---
 .../deprecated_api/test_remove_1-5_metrics.py | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/tests/deprecated_api/test_remove_1-5_metrics.py b/tests/deprecated_api/test_remove_1-5_metrics.py
index a7f86661f618b..3c7ce48caeea1 100644
--- a/tests/deprecated_api/test_remove_1-5_metrics.py
+++ b/tests/deprecated_api/test_remove_1-5_metrics.py
@@ -37,6 +37,16 @@ def test_v1_5_metrics_utils():
     assert torch.equal(to_categorical(x), torch.Tensor([1, 0]).to(int))
 
 
+def test_v1_5_metric_accuracy():
+    from pytorch_lightning.metrics.functional.accuracy import accuracy
+    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
+        assert accuracy(preds=torch.tensor([0, 1]), target=torch.tensor([0, 1])) == torch.tensor(1.)
+
+    from pytorch_lightning.metrics import Accuracy
+    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
+        Accuracy()
+
+
 def test_v1_5_metrics_collection():
     target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2])
     preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2])
@@ -46,13 +56,3 @@ def test_v1_5_metrics_collection():
     ):
         metrics = MetricCollection([Accuracy()])
     assert metrics(preds, target) == {'Accuracy': torch.tensor(0.1250)}
-
-
-def test_v1_5_metric_accuracy():
-    from pytorch_lightning.metrics.functional.accuracy import accuracy
-    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
-        assert accuracy(preds=torch.tensor([0, 1]), target=torch.tensor([0, 1])) == torch.tensor(1.)
-
-    from pytorch_lightning.metrics import Accuracy
-    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
-        Accuracy()

From 48eb05a1d8128937dc1cf5ecf15f318d1c556be1 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Wed, 17 Mar 2021 11:47:34 +0100
Subject: [PATCH 8/9] fix

---
 tests/deprecated_api/test_remove_1-5_metrics.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/tests/deprecated_api/test_remove_1-5_metrics.py b/tests/deprecated_api/test_remove_1-5_metrics.py
index 3c7ce48caeea1..fb8610b62eee2 100644
--- a/tests/deprecated_api/test_remove_1-5_metrics.py
+++ b/tests/deprecated_api/test_remove_1-5_metrics.py
@@ -17,6 +17,7 @@
 import torch
 
 from pytorch_lightning.metrics import Accuracy, MetricCollection
+from pytorch_lightning.metrics.functional.accuracy import accuracy
 from pytorch_lightning.metrics.utils import get_num_classes, select_topk, to_categorical, to_onehot
 
 
@@ -38,11 +39,11 @@ def test_v1_5_metrics_utils():
 
 
 def test_v1_5_metric_accuracy():
-    from pytorch_lightning.metrics.functional.accuracy import accuracy
+    accuracy.warned = False
     with pytest.deprecated_call(match='It will be removed in v1.5.0'):
         assert accuracy(preds=torch.tensor([0, 1]), target=torch.tensor([0, 1])) == torch.tensor(1.)
 
- from pytorch_lightning.metrics import Accuracy + Accuracy.warned = False with pytest.deprecated_call(match='It will be removed in v1.5.0'): Accuracy() From 959694bfe9cb0e9c364fa7cb58d549f8f5b5193b Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Wed, 17 Mar 2021 12:11:49 +0100 Subject: [PATCH 9/9] fix --- tests/deprecated_api/test_remove_1-5_metrics.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/deprecated_api/test_remove_1-5_metrics.py b/tests/deprecated_api/test_remove_1-5_metrics.py index fb8610b62eee2..239241dfac2ed 100644 --- a/tests/deprecated_api/test_remove_1-5_metrics.py +++ b/tests/deprecated_api/test_remove_1-5_metrics.py @@ -43,7 +43,7 @@ def test_v1_5_metric_accuracy(): with pytest.deprecated_call(match='It will be removed in v1.5.0'): assert accuracy(preds=torch.tensor([0, 1]), target=torch.tensor([0, 1])) == torch.tensor(1.) - Accuracy.warned = False + Accuracy.__init__.warned = False with pytest.deprecated_call(match='It will be removed in v1.5.0'): Accuracy() @@ -51,6 +51,8 @@ def test_v1_5_metric_accuracy(): def test_v1_5_metrics_collection(): target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2]) preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2]) + + MetricCollection.__init__.warned = False with pytest.deprecated_call( match="`pytorch_lightning.metrics.metric.MetricCollection` was deprecated since v1.3.0 in favor" " of `torchmetrics.collections.MetricCollection`. It will be removed in v1.5.0."
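The series converges on a single pattern: each metric in `pytorch_lightning.metrics` keeps only an empty stub whose `@deprecated(target=..., ver_deprecate="1.3.0", ver_remove="1.5.0")` decorator handles both the warning and the delegation to torchmetrics. The decorator itself lives in `pytorch_lightning.utilities.deprecation` and is not part of this diff, so the following is only a minimal sketch of the behaviour the stubs and tests above rely on; the assumed pieces are the one-shot `warned` attribute that patches 8 and 9 reset, and the blanket forwarding of arguments to the torchmetrics target. A real version must also handle a wrapped `__init__` (re-routing everything except `self` into the parent constructor), which this function-only sketch leaves out.

# Hypothetical sketch, NOT the actual ``pytorch_lightning.utilities.deprecation.deprecated``.
from functools import wraps
from typing import Any, Callable

from pytorch_lightning.utilities import rank_zero_warn


def deprecated(target: Callable, ver_deprecate: str, ver_remove: str) -> Callable:
    """Route calls from the decorated stub to ``target``, warning on first use."""

    def decorator(source: Callable) -> Callable:

        @wraps(source)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Warn once per wrapper; the tests re-arm the warning by resetting ``warned``.
            if not wrapper.warned:
                rank_zero_warn(
                    f"This `{source.__name__}` was deprecated in v{ver_deprecate} in favor of"
                    f" `{target.__module__}.{target.__name__}`."
                    f" It will be removed in v{ver_remove}", DeprecationWarning
                )
                wrapper.warned = True
            # The stub body is empty, so the call is answered entirely by the target.
            return target(*args, **kwargs)

        wrapper.warned = False
        return wrapper

    return decorator

Read this way, the flag resets in the last two commits line up: for the functional metric the wrapper is the module-level `accuracy`, so patch 8 resets `accuracy.warned`; for the class, the wrapped object is `__init__`, which is why patch 9 has to reset `Accuracy.__init__.warned` and `MetricCollection.__init__.warned` rather than an attribute on the class itself.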