diff --git a/CHANGELOG.md b/CHANGELOG.md index e79fff6c79b..468b459608b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Changed minimum supported Pytorch version from 1.8 to 1.10 ([#2145](https://github.com/Lightning-AI/torchmetrics/pull/2145)) +- Changed x-/y-axis order for `PrecisionRecallCurve` to be consistent with scikit-learn ([#2183](https://github.com/Lightning-AI/torchmetrics/pull/2183)) + + - Use arange and repeat for deterministic bincount ([#2184](https://github.com/Lightning-AI/torchmetrics/pull/2184)) @@ -46,7 +49,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Deprecated `metric._update_called` ([#2141](https://github.com/Lightning-AI/torchmetrics/pull/2141)) -- Changed x-/y-axis order for `PrecisionRecallCurve` to be consistent with scikit-learn ([#2183](https://github.com/Lightning-AI/torchmetrics/pull/2183)) +- Deprecated `specicity_at_sensitivity` in favour of `specificity_at_sensitivity` ([#2199](https://github.com/Lightning-AI/torchmetrics/pull/2199)) ### Removed diff --git a/src/torchmetrics/classification/__init__.py b/src/torchmetrics/classification/__init__.py index 684f0f2ae9f..079119a6f0d 100644 --- a/src/torchmetrics/classification/__init__.py +++ b/src/torchmetrics/classification/__init__.py @@ -117,18 +117,6 @@ ) __all__ = [ - "BinaryConfusionMatrix", - "ConfusionMatrix", - "MulticlassConfusionMatrix", - "MultilabelConfusionMatrix", - "PrecisionRecallCurve", - "BinaryPrecisionRecallCurve", - "MulticlassPrecisionRecallCurve", - "MultilabelPrecisionRecallCurve", - "BinaryStatScores", - "MulticlassStatScores", - "MultilabelStatScores", - "StatScores", "Accuracy", "BinaryAccuracy", "MulticlassAccuracy", @@ -147,6 +135,10 @@ "BinaryCohenKappa", "CohenKappa", "MulticlassCohenKappa", + "BinaryConfusionMatrix", + "ConfusionMatrix", + "MulticlassConfusionMatrix", + "MultilabelConfusionMatrix", "Dice", 
"ExactMatch", "MulticlassExactMatch", @@ -184,16 +176,21 @@ "MultilabelRecall", "Precision", "Recall", + "BinaryPrecisionRecallCurve", + "MulticlassPrecisionRecallCurve", + "MultilabelPrecisionRecallCurve", + "PrecisionRecallCurve", "MultilabelCoverageError", "MultilabelRankingAveragePrecision", "MultilabelRankingLoss", + "RecallAtFixedPrecision", "BinaryRecallAtFixedPrecision", "MulticlassRecallAtFixedPrecision", "MultilabelRecallAtFixedPrecision", - "ROC", "BinaryROC", "MulticlassROC", "MultilabelROC", + "ROC", "BinarySpecificity", "MulticlassSpecificity", "MultilabelSpecificity", @@ -201,12 +198,12 @@ "BinarySpecificityAtSensitivity", "MulticlassSpecificityAtSensitivity", "MultilabelSpecificityAtSensitivity", - "BinaryPrecisionAtFixedRecall", "SpecificityAtSensitivity", - "MulticlassPrecisionAtFixedRecall", - "MultilabelPrecisionAtFixedRecall", + "BinaryStatScores", + "MulticlassStatScores", + "MultilabelStatScores", + "StatScores", "PrecisionAtFixedRecall", - "RecallAtFixedPrecision", "BinaryPrecisionAtFixedRecall", "MulticlassPrecisionAtFixedRecall", "MultilabelPrecisionAtFixedRecall", diff --git a/src/torchmetrics/functional/classification/__init__.py b/src/torchmetrics/functional/classification/__init__.py index 069fc3625ad..514cef8091d 100644 --- a/src/torchmetrics/functional/classification/__init__.py +++ b/src/torchmetrics/functional/classification/__init__.py @@ -120,6 +120,7 @@ multiclass_specificity_at_sensitivity, multilabel_specificity_at_sensitivity, specicity_at_sensitivity, + specificity_at_sensitivity, ) from torchmetrics.functional.classification.stat_scores import ( binary_stat_scores, @@ -165,8 +166,6 @@ "multilabel_fbeta_score", "binary_fairness", "binary_groups_stat_rates", - "demographic_parity", - "equal_opportunity", "binary_hamming_distance", "hamming_distance", "multiclass_hamming_distance", @@ -212,6 +211,7 @@ "multiclass_specificity_at_sensitivity", "multilabel_specificity_at_sensitivity", "specicity_at_sensitivity", + 
"specificity_at_sensitivity", "binary_stat_scores", "multiclass_stat_scores", "multilabel_stat_scores", @@ -219,4 +219,6 @@ "binary_precision_at_fixed_recall", "multilabel_precision_at_fixed_recall", "multiclass_precision_at_fixed_recall", + "demographic_parity", + "equal_opportunity", ] diff --git a/src/torchmetrics/functional/classification/specificity_sensitivity.py b/src/torchmetrics/functional/classification/specificity_sensitivity.py index a44948f8570..d85b47eb453 100644 --- a/src/torchmetrics/functional/classification/specificity_sensitivity.py +++ b/src/torchmetrics/functional/classification/specificity_sensitivity.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import warnings from typing import List, Optional, Tuple, Union import torch @@ -414,7 +415,43 @@ def specicity_at_sensitivity( ignore_index: Optional[int] = None, validate_args: bool = True, ) -> Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: - r"""Compute the highest possible specicity value given the minimum sensitivity thresholds provided. + r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided. + + .. warning:: + This function was deprecated in v1.3.0 of Torchmetrics and will be removed in v2.0.0. + Use `specificity_at_sensitivity` instead. + + """ + warnings.warn( + "This method has will be removed in 2.0.0. 
Use `specificity_at_sensitivity` instead.", + DeprecationWarning, + stacklevel=1, + ) + return specificity_at_sensitivity( + preds=preds, + target=target, + task=task, + min_sensitivity=min_sensitivity, + thresholds=thresholds, + num_classes=num_classes, + num_labels=num_labels, + ignore_index=ignore_index, + validate_args=validate_args, + ) + + +def specificity_at_sensitivity( + preds: Tensor, + target: Tensor, + task: Literal["binary", "multiclass", "multilabel"], + min_sensitivity: float, + thresholds: Optional[Union[int, List[float], Tensor]] = None, + num_classes: Optional[int] = None, + num_labels: Optional[int] = None, + ignore_index: Optional[int] = None, + validate_args: bool = True, +) -> Union[Tensor, Tuple[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]] if False else Union[Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[List[Tensor], List[Tensor], List[Tensor]]]: + r"""Compute the highest possible specificity value given the minimum sensitivity thresholds provided. This is done by first calculating the Receiver Operating Characteristic (ROC) curve for different thresholds and then find the specificity for a given sensitivity level.