Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

rename MeanAveragePrecision #754

Merged
merged 8 commits into from
Jan 14, 2022
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Renamed image SSIM metric: ([#747](https://github.com/PyTorchLightning/metrics/pull/747))
* `torchmetrics.functional.ssim` -> `torchmetrics.functional.scale_invariant_signal_noise_ratio`
* `torchmetrics.SSIM` -> `torchmetrics.StructuralSimilarityIndexMeasure`
- Renamed detection `MAP` to `MeanAveragePrecision` metric ([#754](https://github.com/PyTorchLightning/metrics/pull/754))

### Removed

Expand Down
6 changes: 3 additions & 3 deletions docs/source/references/modules.rst
Original file line number Diff line number Diff line change
Expand Up @@ -416,10 +416,10 @@ Detection

Object detection metrics can be used to evaluate the predicted detections with given groundtruth detections on images.

MAP
~~~
MeanAveragePrecision
~~~~~~~~~~~~~~~~~~~~

.. autoclass:: torchmetrics.detection.map.MAP
.. autoclass:: torchmetrics.detection.map.MeanAveragePrecision
:noindex:

**********
Expand Down
2 changes: 1 addition & 1 deletion requirements/detection.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
torchvision
torchvision>=0.8
2 changes: 1 addition & 1 deletion requirements/devel.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
# add extra requirements
-r image.txt
-r text.txt
-r detection.txt
# -r detection.txt # version collision with min version of PyTorch
-r audio.txt

# add extra testing
Expand Down
18 changes: 9 additions & 9 deletions tests/detection/test_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
import torch

from tests.helpers.testers import MetricTester
from torchmetrics.detection.map import MAP
from torchmetrics.detection.map import MeanAveragePrecision
from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE, _TORCHVISION_GREATER_EQUAL_0_8

Input = namedtuple("Input", ["preds", "target"])
Expand Down Expand Up @@ -181,7 +181,7 @@ def test_map(self, ddp):
ddp=ddp,
preds=_inputs.preds,
target=_inputs.target,
metric_class=MAP,
metric_class=MeanAveragePrecision,
sk_metric=_compare_fn,
dist_sync_on_step=False,
check_batch=False,
Expand All @@ -193,16 +193,16 @@ def test_map(self, ddp):
@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision>=0.8.0 is installed")
def test_error_on_wrong_init():
    """Test that the constructor validates its arguments.

    A default construction must succeed, while a non-boolean
    ``class_metrics`` must raise ``ValueError``.
    """
    MeanAveragePrecision()  # no error

    # `class_metrics` must be a bool; an int (even 0/1) is rejected.
    with pytest.raises(ValueError, match="Expected argument `class_metrics` to be a boolean"):
        MeanAveragePrecision(class_metrics=0)


@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 is installed")
def test_empty_preds():
"""Test empty predictions."""
metric = MAP()
metric = MeanAveragePrecision()

metric.update(
[
Expand All @@ -218,7 +218,7 @@ def test_empty_preds():
@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 is installed")
def test_empty_ground_truths():
"""Test empty ground truths."""
metric = MAP()
metric = MeanAveragePrecision()

metric.update(
[
Expand All @@ -242,7 +242,7 @@ def test_empty_ground_truths():
@pytest.mark.skipif(_gpu_test_condition, reason="test requires CUDA availability")
def test_map_gpu():
    """Test that update and compute work when the metric lives on a single GPU."""
    metric = MeanAveragePrecision()
    # Move the metric (and its internal states) to the GPU before feeding it data.
    metric = metric.to("cuda")
    metric.update(_inputs.preds[0], _inputs.target[0])
    metric.compute()
Expand All @@ -251,14 +251,14 @@ def test_map_gpu():
@pytest.mark.skipif(_pytest_condition, reason="test requires that pycocotools and torchvision>=0.8.0 are installed")
def test_empty_metric():
    """Test that calling ``compute`` on a metric that never received an update does not raise."""
    metric = MeanAveragePrecision()
    metric.compute()


@pytest.mark.skipif(_pytest_condition, reason="test requires that torchvision=>0.8.0 is installed")
def test_error_on_wrong_input():
"""Test class input validation."""
metric = MAP()
metric = MeanAveragePrecision()

metric.update([], []) # no error

Expand Down
5 changes: 5 additions & 0 deletions torchmetrics/detection/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,3 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from torchmetrics.utilities.imports import _TORCHVISION_GREATER_EQUAL_0_8

if _TORCHVISION_GREATER_EQUAL_0_8:
from torchmetrics.detection.map import MeanAveragePrecision # noqa: F401
91 changes: 83 additions & 8 deletions torchmetrics/detection/map.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,11 @@
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple

import torch
from deprecate import deprecated, void
from torch import IntTensor, Size, Tensor

from torchmetrics.metric import Metric
from torchmetrics.utilities import _future_warning
from torchmetrics.utilities.imports import _TORCHVISION_AVAILABLE, _TORCHVISION_GREATER_EQUAL_0_8

if _TORCHVISION_AVAILABLE and _TORCHVISION_GREATER_EQUAL_0_8:
Expand Down Expand Up @@ -129,7 +131,7 @@ def _fix_empty_tensors(boxes: Tensor) -> Tensor:
return boxes


class MAP(Metric):
class MeanAveragePrecision(Metric):
r"""
Computes the `Mean-Average-Precision (mAP) and Mean-Average-Recall (mAR)
<https://jonathan-hui.medium.com/map-mean-average-precision-for-object-detection-45c121a31173>`_
Expand Down Expand Up @@ -181,7 +183,7 @@ class MAP(Metric):

Example:
>>> import torch
>>> from torchmetrics.detection.map import MAP
>>> from torchmetrics.detection.map import MeanAveragePrecision
>>> preds = [
... dict(
... boxes=torch.Tensor([[258.0, 41.0, 606.0, 285.0]]),
Expand All @@ -195,7 +197,7 @@ class MAP(Metric):
... labels=torch.IntTensor([0]),
... )
... ]
>>> metric = MAP() # doctest: +SKIP
>>> metric = MeanAveragePrecision() # doctest: +SKIP
>>> metric.update(preds, target) # doctest: +SKIP
>>> from pprint import pprint
>>> pprint(metric.compute()) # doctest: +SKIP
Expand Down Expand Up @@ -241,10 +243,10 @@ def __init__(
dist_sync_fn=dist_sync_fn,
)

if not (_TORCHVISION_AVAILABLE and _TORCHVISION_GREATER_EQUAL_0_8):
if not _TORCHVISION_GREATER_EQUAL_0_8:
raise ModuleNotFoundError(
"`MAP` metric requires that `torchvision` version 0.8.0 or newer is installed."
" Please install with `pip install torchvision` or `pip install torchmetrics[detection]`."
"`MeanAveragePrecision` metric requires that `torchvision` version 0.8.0 or newer is installed."
" Please install with `pip install torchvision>=0.8` or `pip install torchmetrics[detection]`."
)

allowed_box_formats = ("xyxy", "xywh", "cxcywh")
Expand Down Expand Up @@ -430,7 +432,9 @@ def _evaluate_image(
if torch.numel(ious) > 0:
for idx_iou, t in enumerate(self.iou_thresholds):
for idx_det in range(nb_det):
m = MAP._find_best_gt_match(t, nb_gt, gt_matches, idx_iou, gt_ignore, ious, idx_det)
m = MeanAveragePrecision._find_best_gt_match(
t, nb_gt, gt_matches, idx_iou, gt_ignore, ious, idx_det
)
if m != -1:
det_ignore[idx_iou, idx_det] = gt_ignore[m]
det_matches[idx_iou, idx_det] = True
Expand Down Expand Up @@ -573,7 +577,7 @@ def _calculate(self, class_ids: List) -> Tuple[Dict, MAPMetricResults, MARMetric
for idx_cls in range(nb_classes):
for idx_bbox_area in range(nb_bbox_areas):
for idx_max_det_thrs, max_det in enumerate(self.max_detection_thresholds):
recall, precision, scores = MAP.__calculate_recall_precision_scores(
recall, precision, scores = MeanAveragePrecision.__calculate_recall_precision_scores(
recall,
precision,
scores,
Expand Down Expand Up @@ -731,3 +735,74 @@ def compute(self) -> dict:
metrics.map_per_class = map_per_class_values
metrics[f"mar_{self.max_detection_thresholds[-1]}_per_class"] = mar_max_dets_per_class_values
return metrics


class MAP(MeanAveragePrecision):
    r"""
    Computes the `Mean-Average-Precision (mAP) and Mean-Average-Recall (mAR)
    <https://jonathan-hui.medium.com/map-mean-average-precision-for-object-detection-45c121a31173>`_
    for object detection tasks.

    .. deprecated:: v0.7
        Use :class:`torchmetrics.detection.MeanAveragePrecision`. Will be removed in v0.8.

    Example:
        >>> import torch
        >>> preds = [
        ...     dict(
        ...         boxes=torch.Tensor([[258.0, 41.0, 606.0, 285.0]]),
        ...         scores=torch.Tensor([0.536]),
        ...         labels=torch.IntTensor([0]),
        ...     )
        ... ]
        >>> target = [
        ...     dict(
        ...         boxes=torch.Tensor([[214.0, 41.0, 562.0, 285.0]]),
        ...         labels=torch.IntTensor([0]),
        ...     )
        ... ]
        >>> metric = MAP()  # doctest: +SKIP
        >>> metric.update(preds, target)  # doctest: +SKIP
        >>> from pprint import pprint
        >>> pprint(metric.compute())  # doctest: +SKIP
        {'map': tensor(0.6000),
         'map_50': tensor(1.),
         'map_75': tensor(1.),
         'map_small': tensor(-1.),
         'map_medium': tensor(-1.),
         'map_large': tensor(0.6000),
         'mar_1': tensor(0.6000),
         'mar_10': tensor(0.6000),
         'mar_100': tensor(0.6000),
         'mar_small': tensor(-1.),
         'mar_medium': tensor(-1.),
         'mar_large': tensor(0.6000),
         'map_per_class': tensor(-1.),
         'mar_100_per_class': tensor(-1.)
        }
    """

    # `@deprecated` emits a FutureWarning and forwards construction to
    # `MeanAveragePrecision`; `void(...)` only marks the arguments as
    # intentionally unused in this shim body.
    @deprecated(target=MeanAveragePrecision, deprecated_in="0.7", remove_in="0.8", stream=_future_warning)
    def __init__(
        self,
        box_format: str = "xyxy",
        iou_thresholds: Optional[List[float]] = None,
        rec_thresholds: Optional[List[float]] = None,
        max_detection_thresholds: Optional[List[int]] = None,
        class_metrics: bool = False,
        compute_on_step: bool = True,
        dist_sync_on_step: bool = False,
        process_group: Optional[Any] = None,
        dist_sync_fn: Callable = None,
    ) -> None:  # type: ignore
        void(
            box_format,
            iou_thresholds,
            rec_thresholds,
            max_detection_thresholds,
            class_metrics,
            compute_on_step,
            dist_sync_on_step,
            process_group,
            dist_sync_fn,
        )