diff --git a/torchvision/prototype/features/__init__.py b/torchvision/prototype/features/__init__.py
index 8a461e1bedd..557c4d83ca5 100644
--- a/torchvision/prototype/features/__init__.py
+++ b/torchvision/prototype/features/__init__.py
@@ -1,24 +1,7 @@
 from ._bounding_box import BoundingBox, BoundingBoxFormat
 from ._encoded import EncodedData, EncodedImage
 from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor
-from ._image import (
-    ColorSpace,
-    Image,
-    ImageType,
-    ImageTypeJIT,
-    LegacyImageType,
-    LegacyImageTypeJIT,
-    TensorImageType,
-    TensorImageTypeJIT,
-)
+from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
 from ._label import Label, OneHotLabel
 from ._mask import Mask
-from ._video import (
-    LegacyVideoType,
-    LegacyVideoTypeJIT,
-    TensorVideoType,
-    TensorVideoTypeJIT,
-    Video,
-    VideoType,
-    VideoTypeJIT,
-)
+from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
diff --git a/torchvision/prototype/features/_bounding_box.py b/torchvision/prototype/features/_bounding_box.py
index 59b02258212..638759ae850 100644
--- a/torchvision/prototype/features/_bounding_box.py
+++ b/torchvision/prototype/features/_bounding_box.py
@@ -61,18 +61,6 @@ def wrap_like(
     def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
         return self._make_repr(format=self.format, spatial_size=self.spatial_size)
 
-    def to_format(self, format: Union[str, BoundingBoxFormat]) -> BoundingBox:
-        if isinstance(format, str):
-            format = BoundingBoxFormat.from_str(format.upper())
-
-        return BoundingBox.wrap_like(
-            self,
-            self._F.convert_format_bounding_box(
-                self.as_subclass(torch.Tensor), old_format=self.format, new_format=format
-            ),
-            format=format,
-        )
-
     def horizontal_flip(self) -> BoundingBox:
         output = self._F.horizontal_flip_bounding_box(
             self.as_subclass(torch.Tensor), format=self.format, spatial_size=self.spatial_size
diff --git a/torchvision/prototype/features/_image.py b/torchvision/prototype/features/_image.py
index d52989641a5..74904294f59 100644
--- a/torchvision/prototype/features/_image.py
+++ b/torchvision/prototype/features/_image.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import PIL.Image
 import torch
@@ -104,7 +104,7 @@ def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[overr
 
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     @property
     def num_channels(self) -> int:
@@ -285,7 +285,5 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = N
 
 ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
 ImageTypeJIT = torch.Tensor
-LegacyImageType = Union[torch.Tensor, PIL.Image.Image]
-LegacyImageTypeJIT = torch.Tensor
 TensorImageType = Union[torch.Tensor, Image]
 TensorImageTypeJIT = torch.Tensor
diff --git a/torchvision/prototype/features/_mask.py b/torchvision/prototype/features/_mask.py
index 697f0bbd9d2..a297c43c20f 100644
--- a/torchvision/prototype/features/_mask.py
+++ b/torchvision/prototype/features/_mask.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import torch
 from torchvision.transforms import InterpolationMode
@@ -34,7 +34,7 @@ def wrap_like(
 
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     def horizontal_flip(self) -> Mask:
         output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
diff --git a/torchvision/prototype/features/_video.py b/torchvision/prototype/features/_video.py
index a4d30a49c7a..a2311678304 100644
--- a/torchvision/prototype/features/_video.py
+++ b/torchvision/prototype/features/_video.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import torch
 from torchvision.transforms.functional import InterpolationMode
@@ -56,7 +56,7 @@ def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[overr
 
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     @property
     def num_channels(self) -> int:
@@ -237,7 +237,5 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = N
 
 VideoType = Union[torch.Tensor, Video]
 VideoTypeJIT = torch.Tensor
-LegacyVideoType = torch.Tensor
-LegacyVideoTypeJIT = torch.Tensor
 TensorVideoType = Union[torch.Tensor, Video]
 TensorVideoTypeJIT = torch.Tensor
diff --git a/torchvision/prototype/transforms/functional/_deprecated.py b/torchvision/prototype/transforms/functional/_deprecated.py
index 1075e9a64ca..e28bc45654c 100644
--- a/torchvision/prototype/transforms/functional/_deprecated.py
+++ b/torchvision/prototype/transforms/functional/_deprecated.py
@@ -23,8 +23,8 @@ def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Ima
 
 
 def rgb_to_grayscale(
-    inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1
-) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]:
+    inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT], num_output_channels: int = 1
+) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
     if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
         inpt = inpt.as_subclass(torch.Tensor)
     old_color_space = None
diff --git a/torchvision/prototype/transforms/functional/_misc.py b/torchvision/prototype/transforms/functional/_misc.py
index 3a1d8575cd0..5e636a949a5 100644
--- a/torchvision/prototype/transforms/functional/_misc.py
+++ b/torchvision/prototype/transforms/functional/_misc.py
@@ -16,9 +16,7 @@ def normalize_image_tensor(
         raise TypeError(f"Input tensor should be a float tensor. Got {image.dtype}.")
 
     if image.ndim < 3:
-        raise ValueError(
-            f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {image.size()}"
-        )
+        raise ValueError(f"Expected tensor to be a tensor image of size (..., C, H, W). Got {image.shape}.")
 
     if isinstance(std, (tuple, list)):
         divzero = not all(std)
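
Migration note: BoundingBox.to_format is removed above without a method-level replacement, so callers go through the functional op that the deleted body wrapped. A minimal sketch, assuming the prototype namespaces at this revision; the box values and the XYWH target format are illustrative:

    import torch

    from torchvision.prototype import features
    from torchvision.prototype.transforms import functional as F

    # A single box in XYXY format inside a 480x640 image (illustrative values).
    box = features.BoundingBox(
        [10, 20, 30, 40],
        format=features.BoundingBoxFormat.XYXY,
        spatial_size=(480, 640),
    )

    # Before: box.to_format("xywh")
    # After: call the functional op on the plain tensor and re-wrap the result,
    # exactly as the removed method did internally.
    output = F.convert_format_bounding_box(
        box.as_subclass(torch.Tensor),
        old_format=box.format,
        new_format=features.BoundingBoxFormat.XYWH,
    )
    box_xywh = features.BoundingBox.wrap_like(box, output, format=features.BoundingBoxFormat.XYWH)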