[prototype] Clean up features area (#6834)
* Clean-ups on the `features` area

* remove unnecessary imports
datumbox authored Oct 25, 2022
1 parent 7de68b0 commit b45969a
Showing 7 changed files with 11 additions and 46 deletions.
torchvision/prototype/features/__init__.py (21 changes: 2 additions & 19 deletions)
@@ -1,24 +1,7 @@
 from ._bounding_box import BoundingBox, BoundingBoxFormat
 from ._encoded import EncodedData, EncodedImage
 from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor
-from ._image import (
-    ColorSpace,
-    Image,
-    ImageType,
-    ImageTypeJIT,
-    LegacyImageType,
-    LegacyImageTypeJIT,
-    TensorImageType,
-    TensorImageTypeJIT,
-)
+from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
 from ._label import Label, OneHotLabel
 from ._mask import Mask
-from ._video import (
-    LegacyVideoType,
-    LegacyVideoTypeJIT,
-    TensorVideoType,
-    TensorVideoTypeJIT,
-    Video,
-    VideoType,
-    VideoTypeJIT,
-)
+from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
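The collapsed single-line imports re-export the same non-legacy names as before, so downstream code is unaffected. A minimal usage sketch, assuming the prototype API at this commit (the tensor shape is illustrative):

    import torch
    from torchvision.prototype import features

    # The surviving public names still resolve from the package root.
    img = features.Image(torch.rand(3, 32, 32))
    print(isinstance(img, torch.Tensor))  # True: Image subclasses torch.Tensor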
torchvision/prototype/features/_bounding_box.py (12 changes: 0 additions & 12 deletions)
@@ -61,18 +61,6 @@ def wrap_like(
     def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
         return self._make_repr(format=self.format, spatial_size=self.spatial_size)
 
-    def to_format(self, format: Union[str, BoundingBoxFormat]) -> BoundingBox:
-        if isinstance(format, str):
-            format = BoundingBoxFormat.from_str(format.upper())
-
-        return BoundingBox.wrap_like(
-            self,
-            self._F.convert_format_bounding_box(
-                self.as_subclass(torch.Tensor), old_format=self.format, new_format=format
-            ),
-            format=format,
-        )
-
     def horizontal_flip(self) -> BoundingBox:
         output = self._F.horizontal_flip_bounding_box(
             self.as_subclass(torch.Tensor), format=self.format, spatial_size=self.spatial_size
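Removing `to_format` leaves the conversion to the functional op the method wrapped. A minimal replacement sketch, assuming the prototype functional API at this commit (the box coordinates are illustrative):

    import torch
    from torchvision.prototype import features
    from torchvision.prototype.transforms import functional as F

    box = features.BoundingBox(
        torch.tensor([10.0, 10.0, 20.0, 20.0]),
        format=features.BoundingBoxFormat.XYXY,
        spatial_size=(100, 100),
    )
    # The same call the removed method made internally:
    out = F.convert_format_bounding_box(
        box.as_subclass(torch.Tensor), old_format=box.format, new_format=features.BoundingBoxFormat.XYWH
    )
    converted = features.BoundingBox.wrap_like(box, out, format=features.BoundingBoxFormat.XYWH)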
torchvision/prototype/features/_image.py (6 changes: 2 additions & 4 deletions)
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import PIL.Image
 import torch
@@ -104,7 +104,7 @@ def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
 
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     @property
     def num_channels(self) -> int:
@@ -285,7 +285,5 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Image:
 
 ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
 ImageTypeJIT = torch.Tensor
-LegacyImageType = Union[torch.Tensor, PIL.Image.Image]
-LegacyImageTypeJIT = torch.Tensor
 TensorImageType = Union[torch.Tensor, Image]
 TensorImageTypeJIT = torch.Tensor
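The `cast` call and the `# type: ignore[return-value]` comment silence the same checker complaint: `tuple(self.shape[-2:])` is inferred as `Tuple[int, ...]`, which is wider than the declared `Tuple[int, int]`. Dropping `cast` saves a typing-only import; the same change recurs in `_mask.py` and `_video.py` below. A standalone sketch of the two equivalent patterns:

    from typing import cast, Tuple

    import torch

    def spatial_size_cast(t: torch.Tensor) -> Tuple[int, int]:
        # cast() is a runtime no-op; it only narrows the type for the checker.
        return cast(Tuple[int, int], tuple(t.shape[-2:]))

    def spatial_size_ignore(t: torch.Tensor) -> Tuple[int, int]:
        # Identical runtime behavior, one less import; the widening is
        # suppressed with an error-code-scoped ignore instead.
        return tuple(t.shape[-2:])  # type: ignore[return-value]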
torchvision/prototype/features/_mask.py (4 changes: 2 additions & 2 deletions)
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import torch
 from torchvision.transforms import InterpolationMode
@@ -34,7 +34,7 @@ def wrap_like(
 
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     def horizontal_flip(self) -> Mask:
         output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
torchvision/prototype/features/_video.py (6 changes: 2 additions & 4 deletions)
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import torch
 from torchvision.transforms.functional import InterpolationMode
@@ -56,7 +56,7 @@ def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
 
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     @property
     def num_channels(self) -> int:
@@ -237,7 +237,5 @@ def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Video:
 
 VideoType = Union[torch.Tensor, Video]
 VideoTypeJIT = torch.Tensor
-LegacyVideoType = torch.Tensor
-LegacyVideoTypeJIT = torch.Tensor
 TensorVideoType = Union[torch.Tensor, Video]
 TensorVideoTypeJIT = torch.Tensor
torchvision/prototype/transforms/functional/_deprecated.py (4 changes: 2 additions & 2 deletions)
@@ -23,8 +23,8 @@ def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
 
 
 def rgb_to_grayscale(
-    inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1
-) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]:
+    inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT], num_output_channels: int = 1
+) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
     if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
         inpt = inpt.as_subclass(torch.Tensor)
         old_color_space = None
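Swapping the `Legacy*JIT` aliases for the surviving ones is type-transparent: per the alias definitions removed in `_image.py` and `_video.py` above, all four JIT aliases are plain `torch.Tensor`, so the signature TorchScript sees is unchanged. A small sketch of why:

    import torch

    # Aliases removed by this commit (from _image.py / _video.py):
    LegacyImageTypeJIT = torch.Tensor
    LegacyVideoTypeJIT = torch.Tensor
    # Aliases that replace them in the annotations:
    ImageTypeJIT = torch.Tensor
    VideoTypeJIT = torch.Tensor

    # All four names denote the same type.
    assert LegacyImageTypeJIT is ImageTypeJIT is torch.Tensor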
torchvision/prototype/transforms/functional/_misc.py (4 changes: 1 addition & 3 deletions)
@@ -16,9 +16,7 @@ def normalize_image_tensor(
         raise TypeError(f"Input tensor should be a float tensor. Got {image.dtype}.")
 
     if image.ndim < 3:
-        raise ValueError(
-            f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {image.size()}"
-        )
+        raise ValueError(f"Expected tensor to be a tensor image of size (..., C, H, W). Got {image.shape}.")
 
     if isinstance(std, (tuple, list)):
         divzero = not all(std)
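The reworded single-line error fires for any input with fewer than three dimensions, now reporting the shape directly. A minimal sketch of triggering it, assuming the prototype functional API at this commit:

    import torch
    from torchvision.prototype.transforms import functional as F

    # A 2-D float tensor lacks a channel dimension, so normalization rejects it.
    try:
        F.normalize_image_tensor(torch.rand(8, 8), mean=[0.5], std=[0.5])
    except ValueError as err:
        print(err)
        # Expected tensor to be a tensor image of size (..., C, H, W). Got torch.Size([8, 8]).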
