[Py OV] Fix issue with passing (h, w) sequence to PreprocessConverter.Resize (#26687)

### Details:
- The output image size after resize must match the dimensions given in
the sequence `(h, w)` passed as the `size` argument; the smaller-edge
adjustment should be applied only when `size` is an int
([docs](https://pytorch.org/vision/stable/generated/torchvision.transforms.Resize.html)). See the sketch after this list.
- Add more combinations of parameters to
`test_torchvision_preprocessor/test_resize`
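
A minimal sketch of the intended size handling (the helper name is illustrative, not part of the converter's API):

```python
def resolve_resize_target(size, current_h, current_w):
    """Return (target_h, target_w) following torchvision.transforms.Resize semantics."""
    if isinstance(size, int):
        # int: rescale so the smaller edge equals `size`, preserving aspect ratio
        if current_h > current_w:
            return int(size * (current_h / current_w)), size
        if current_w > current_h:
            return size, int(size * (current_w / current_h))
        return size, size
    # sequence (h, w): the output must match these dimensions exactly
    return tuple(size)

assert resolve_resize_target(224, 200, 240) == (224, 268)         # smaller edge -> 224
assert resolve_resize_target((224, 224), 200, 240) == (224, 224)  # exact (h, w)
```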

### Tickets:
 - [CVS-149919](https://jira.devtools.intel.com/browse/CVS-149919)

---------

Co-authored-by: Anastasia Kuporosova <[email protected]>
almilosz and akuporos authored Sep 23, 2024
1 parent e934d83 commit bc29a58
Showing 2 changed files with 23 additions and 16 deletions.
@@ -294,12 +294,13 @@ def convert(self, input_idx: int, ppp: PrePostProcessor, transform: Callable, me
 
         target_h, target_w = _setup_size(transform.size, "Incorrect size type for Resize operation")
 
-        # rescale the smaller image edge
-        current_h, current_w = meta["image_dimensions"]
-        if current_h > current_w:
-            target_h = int(transform.size * (current_h / current_w))
-        elif current_w > current_h:
-            target_w = int(transform.size * (current_w / current_h))
+        if isinstance(transform.size, int):
+            # rescale the smaller image edge
+            current_h, current_w = meta["image_dimensions"]
+            if current_h > current_w:
+                target_h = int(transform.size * (current_h / current_w))
+            elif current_w > current_h:
+                target_w = int(transform.size * (current_w / current_h))
 
         ppp.input(input_idx).tensor().set_layout(Layout("NCHW"))

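For context, a hedged usage sketch of the code path this hunk fixes, assuming the `PreprocessConverter.from_torchvision` entry point described in the OpenVINO torchvision-preprocessing docs (the model path and example image below are placeholders):

```python
import numpy as np
from PIL import Image
from torchvision import transforms

import openvino as ov
from openvino.preprocess.torchvision import PreprocessConverter

core = ov.Core()
model = core.read_model("model.xml")  # placeholder model path

pipeline = transforms.Compose([
    transforms.Resize((224, 224)),  # sequence (h, w): exact 224x224 output after this fix
    transforms.ToTensor(),
])

# Embed the torchvision preprocessing into the model via PrePostProcessor.
example = Image.fromarray(np.random.randint(255, size=(200, 240, 3), dtype=np.uint8), "RGB")
model = PreprocessConverter.from_torchvision(
    model=model, transform=pipeline, input_example=example,
)
```
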
@@ -87,21 +87,27 @@ def test_normalize():
     reason="Ticket: 114816",
 )
 @pytest.mark.parametrize(
-    ("target_size", "interpolation", "tolerance"),
+    ("target_size", "current_size", "interpolation", "tolerance"),
     [
-        ((220, 220, 3), transforms.InterpolationMode.NEAREST, 4e-05),
-        ((200, 240, 3), transforms.InterpolationMode.NEAREST, 0.3),  # Ticket 127670
-        ((220, 220, 3), transforms.InterpolationMode.BILINEAR, 4e-03),
-        ((200, 240, 3), transforms.InterpolationMode.BILINEAR, 4e-03),
-        ((220, 220, 3), transforms.InterpolationMode.BICUBIC, 4e-03),
-        ((200, 240, 3), transforms.InterpolationMode.BICUBIC, 4e-03),
+        (224, (220, 220, 3), transforms.InterpolationMode.NEAREST, 4e-05),
+        (224, (200, 240, 3), transforms.InterpolationMode.NEAREST, 0.3),  # Ticket 127670
+        (224, (220, 220, 3), transforms.InterpolationMode.BILINEAR, 4e-03),
+        (224, (200, 240, 3), transforms.InterpolationMode.BILINEAR, 4e-03),
+        (224, (220, 220, 3), transforms.InterpolationMode.BICUBIC, 4e-03),
+        (224, (200, 240, 3), transforms.InterpolationMode.BICUBIC, 4e-03),
+        ((224, 224), (220, 220, 3), transforms.InterpolationMode.NEAREST, 4e-05),
+        ((224, 224), (200, 240, 3), transforms.InterpolationMode.NEAREST, 4e-05),
+        ((224, 224), (220, 220, 3), transforms.InterpolationMode.BILINEAR, 4e-03),
+        ((224, 224), (200, 240, 3), transforms.InterpolationMode.BILINEAR, 4e-03),
+        ((224, 224), (220, 220, 3), transforms.InterpolationMode.BICUBIC, 4e-03),
+        ((224, 224), (200, 240, 3), transforms.InterpolationMode.BICUBIC, 4e-03),
     ],
 )
-def test_resize(target_size, interpolation, tolerance):
-    test_input = np.random.randint(255, size=target_size, dtype=np.uint8)
+def test_resize(target_size, current_size, interpolation, tolerance):
+    test_input = np.random.randint(255, size=current_size, dtype=np.uint8)
     preprocess_pipeline = transforms.Compose(
         [
-            transforms.Resize(224, interpolation=interpolation),
+            transforms.Resize(target_size, interpolation=interpolation),
             transforms.ToTensor(),
         ],
     )
