From 1359ab28510ca8b90b4f4bc4348dc4f7f5eab18a Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Fri, 15 Sep 2023 20:08:46 +0200
Subject: [PATCH 1/4] [PT FE] Add tests for detectron2 models

---
 .github/workflows/linux.yml          |  2 +
 .../models_hub_common/utils.py       |  3 +-
 .../torch_tests/detectron2_models    | 65 +++++++++++++
 .../torch_tests/detectron2_precommit | 26 +++++
 .../torch_tests/requirements.txt     |  2 +-
 .../torch_tests/test_detectron2.py   | 95 +++++++++++++++++++
 6 files changed, 191 insertions(+), 2 deletions(-)
 create mode 100644 tests/model_hub_tests/torch_tests/detectron2_models
 create mode 100644 tests/model_hub_tests/torch_tests/detectron2_precommit
 create mode 100644 tests/model_hub_tests/torch_tests/test_detectron2.py

diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index 22d9037988634b..f76e7ee1357622 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -924,8 +924,10 @@ jobs:
         run: |
           python3 -m pip install openvino --find-links=${{ env.INSTALL_DIR }}/tools
       - name: PyTorch Models Tests
+        # detectron2 requires torch to be already installed
         run: |
           python3 -m pip install -r ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/torch_tests/requirements.txt
+          python3 -m pip install git+https://github.com/facebookresearch/detectron2.git
           export PYTHONPATH=${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}:$PYTHONPATH
           python3 -m pytest ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/torch_tests/ -m ${{ env.TYPE }} --html=${{ env.INSTALL_TEST_DIR }}/TEST-torch_model_tests.html --self-contained-html
         env:
diff --git a/tests/model_hub_tests/models_hub_common/utils.py b/tests/model_hub_tests/models_hub_common/utils.py
index efb4bd8a64c2e9..b5699a80e62b43 100644
--- a/tests/model_hub_tests/models_hub_common/utils.py
+++ b/tests/model_hub_tests/models_hub_common/utils.py
@@ -32,7 +32,8 @@ def compare_two_tensors(ov_res, fw_res, eps):
     is_ok = True
     if not np.allclose(ov_res, fw_res, atol=eps, rtol=eps, equal_nan=True):
         is_ok = False
-        print("Max diff is {}".format(np.array(abs(ov_res - fw_res)).max()))
+        max_diff = np.abs(ov_res.astype(np.float32) - fw_res.astype(np.float32)).max()
+        print("Max diff is {}".format(max_diff))
     else:
         print("Accuracy validation successful!\n")
     print("absolute eps: {}, relative eps: {}".format(eps, eps))
diff --git a/tests/model_hub_tests/torch_tests/detectron2_models b/tests/model_hub_tests/torch_tests/detectron2_models
new file mode 100644
index 00000000000000..091464ab8d3080
--- /dev/null
+++ b/tests/model_hub_tests/torch_tests/detectron2_models
@@ -0,0 +1,65 @@
+COCO-Detection/fast_rcnn_R_50_FPN_1x,none
+COCO-Detection/faster_rcnn_R_101_C4_3x,none
+COCO-Detection/faster_rcnn_R_101_DC5_3x,none
+COCO-Detection/faster_rcnn_R_101_FPN_3x,none
+COCO-Detection/faster_rcnn_R_50_C4_1x,none
+COCO-Detection/faster_rcnn_R_50_C4_3x,none
+COCO-Detection/faster_rcnn_R_50_DC5_1x,none
+COCO-Detection/faster_rcnn_R_50_DC5_3x,none
+COCO-Detection/faster_rcnn_R_50_FPN_1x,none
+COCO-Detection/faster_rcnn_R_50_FPN_3x,none
+COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x,none
+COCO-Detection/retinanet_R_101_FPN_3x,none
+COCO-Detection/retinanet_R_50_FPN_1x,none
+COCO-Detection/retinanet_R_50_FPN_3x,none
+COCO-Detection/rpn_R_50_C4_1x,none
+COCO-Detection/rpn_R_50_FPN_1x,none
+COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x,none
+COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x,none
+COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x,none
+COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x,none
+COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x,none
+COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x,none
+COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x,none
+COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x,none
+#COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x_giou,none - Pretrained model is not available!
+COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x,none
+COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x,none
+#COCO-Keypoints/Base-Keypoint-RCNN-FPN,none - Pretrained model is not available!
+COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x,none
+COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x,none
+COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x,none
+COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x,none
+#COCO-PanopticSegmentation/Base-Panoptic-FPN,none - Pretrained model is not available!
+COCO-PanopticSegmentation/panoptic_fpn_R_101_3x,none
+COCO-PanopticSegmentation/panoptic_fpn_R_50_1x,none
+COCO-PanopticSegmentation/panoptic_fpn_R_50_3x,none
+Cityscapes/mask_rcnn_R_50_FPN,none
+Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x,none
+Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x,none
+Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x,none
+LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x,none
+LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x,none
+LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x,none
+#LVISv1-InstanceSegmentation/mask_rcnn_R_101_FPN_1x,none - Pretrained model is not available!
+#LVISv1-InstanceSegmentation/mask_rcnn_R_50_FPN_1x,none - Pretrained model is not available!
+#LVISv1-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x,none - Pretrained model is not available!
+#Misc/mask_rcnn_R_50_FPN_1x_cls_agnostic,none - Pretrained model is not available!
+Misc/cascade_mask_rcnn_R_50_FPN_1x,none
+Misc/cascade_mask_rcnn_R_50_FPN_3x,none
+Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv,none
+Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5,none
+Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5,none
+Misc/mask_rcnn_R_50_FPN_3x_gn,none
+Misc/mask_rcnn_R_50_FPN_3x_syncbn,none
+Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x,none
+Misc/scratch_mask_rcnn_R_50_FPN_3x_gn,none
+Misc/scratch_mask_rcnn_R_50_FPN_9x_gn,none
+Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn,none
+#Misc/semantic_R_50_FPN_1x,none - Pretrained model is not available!
+PascalVOC-Detection/faster_rcnn_R_50_C4,none
+#PascalVOC-Detection/faster_rcnn_R_50_FPN,none - Pretrained model is not available!
+#Base-RCNN-C4,none - Pretrained model is not available!
+#Base-RCNN-DilatedC5,none - Pretrained model is not available!
+#Base-RCNN-FPN,none - Pretrained model is not available!
+#Base-RetinaNet,none - Pretrained model is not available!
diff --git a/tests/model_hub_tests/torch_tests/detectron2_precommit b/tests/model_hub_tests/torch_tests/detectron2_precommit
new file mode 100644
index 00000000000000..155e4d2a359779
--- /dev/null
+++ b/tests/model_hub_tests/torch_tests/detectron2_precommit
@@ -0,0 +1,26 @@
+COCO-Detection/faster_rcnn_R_50_C4_1x,none
+COCO-Detection/faster_rcnn_R_50_DC5_3x,none
+COCO-Detection/faster_rcnn_R_50_FPN_1x,none
+COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x,none
+COCO-Detection/retinanet_R_50_FPN_1x,none
+COCO-Detection/rpn_R_50_C4_1x,none
+COCO-Detection/rpn_R_50_FPN_1x,none
+COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x,none
+COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x,none
+COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x,none
+COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x,none
+COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x,none
+COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x,none
+Cityscapes/mask_rcnn_R_50_FPN,none
+Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x,none
+Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x,none
+Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x,none
+LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x,none
+LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x,none
+Misc/cascade_mask_rcnn_R_50_FPN_3x,none
+Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv,none
+Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5,none
+Misc/mask_rcnn_R_50_FPN_3x_gn,none
+Misc/mask_rcnn_R_50_FPN_3x_syncbn,none
+Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn,none
+PascalVOC-Detection/faster_rcnn_R_50_C4,none
diff --git a/tests/model_hub_tests/torch_tests/requirements.txt b/tests/model_hub_tests/torch_tests/requirements.txt
index de68479afe714e..9a859794b09d04 100644
--- a/tests/model_hub_tests/torch_tests/requirements.txt
+++ b/tests/model_hub_tests/torch_tests/requirements.txt
@@ -4,4 +4,4 @@ pytest
 pytest-html
 torch
 torchvision
-av
\ No newline at end of file
+av
diff --git a/tests/model_hub_tests/torch_tests/test_detectron2.py b/tests/model_hub_tests/torch_tests/test_detectron2.py
new file mode 100644
index 00000000000000..0662524b2c4863
--- /dev/null
+++ b/tests/model_hub_tests/torch_tests/test_detectron2.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import pytest
+import torch
+import tempfile
+import torchvision.transforms.functional as F
+from models_hub_common.test_convert_model import TestConvertModel
+from openvino import convert_model
+from models_hub_common.utils import get_models_list, compare_two_tensors
+
+
+class TestTorchHubConvertModel(TestConvertModel):
+    def setup_class(self):
+        from PIL import Image
+        import requests
+
+        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+        self.image = Image.open(requests.get(url, stream=True).raw)
+        self.image = self.image.resize([640, 480])
+
+    def load_model(self, model_name, model_link):
+        from detectron2 import model_zoo, export
+        from detectron2.modeling import build_model
+        from detectron2.checkpoint import DetectionCheckpointer
+        from detectron2.config import CfgNode
+        import torchvision.transforms as transforms
+
+        transform = transforms.Compose([transforms.PILToTensor()])
+        image = transform(self.image)
+
+        cfg = model_zoo.get_config(model_name + ".yaml", trained=True)
+        assert isinstance(cfg, CfgNode), "Unexpected config"
+        cfg.MODEL.DEVICE = "cpu"
+        model = build_model(cfg)
+        DetectionCheckpointer(model, save_to_disk=False).load(cfg.MODEL.WEIGHTS)
+
+        model.eval()
+        inputs = [{"image": image,
+                   "height": torch.tensor(image.shape[1]),
+                   "width": torch.tensor(image.shape[2])}]
+        adapter = export.TracingAdapter(model, inputs)
+
+        self.example = adapter.flattened_inputs
+        return adapter
+
+    def get_inputs_info(self, model_obj):
+        return None
+
+    def prepare_inputs(self, inputs_info):
+        return [i.numpy() for i in self.example]
+
+    def convert_model(self, model_obj):
+        ov_model = convert_model(model_obj, example_input=self.example)
+        return ov_model
+
+    def infer_fw_model(self, model_obj, inputs):
+        fw_outputs = model_obj(*[torch.from_numpy(i) for i in inputs])
+        if isinstance(fw_outputs, dict):
+            for k in fw_outputs.keys():
+                fw_outputs[k] = fw_outputs[k].numpy(force=True)
+        elif isinstance(fw_outputs, (list, tuple)):
+            fw_outputs = [o.numpy(force=True) for o in fw_outputs]
+        else:
+            fw_outputs = [fw_outputs.numpy(force=True)]
+        return fw_outputs
+
+    def compare_results(self, fw_outputs, ov_outputs):
+        assert len(fw_outputs) == len(ov_outputs), \
+            "Different number of outputs between PyTorch and OpenVINO:" \
+            " {} vs. {}".format(len(fw_outputs), len(ov_outputs))
+
+        fw_eps = 5e-2
+        is_ok = True
+        for i in range(len(ov_outputs)):
+            cur_fw_res = fw_outputs[i]
+            cur_ov_res = ov_outputs[i]
+            l = min(len(cur_fw_res), len(cur_ov_res))
+            assert l > 0 or len(cur_fw_res) == len(cur_ov_res), "No boxes were selected."
+            print(f"fw_res: {cur_fw_res};\n ov_res: {cur_ov_res}")
+            is_ok = compare_two_tensors(cur_ov_res[:l], cur_fw_res[:l], fw_eps)
+        assert is_ok, "Accuracy validation failed"
+
+    @pytest.mark.parametrize("name,type,mark,reason",
+                             get_models_list(os.path.join(os.path.dirname(__file__), "detectron2_precommit")))
+    @pytest.mark.precommit
+    def test_convert_model_all_models(self, name, type, mark, reason, ie_device):
+        self.run(name, None, ie_device)
+
+    @pytest.mark.parametrize("name,type,mark,reason",
+                             get_models_list(os.path.join(os.path.dirname(__file__), "detectron2_models")))
+    @pytest.mark.nightly
+    def test_convert_model_all_models(self, name, type, mark, reason, ie_device):
+        self.run(name, None, ie_device)

From abcdae9abaf76b721dcf693c5af1db2e760ce10b Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Fri, 15 Sep 2023 20:52:52 +0200
Subject: [PATCH 2/4] Fix names of tests

---
 tests/model_hub_tests/torch_tests/test_detectron2.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/model_hub_tests/torch_tests/test_detectron2.py b/tests/model_hub_tests/torch_tests/test_detectron2.py
index 0662524b2c4863..a3010428887ba8 100644
--- a/tests/model_hub_tests/torch_tests/test_detectron2.py
+++ b/tests/model_hub_tests/torch_tests/test_detectron2.py
@@ -11,7 +11,7 @@
 from models_hub_common.utils import get_models_list, compare_two_tensors
 
 
-class TestTorchHubConvertModel(TestConvertModel):
+class TestDetectron2ConvertModel(TestConvertModel):
     def setup_class(self):
         from PIL import Image
         import requests
@@ -85,11 +85,11 @@ def compare_results(self, fw_outputs, ov_outputs):
     @pytest.mark.parametrize("name,type,mark,reason",
                              get_models_list(os.path.join(os.path.dirname(__file__), "detectron2_precommit")))
     @pytest.mark.precommit
-    def test_convert_model_all_models(self, name, type, mark, reason, ie_device):
+    def test_detectron2_precommit(self, name, type, mark, reason, ie_device):
         self.run(name, None, ie_device)
 
     @pytest.mark.parametrize("name,type,mark,reason",
                              get_models_list(os.path.join(os.path.dirname(__file__), "detectron2_models")))
     @pytest.mark.nightly
-    def test_convert_model_all_models(self, name, type, mark, reason, ie_device):
+    def test_detectron2_all_models(self, name, type, mark, reason, ie_device):
         self.run(name, None, ie_device)

From d4b5a98f0f601d4400d9e5ddd6eae2b7bf47d76e Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Mon, 18 Sep 2023 09:55:23 +0200
Subject: [PATCH 3/4] Apply suggestions from code review

Co-authored-by: Roman Kazantsev
---
 tests/model_hub_tests/torch_tests/test_detectron2.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/tests/model_hub_tests/torch_tests/test_detectron2.py b/tests/model_hub_tests/torch_tests/test_detectron2.py
index a3010428887ba8..e8859905622260 100644
--- a/tests/model_hub_tests/torch_tests/test_detectron2.py
+++ b/tests/model_hub_tests/torch_tests/test_detectron2.py
@@ -4,8 +4,6 @@
 import os
 import pytest
 import torch
-import tempfile
-import torchvision.transforms.functional as F
 from models_hub_common.test_convert_model import TestConvertModel
 from openvino import convert_model
 from models_hub_common.utils import get_models_list, compare_two_tensors

From 92d8ce1563477a405a466e74030e103fe9e5292c Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Mon, 18 Sep 2023 10:04:00 +0200
Subject: [PATCH 4/4] Create secondary requirements file

---
 .github/workflows/linux.yml                                   | 3 +--
 tests/model_hub_tests/torch_tests/requirements_secondary.txt  | 3 +++
 2 files changed, 4 insertions(+), 2 deletions(-)
 create mode 100644 tests/model_hub_tests/torch_tests/requirements_secondary.txt

diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index f76e7ee1357622..1696eeb869c59d 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -924,10 +924,9 @@ jobs:
         run: |
           python3 -m pip install openvino --find-links=${{ env.INSTALL_DIR }}/tools
       - name: PyTorch Models Tests
-        # detectron2 requires torch to be already installed
         run: |
           python3 -m pip install -r ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/torch_tests/requirements.txt
-          python3 -m pip install git+https://github.com/facebookresearch/detectron2.git
+          python3 -m pip install -r ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/torch_tests/requirements_secondary.txt
           export PYTHONPATH=${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}:$PYTHONPATH
           python3 -m pytest ${{ env.MODEL_HUB_TESTS_INSTALL_DIR }}/torch_tests/ -m ${{ env.TYPE }} --html=${{ env.INSTALL_TEST_DIR }}/TEST-torch_model_tests.html --self-contained-html
         env:
diff --git a/tests/model_hub_tests/torch_tests/requirements_secondary.txt b/tests/model_hub_tests/torch_tests/requirements_secondary.txt
new file mode 100644
index 00000000000000..f59753797b27a0
--- /dev/null
+++ b/tests/model_hub_tests/torch_tests/requirements_secondary.txt
@@ -0,0 +1,3 @@
+-c ../../constraints.txt
+# This file contains requirements dependent on modules in requirements.txt
+git+https://github.com/facebookresearch/detectron2.git
\ No newline at end of file
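
For reference, the conversion flow that load_model/convert_model in test_detectron2.py exercise can be reproduced standalone roughly as in the sketch below. It is only a sketch: the config name and the synthetic input image are placeholders, detectron2 is assumed to be installed from requirements_secondary.txt, and only calls already used in the patch are relied on.

    import torch
    from detectron2 import model_zoo, export
    from detectron2.checkpoint import DetectionCheckpointer
    from detectron2.modeling import build_model
    from openvino import convert_model

    # Placeholder config name; any entry from detectron2_models or detectron2_precommit
    # is handled the same way by the test.
    cfg = model_zoo.get_config("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml", trained=True)
    cfg.MODEL.DEVICE = "cpu"
    model = build_model(cfg)
    DetectionCheckpointer(model, save_to_disk=False).load(cfg.MODEL.WEIGHTS)
    model.eval()

    # detectron2 models take a list of dicts, so TracingAdapter is used to flatten
    # the inputs into plain tensors that openvino.convert_model can trace.
    image = torch.randint(0, 255, (3, 480, 640), dtype=torch.uint8)  # synthetic stand-in for the COCO image
    inputs = [{"image": image,
               "height": torch.tensor(image.shape[1]),
               "width": torch.tensor(image.shape[2])}]
    adapter = export.TracingAdapter(model, inputs)

    ov_model = convert_model(adapter, example_input=adapter.flattened_inputs)

With PYTHONPATH pointing at tests/model_hub_tests, running "python3 -m pytest torch_tests/test_detectron2.py -m precommit" exercises the detectron2_precommit subset, roughly mirroring the workflow step above.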