diff --git a/.kokoro/github/ubuntu/gpu/build.sh b/.kokoro/github/ubuntu/gpu/build.sh
index c145fae536..9d07218317 100644
--- a/.kokoro/github/ubuntu/gpu/build.sh
+++ b/.kokoro/github/ubuntu/gpu/build.sh
@@ -51,7 +51,7 @@ pip install --no-deps -e "." --progress-bar off
 # Run Extra Large Tests for Continuous builds
 if [ "${RUN_XLARGE:-0}" == "1" ]
 then
-   pytest --check_gpu --run_large --run_extra_large --durations 0 \
+   pytest --cache-clear --check_gpu --run_large --run_extra_large --durations 0 \
       keras_cv/bounding_box \
       keras_cv/callbacks \
       keras_cv/losses \
@@ -65,7 +65,7 @@ then
       keras_cv/models/segmentation \
       keras_cv/models/stable_diffusion
 else
-   pytest --check_gpu --run_large --durations 0 \
+   pytest --cache-clear --check_gpu --run_large --durations 0 \
       keras_cv/bounding_box \
       keras_cv/callbacks \
       keras_cv/losses \
diff --git a/keras_cv/metrics/coco/pycoco_wrapper.py b/keras_cv/metrics/coco/pycoco_wrapper.py
index 3c09784388..659cdef0a0 100644
--- a/keras_cv/metrics/coco/pycoco_wrapper.py
+++ b/keras_cv/metrics/coco/pycoco_wrapper.py
@@ -125,6 +125,9 @@ def _convert_predictions_to_coco_annotations(predictions):
     num_batches = len(predictions["source_id"])
     for i in range(num_batches):
         batch_size = predictions["source_id"][i].shape[0]
+        predictions["detection_boxes"][i] = predictions["detection_boxes"][
+            i
+        ].copy()
         for j in range(batch_size):
             max_num_detections = predictions["num_detections"][i][j]
             predictions["detection_boxes"][i][j] = _yxyx_to_xywh(
diff --git a/keras_cv/models/segmentation/basnet/basnet_test.py b/keras_cv/models/segmentation/basnet/basnet_test.py
index 81ebd8e13e..3571f4e005 100644
--- a/keras_cv/models/segmentation/basnet/basnet_test.py
+++ b/keras_cv/models/segmentation/basnet/basnet_test.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import gc
 import os
 
 import numpy as np
@@ -23,13 +24,13 @@
 from keras_cv.backend import ops
 from keras_cv.backend.config import keras_3
 from keras_cv.models import BASNet
-from keras_cv.models import ResNet34Backbone
+from keras_cv.models import ResNet18Backbone
 from keras_cv.tests.test_case import TestCase
 
 
 class BASNetTest(TestCase):
     def test_basnet_construction(self):
-        backbone = ResNet34Backbone()
+        backbone = ResNet18Backbone()
         model = BASNet(
             input_shape=[288, 288, 3], backbone=backbone, num_classes=1
         )
@@ -41,7 +42,7 @@ def test_basnet_construction(self):
 
     @pytest.mark.large
     def test_basnet_call(self):
-        backbone = ResNet34Backbone()
+        backbone = ResNet18Backbone()
         model = BASNet(
             input_shape=[288, 288, 3], backbone=backbone, num_classes=1
         )
@@ -61,7 +62,7 @@ def test_weights_change(self):
         ds = ds.repeat(2)
         ds = ds.batch(2)
 
-        backbone = ResNet34Backbone()
+        backbone = ResNet18Backbone()
         model = BASNet(
             input_shape=[288, 288, 3], backbone=backbone, num_classes=1
         )
@@ -99,7 +100,7 @@ def test_with_model_preset_forward_pass(self):
     def test_saved_model(self):
         target_size = [288, 288, 3]
 
-        backbone = ResNet34Backbone()
+        backbone = ResNet18Backbone()
         model = BASNet(
             input_shape=[288, 288, 3], backbone=backbone, num_classes=1
         )
@@ -112,6 +113,9 @@ def test_saved_model(self):
             model.save(save_path)
         else:
             model.save(save_path, save_format="keras_v3")
+        # Free up model memory
+        del model
+        gc.collect()
         restored_model = keras.models.load_model(save_path)
 
         # Check we got the real object back.
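
Note on `.kokoro/github/ubuntu/gpu/build.sh`: `--cache-clear` is pytest's stock option for wiping the cache plugin's stored state (e.g. last-failed data) at the start of the run, so successive CI builds on the same worker start from a clean cache; `--check_gpu`, `--run_large`, and `--run_extra_large` are KerasCV's own test-suite flags and are unchanged.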
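
Note on `keras_cv/metrics/coco/pycoco_wrapper.py`: the added `.copy()` matters because `predictions["detection_boxes"][i][j] = _yxyx_to_xywh(...)` assigns into a row of an array the caller still holds, so without the copy each evaluation converts the caller's yxyx boxes to xywh in place, and a repeat evaluation would see already-converted data. A minimal sketch of the aliasing, with a hypothetical `yxyx_to_xywh` standing in for the private helper:

```python
import numpy as np


def yxyx_to_xywh(box):
    # Hypothetical stand-in for keras_cv's private _yxyx_to_xywh:
    # [y1, x1, y2, x2] -> [x, y, width, height].
    y1, x1, y2, x2 = box
    return np.array([x1, y1, x2 - x1, y2 - y1])


# One "batch" of detection boxes in yxyx format, shared with the caller.
detection_boxes = [np.array([[10.0, 20.0, 30.0, 40.0]])]

batch = detection_boxes[0]         # no copy: an alias of the caller's array
batch[0] = yxyx_to_xywh(batch[0])  # row assignment writes through the alias
print(detection_boxes[0][0])       # [20. 10. 20. 20.] -- caller's data mutated

detection_boxes = [np.array([[10.0, 20.0, 30.0, 40.0]])]
batch = detection_boxes[0].copy()  # as in the patch: detach before converting
batch[0] = yxyx_to_xywh(batch[0])
print(detection_boxes[0][0])       # [10. 20. 30. 40.] -- input left in yxyx
```

With the copy in place, repeated metric evaluations see the same yxyx input each time instead of previously converted boxes.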
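
Note on `keras_cv/models/segmentation/basnet/basnet_test.py`: both changes target peak memory on the GPU test runners; `ResNet18Backbone` is a smaller backbone than `ResNet34Backbone`, and releasing the saved model before loading it back keeps two copies from coexisting. A minimal sketch of the save/release/reload pattern, assuming a Keras 3 environment and a throwaway `model.keras` path:

```python
import gc

import keras

# Illustrative small model; the test uses BASNet with a ResNet18 backbone.
model = keras.Sequential([keras.layers.Dense(1)])
model.build((None, 4))
model.save("model.keras")

# Drop the last reference to the in-memory model before loading the saved
# copy, so the original and the restored model never coexist.
del model
gc.collect()  # prompt Python to reclaim the freed objects now

restored_model = keras.models.load_model("model.keras")
```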