From e5477713ddc5d5eaafb9985c2f5e5c7635119447 Mon Sep 17 00:00:00 2001 From: alexwine36 Date: Thu, 13 Jun 2024 18:07:50 -0600 Subject: [PATCH] `ultralytics 8.2.32` Apple MPS device Autobatch handling (#13568) Co-authored-by: Glenn Jocher --- ultralytics/__init__.py | 2 +- ultralytics/utils/__init__.py | 2 +- ultralytics/utils/autobatch.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py index 18ab8942bfd..0ab111feb0e 100644 --- a/ultralytics/__init__.py +++ b/ultralytics/__init__.py @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -__version__ = "8.2.31" +__version__ = "8.2.32" import os diff --git a/ultralytics/utils/__init__.py b/ultralytics/utils/__init__.py index 28c7e336c5e..50cc939cfa8 100644 --- a/ultralytics/utils/__init__.py +++ b/ultralytics/utils/__init__.py @@ -395,7 +395,7 @@ def yaml_print(yaml_file: Union[str, Path, dict]) -> None: (None) """ yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file - dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True) + dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True, width=float("inf")) LOGGER.info(f"Printing '{colorstr('bold', 'black', yaml_file)}'\n\n{dump}") diff --git a/ultralytics/utils/autobatch.py b/ultralytics/utils/autobatch.py index 3bdcc06adf6..2f695df82aa 100644 --- a/ultralytics/utils/autobatch.py +++ b/ultralytics/utils/autobatch.py @@ -45,8 +45,8 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch): prefix = colorstr("AutoBatch: ") LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.") device = next(model.parameters()).device # get model device - if device.type == "cpu": - LOGGER.info(f"{prefix}CUDA not detected, using default CPU batch-size {batch_size}") + if device.type in {"cpu", "mps"}: + LOGGER.info(f"{prefix} ⚠️ intended for CUDA devices, using default batch-size {batch_size}") return batch_size if torch.backends.cudnn.benchmark: LOGGER.info(f"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}")