ultralytics 8.2.32 Apple MPS device Autobatch handling (ultralytics#13568)

Co-authored-by: Glenn Jocher <[email protected]>
alexwine36 and glenn-jocher authored Jun 14, 2024
1 parent 39fd266 commit e547771
Showing 3 changed files with 4 additions and 4 deletions.
2 changes: 1 addition & 1 deletion ultralytics/__init__.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.2.31"
+__version__ = "8.2.32"
 
 import os

2 changes: 1 addition & 1 deletion ultralytics/utils/__init__.py
@@ -395,7 +395,7 @@ def yaml_print(yaml_file: Union[str, Path, dict]) -> None:
         (None)
     """
     yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file
-    dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True)
+    dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True, width=float("inf"))
     LOGGER.info(f"Printing '{colorstr('bold', 'black', yaml_file)}'\n\n{dump}")


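For context (not part of the diff): PyYAML's yaml.dump wraps long lines at a default width of roughly 80 characters, so long scalar values can be split across lines when yaml_print logs them; passing width=float("inf") keeps each key's value on a single line. A minimal sketch of the effect, using a hypothetical config dict rather than a real Ultralytics YAML file:

import yaml

# Hypothetical config with one very long string value, similar to what yaml_print receives.
cfg = {"model": "yolov8n.pt", "save_dir": "/runs/detect/experiment/" + "x" * 120}

wrapped = yaml.dump(cfg, sort_keys=False, allow_unicode=True)                         # long value folds across lines (default width)
single_line = yaml.dump(cfg, sort_keys=False, allow_unicode=True, width=float("inf"))  # value stays on one line

print(wrapped)
print(single_line)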
4 changes: 2 additions & 2 deletions ultralytics/utils/autobatch.py
@@ -45,8 +45,8 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
     prefix = colorstr("AutoBatch: ")
     LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.")
     device = next(model.parameters()).device  # get model device
-    if device.type == "cpu":
-        LOGGER.info(f"{prefix}CUDA not detected, using default CPU batch-size {batch_size}")
+    if device.type in {"cpu", "mps"}:
+        LOGGER.info(f"{prefix} ⚠️ intended for CUDA devices, using default batch-size {batch_size}")
         return batch_size
     if torch.backends.cudnn.benchmark:
         LOGGER.info(f"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}")
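For context (not part of the diff): AutoBatch estimates batch size from CUDA memory statistics, so it cannot run on CPU or Apple MPS devices; with this change both now fall back to the default batch size instead of only CPU. A minimal sketch of that guard, assuming a torch model and a hypothetical DEFAULT_BATCH constant standing in for DEFAULT_CFG.batch:

import torch

DEFAULT_BATCH = 16  # hypothetical stand-in for DEFAULT_CFG.batch

def safe_autobatch(model: torch.nn.Module, default_batch: int = DEFAULT_BATCH) -> int:
    """Return the default batch size on non-CUDA devices, where CUDA memory stats are unavailable."""
    device = next(model.parameters()).device  # device the model weights live on
    if device.type in {"cpu", "mps"}:  # AutoBatch needs torch.cuda memory statistics
        return default_batch
    # On CUDA, a real implementation would profile memory (e.g. torch.cuda.mem_get_info) to pick a batch size.
    return default_batch

model = torch.nn.Linear(10, 2)  # toy model on CPU
print(safe_autobatch(model))  # -> 16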
