diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1ea4e1b1e7f55..e5b713a79f5c8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Changed the order of `backward`, `step`, `zero_grad` to `zero_grad`, `backward`, `step` ([#6147](https://github.com/PyTorchLightning/pytorch-lightning/pull/6147))
 
+- Changed the default for DeepSpeed CPU offload to `False`, due to prohibitively slow speeds at smaller scales ([#6262](https://github.com/PyTorchLightning/pytorch-lightning/pull/6262))
+
+
 - Renamed `pytorch_lightning.callbacks.swa` to `pytorch_lightning.callbacks.stochastic_weight_avg` ([#6259](https://github.com/PyTorchLightning/pytorch-lightning/pull/6259))
diff --git a/pytorch_lightning/plugins/training_type/deepspeed.py b/pytorch_lightning/plugins/training_type/deepspeed.py
index 0f9a8378052a5..06cea848ce1dc 100644
--- a/pytorch_lightning/plugins/training_type/deepspeed.py
+++ b/pytorch_lightning/plugins/training_type/deepspeed.py
@@ -66,7 +66,7 @@ def __init__(
         self,
         zero_optimization: bool = True,
         stage: int = 2,
-        cpu_offload: bool = True,
+        cpu_offload: bool = False,
         contiguous_gradients: bool = True,
         overlap_comm: bool = True,
         allgather_partitions: bool = True,
@@ -99,7 +99,7 @@ def __init__(
             stage: Different stages of the ZeRO Optimizer. 0 is disabled, 1 is optimizer state partitioning,
                 2 is optimizer+gradient state partitioning (default: 2)
 
-            cpu_offload: Enable offloading optimizer memory and computation to CPU (default: True)
+            cpu_offload: Enable offloading optimizer memory and computation to CPU (default: False)
 
             contiguous_gradients: Copies gradients to a contiguous buffer as they are produced. Avoids memory
                 fragmentation during the backward pass. Useful when training large models. (default: True)
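
Since this flips `cpu_offload` to opt-in, users who relied on the previous behavior now need to pass `cpu_offload=True` explicitly. A minimal sketch of opting back in after this change (the `TinyModel` module and the specific `Trainer` arguments are illustrative assumptions, not part of the diff):

```python
import torch
from torch import nn
import pytorch_lightning as pl
from pytorch_lightning.plugins import DeepSpeedPlugin


class TinyModel(pl.LightningModule):
    """Hypothetical placeholder model, only here to make the example self-contained."""

    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        return self.layer(batch).sum()

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)


# With the new default, CPU offload must be requested explicitly on the plugin.
trainer = pl.Trainer(
    gpus=1,
    precision=16,  # DeepSpeed's ZeRO path is used with 16-bit precision here
    plugins=DeepSpeedPlugin(stage=2, cpu_offload=True),  # opt back in
)
```

Keeping offload off by default trades peak GPU memory savings for throughput: as the changelog entry notes, shuttling optimizer state and computation to the CPU is prohibitively slow at smaller scales, so the cost is only worth paying when the model would not otherwise fit.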