# distributed_training_utils.py

import sys
import itertools
from contextlib import contextmanager
from typing import List

import torch
import torch.nn as nn
from torch import distributed as dist
from torch.cuda.amp import autocast
from torch.distributed.elastic.multiprocessing import Std
from torch.distributed.elastic.multiprocessing.errors import record
from torch.distributed.launcher.api import LaunchConfig, elastic_launch

from super_gradients.common.data_types.enum import MultiGPUMode
from super_gradients.common.environment.argparse_utils import EXTRA_ARGS
from super_gradients.common.environment.ddp_utils import find_free_port, is_distributed
from super_gradients.common.abstractions.abstract_logger import get_logger

logger = get_logger(__name__)


def distributed_all_reduce_tensor_average(tensor, n):
    """
    Perform a reduce operation across the nodes running distributed training:
    first sum the results from all of the nodes, then divide the sum by the number of nodes.
    :param tensor: The tensor to perform the reduce operation on
    :param n:      Number of nodes
    :return:       Averaged tensor from all of the nodes
    """
    rt = tensor.clone()
    torch.distributed.all_reduce(rt, op=torch.distributed.ReduceOp.SUM)
    rt /= n
    return rt
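

# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Shows how distributed_all_reduce_tensor_average could be used to average a per-rank
# metric. Assumes a DDP process group is already initialized and CUDA is available;
# the metric value and function name are hypothetical.
def _example_average_metric_across_ranks() -> torch.Tensor:
    local_metric = torch.tensor(0.73, device="cuda")  # hypothetical per-rank value
    world_size = torch.distributed.get_world_size()
    return distributed_all_reduce_tensor_average(tensor=local_metric, n=world_size)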


def reduce_results_tuple_for_ddp(validation_results_tuple, device):
    """Gather all validation tuples from the various devices and average them"""
    validation_results_list = list(validation_results_tuple)
    for i, validation_result in enumerate(validation_results_list):
        if torch.is_tensor(validation_result):
            validation_result = validation_result.clone().detach()
        else:
            validation_result = torch.tensor(validation_result)
        validation_results_list[i] = distributed_all_reduce_tensor_average(tensor=validation_result.to(device), n=torch.distributed.get_world_size())
    validation_results_tuple = tuple(validation_results_list)
    return validation_results_tuple


class MultiGPUModeAutocastWrapper:
    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        with autocast():
            out = self.func(*args, **kwargs)
        return out
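

# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# MultiGPUModeAutocastWrapper can wrap a callable so that every call runs under
# torch.cuda.amp.autocast. The decorated function below is a hypothetical example.
@MultiGPUModeAutocastWrapper
def _example_forward_in_mixed_precision(model: nn.Module, inputs: torch.Tensor) -> torch.Tensor:
    # This body executes inside an autocast() context provided by the wrapper.
    return model(inputs)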


def scaled_all_reduce(tensors: List[torch.Tensor], num_gpus: int):
    """
    Performs the scaled all_reduce operation on the provided tensors.
    The input tensors are modified in-place.
    Currently supports only the sum reduction operator.
    The reduced values are scaled by the inverse size of the process group (equivalent to num_gpus).
    """
    # There is no need for reduction in the single-proc case
    if num_gpus == 1:
        return tensors

    # Queue the reductions
    reductions = []
    for tensor in tensors:
        reduction = torch.distributed.all_reduce(tensor, async_op=True)
        reductions.append(reduction)

    # Wait for reductions to finish
    for reduction in reductions:
        reduction.wait()

    # Scale the results
    for tensor in tensors:
        tensor.mul_(1.0 / num_gpus)
    return tensors
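

# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# scaled_all_reduce sums each tensor across processes and divides by num_gpus, so every
# rank ends up holding the per-GPU average. Assumes a DDP process group is initialized;
# the statistics tensors are hypothetical.
def _example_scaled_all_reduce(num_gpus: int) -> List[torch.Tensor]:
    local_stats = [torch.ones(3, device="cuda"), torch.zeros(3, device="cuda")]
    # The tensors are modified in-place; the returned list holds the same tensor objects.
    return scaled_all_reduce(local_stats, num_gpus=num_gpus)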


@torch.no_grad()
def compute_precise_bn_stats(model: nn.Module, loader: torch.utils.data.DataLoader, precise_bn_batch_size: int, num_gpus: int):
    """
    :param model:                 The model being trained (ie: Trainer.net)
    :param loader:                Training dataloader (ie: Trainer.train_loader)
    :param precise_bn_batch_size: The effective batch size we want to calculate the batchnorm on. For example, if we are training a model
                                  on 8 gpus, with a batch of 128 on each gpu, a good rule of thumb would be to give it 8192
                                  (ie: effective_batch_size * num_gpus = batch_per_gpu * num_gpus * num_gpus).
                                  If precise_bn_batch_size is not provided in the training_params, this heuristic will be used.
    :param num_gpus:              The number of gpus we are training on
    """
    # Compute the number of minibatches to use
    num_iter = int(precise_bn_batch_size / (loader.batch_size * num_gpus)) if precise_bn_batch_size else num_gpus
    num_iter = min(num_iter, len(loader))

    # Retrieve the BN layers
    bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]

    # Initialize BN stats storage for computing mean(mean(batch)) and mean(var(batch))
    running_means = [torch.zeros_like(bn.running_mean) for bn in bns]
    running_vars = [torch.zeros_like(bn.running_var) for bn in bns]

    # Remember momentum values
    momentums = [bn.momentum for bn in bns]

    # Set momentum to 1.0 to compute BN stats that only reflect the current batch
    for bn in bns:
        bn.momentum = 1.0

    # Average the BN stats for each BN layer over the batches
    for inputs, _labels in itertools.islice(loader, num_iter):
        model(inputs.cuda())
        for i, bn in enumerate(bns):
            running_means[i] += bn.running_mean / num_iter
            running_vars[i] += bn.running_var / num_iter

    # Sync BN stats across GPUs (no reduction if 1 GPU used)
    running_means = scaled_all_reduce(running_means, num_gpus=num_gpus)
    running_vars = scaled_all_reduce(running_vars, num_gpus=num_gpus)

    # Set BN stats and restore original momentum values
    for i, bn in enumerate(bns):
        bn.running_mean = running_means[i]
        bn.running_var = running_vars[i]
        bn.momentum = momentums[i]
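

# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Recomputing precise BatchNorm statistics at the end of training. `trainer_net` and
# `train_loader` are hypothetical stand-ins for Trainer.net and Trainer.train_loader.
def _example_compute_precise_bn(trainer_net: nn.Module, train_loader: torch.utils.data.DataLoader, num_gpus: int):
    compute_precise_bn_stats(
        model=trainer_net,
        loader=train_loader,
        precise_bn_batch_size=8192,  # e.g. 8 GPUs x 128 samples per GPU x 8, per the rule of thumb above
        num_gpus=num_gpus,
    )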


def get_local_rank():
    """
    Returns the local rank if running in DDP, and 0 otherwise
    :return: local rank
    """
    return dist.get_rank() if dist.is_initialized() else 0


def get_world_size() -> int:
    """
    Returns the world size if running in DDP, and 1 otherwise
    :return: world size
    """
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()


@contextmanager
def wait_for_the_master(local_rank: int):
    """
    Make all processes wait for the master to finish some task.
    """
    if local_rank > 0:
        dist.barrier()
    yield
    if local_rank == 0:
        if not dist.is_available():
            return
        if not dist.is_initialized():
            return
        else:
            dist.barrier()
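

# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Typical pattern: only the master process prepares a shared resource (e.g. downloads a
# dataset) while the other ranks wait at a barrier. The function name is hypothetical.
def _example_master_only_setup():
    local_rank = get_local_rank()
    with wait_for_the_master(local_rank):
        if local_rank == 0:
            # Master-only work (e.g. downloading / unpacking a dataset) would go here.
            logger.info("Master process is preparing shared resources")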


def setup_device(multi_gpu: MultiGPUMode = MultiGPUMode.OFF, num_gpus: int = None):
    """
    If required, launch ddp subprocesses.
    :param multi_gpu:   DDP, DP or Off
    :param num_gpus:    Number of GPUs to use.
    """
    if multi_gpu == MultiGPUMode.AUTO and torch.cuda.device_count() > 1:
        multi_gpu = MultiGPUMode.DISTRIBUTED_DATA_PARALLEL

    if require_gpu_setup(multi_gpu):
        num_gpus = num_gpus or torch.cuda.device_count()
        if num_gpus > torch.cuda.device_count():
            raise ValueError(f"You specified num_gpus={num_gpus} but only {torch.cuda.device_count()} GPUs are available")
        restart_script_with_ddp(num_gpus)
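

# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Requesting DDP over 4 GPUs before building the trainer: if the current process is not
# yet part of a process group, setup_device relaunches the script with DDP subprocesses.
def _example_setup_ddp_on_four_gpus():
    setup_device(multi_gpu=MultiGPUMode.DISTRIBUTED_DATA_PARALLEL, num_gpus=4)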


def setup_gpu_mode(gpu_mode: MultiGPUMode = MultiGPUMode.OFF, num_gpus: int = None):
    """If required, launch ddp subprocesses (deprecated).
    :param gpu_mode:    DDP, DP or Off
    :param num_gpus:    Number of GPUs to use.
    """
    logger.warning("setup_gpu_mode is now deprecated in favor of setup_device. This will be removed in the next version")
    setup_device(multi_gpu=gpu_mode, num_gpus=num_gpus)


def require_gpu_setup(multi_gpu: MultiGPUMode) -> bool:
    """Check if the environment requires a setup in order to work with DDP."""
    return (multi_gpu == MultiGPUMode.DISTRIBUTED_DATA_PARALLEL) and (not is_distributed())


@record
def restart_script_with_ddp(num_gpus: int = None):
    """Launch the same script as the one that was launched (i.e. the command used to start the current process is re-used) but on subprocesses (i.e. with DDP).
    :param num_gpus: How many GPUs you want to run the script on. If not specified, every available device will be used.
    """
    ddp_port = find_free_port()

    # Get the value from the recipe if specified, otherwise take all available devices.
    num_gpus = num_gpus if num_gpus else torch.cuda.device_count()
    if num_gpus > torch.cuda.device_count():
        raise ValueError(f"You specified num_gpus={num_gpus} but only {torch.cuda.device_count()} GPUs are available")

    logger.info(
        "Launching DDP with:\n"
        f"  - ddp_port = {ddp_port}\n"
        f"  - num_gpus = {num_gpus}/{torch.cuda.device_count()} available\n"
        "-------------------------------------\n"
    )

    config = LaunchConfig(
        nproc_per_node=num_gpus,
        min_nodes=1,
        max_nodes=1,
        run_id="sg_initiated",
        role="default",
        rdzv_endpoint=f"127.0.0.1:{ddp_port}",
        rdzv_backend="static",
        rdzv_configs={"rank": 0, "timeout": 900},
        rdzv_timeout=-1,
        max_restarts=0,
        monitor_interval=5,
        start_method="spawn",
        log_dir=None,
        redirects=Std.NONE,
        tee=Std.NONE,
        metrics_cfg={},
    )

    elastic_launch(config=config, entrypoint=sys.executable)(*sys.argv, *EXTRA_ARGS)

    # The code below should actually never be reached as the process will be in a loop inside elastic_launch until any subprocess crashes.
    sys.exit(0)


def get_gpu_mem_utilization():
    """GPU memory managed by the caching allocator in bytes for a given device."""
    # Workaround to work on any torch version
    if hasattr(torch.cuda, "memory_reserved"):
        return torch.cuda.memory_reserved()
    else:
        return torch.cuda.memory_cached()
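

# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Reporting the memory currently reserved by the CUDA caching allocator, in megabytes.
def _example_log_gpu_memory():
    if torch.cuda.is_available():
        logger.info(f"GPU memory reserved: {get_gpu_mem_utilization() / 1024 ** 2:.1f} MB")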