From 303265782c9816c7319fb5fb442e4a1d5acf0331 Mon Sep 17 00:00:00 2001
From: Abhishek Thakur
Date: Thu, 28 Jan 2021 19:29:11 +0100
Subject: [PATCH 1/2] pin_memory -> dataloader_pin_memory

---
 src/transformers/trainer.py       | 6 +++---
 src/transformers/training_args.py | 6 ++++--
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index 3e62db7690be..8ca84beefecf 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -485,7 +485,7 @@ def get_train_dataloader(self) -> DataLoader:
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
             num_workers=self.args.dataloader_num_workers,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
@@ -523,7 +523,7 @@ def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoa
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
             num_workers=self.args.dataloader_num_workers,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
@@ -550,7 +550,7 @@ def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
             batch_size=self.args.eval_batch_size,
             collate_fn=self.data_collator,
             drop_last=self.args.dataloader_drop_last,
-            pin_memory=self.args.pin_memory,
+            pin_memory=self.args.dataloader_pin_memory,
         )
 
     def create_optimizer_and_scheduler(self, num_training_steps: int):
diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index e4e8aad96d50..983fc8055e5c 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -244,7 +244,7 @@ class TrainingArguments:
             When using distributed training, the value of the flag :obj:`find_unused_parameters` passed to
             :obj:`DistributedDataParallel`. Will default to :obj:`False` if gradient checkpointing is used,
             :obj:`True` otherwise.
-        pin_memory (:obj:`bool`, `optional`, defaults to :obj:`True`)):
+        dataloader_pin_memory (:obj:`bool`, `optional`, defaults to :obj:`True`)):
             Whether you want to pin memory in data loaders or not. Will default to :obj:`True`.
     """
 
@@ -438,7 +438,9 @@ class TrainingArguments:
             "`DistributedDataParallel`."
         },
     )
-    pin_memory: bool = field(default=True, metadata={"help": "Whether or not to pin memory for data loaders."})
+    dataloader_pin_memory: bool = field(
+        default=True, metadata={"help": "Whether or not to pin memory for data loaders."}
+    )
     _n_gpu: int = field(init=False, repr=False, default=-1)
 
     def __post_init__(self):

From bbcc5dcc5eca6eb95c9502d99d708064692666ea Mon Sep 17 00:00:00 2001
From: abhishek thakur
Date: Thu, 28 Jan 2021 19:39:57 +0100
Subject: [PATCH 2/2] Update src/transformers/training_args.py

Co-authored-by: Stas Bekman
---
 src/transformers/training_args.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index 983fc8055e5c..71d5255944cd 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -439,7 +439,7 @@ class TrainingArguments:
         },
     )
     dataloader_pin_memory: bool = field(
-        default=True, metadata={"help": "Whether or not to pin memory for data loaders."}
+        default=True, metadata={"help": "Whether or not to pin memory for DataLoader."}
     )
     _n_gpu: int = field(init=False, repr=False, default=-1)
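
Usage note (not part of the patch series): a minimal sketch of the renamed flag after this change, assuming a transformers install that includes it; the output_dir value is a hypothetical placeholder.

    from transformers import TrainingArguments

    # After this patch the pin-memory switch is exposed as `dataloader_pin_memory`
    # (defaults to True). Setting it to False disables pinned memory in the
    # train/eval/test DataLoaders that Trainer builds.
    args = TrainingArguments(
        output_dir="out",             # hypothetical output directory
        dataloader_pin_memory=False,  # renamed from the earlier `pin_memory`
    )
    print(args.dataloader_pin_memory)  # -> False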