From b09d14091df4f482a14a75e94438efc929d65d2d Mon Sep 17 00:00:00 2001
From: Shay Aharon <80472096+shaydeci@users.noreply.github.com>
Date: Thu, 22 Dec 2022 15:29:33 +0200
Subject: [PATCH] formatting (#586)

---
 src/super_gradients/training/sg_trainer/sg_trainer.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/super_gradients/training/sg_trainer/sg_trainer.py b/src/super_gradients/training/sg_trainer/sg_trainer.py
index 747de977d4..a17ca34fe5 100755
--- a/src/super_gradients/training/sg_trainer/sg_trainer.py
+++ b/src/super_gradients/training/sg_trainer/sg_trainer.py
@@ -954,6 +954,8 @@ def forward(self, inputs, targets):
             training_params = dict()
         self.train_loader = train_loader or self.train_loader
         self.valid_loader = valid_loader or self.valid_loader
+        if len(self.train_loader.dataset) % self.train_loader.batch_size != 0 and not self.train_loader.drop_last:
+            logger.warning("Train dataset size % batch_size != 0 and drop_last=False, this might result in smaller " "last batch.")
         self._set_dataset_params()

         if self.multi_gpu == MultiGPUMode.DISTRIBUTED_DATA_PARALLEL:
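
Editor's note: a minimal, self-contained sketch of the condition the new warning guards against. It is not part of the patch; the dataset contents, the size of 10, and the batch_size of 4 are hypothetical, while DataLoader and its batch_size/drop_last arguments are standard PyTorch.

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # Hypothetical dataset of 10 samples; 10 % 4 != 0, so with
    # drop_last=False the final batch holds only 2 samples.
    dataset = TensorDataset(torch.arange(10).float())

    loader = DataLoader(dataset, batch_size=4, drop_last=False)
    print([len(batch[0]) for batch in loader])  # [4, 4, 2] -> smaller last batch

    # With drop_last=True the incomplete final batch is discarded instead.
    loader = DataLoader(dataset, batch_size=4, drop_last=True)
    print([len(batch[0]) for batch in loader])  # [4, 4]

    # The same check the patch adds, applied to a standalone loader:
    if len(loader.dataset) % loader.batch_size != 0 and not loader.drop_last:
        print("last batch will be smaller than batch_size")

A smaller last batch is usually harmless, but it can skew per-batch statistics (e.g. BatchNorm updates or loss averaging), which is why the patch warns rather than silently proceeding.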