From b880744c1e6b7c526ab8a2c22161b105778e807e Mon Sep 17 00:00:00 2001
From: Dmitriy Genzel
Date: Wed, 7 Oct 2020 09:17:06 -0700
Subject: [PATCH] Fix a bad merge from D24154657 - second try

Summary: This removes a line that reset the dummy batch in begin_epoch. In actuality the batch is reset in get_{train,valid}_iterator, and this line was resetting it to None unnecessarily.

Reviewed By: joshim5, jhcross

Differential Revision: D24157057

fbshipit-source-id: 59ac68327094ceff70f66d7b471fa810997fe84e
---
 fairseq/trainer.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/fairseq/trainer.py b/fairseq/trainer.py
index 14bba0c662..f9ad1b2f95 100644
--- a/fairseq/trainer.py
+++ b/fairseq/trainer.py
@@ -413,9 +413,6 @@ def begin_epoch(self, epoch):
         # task specific setup per epoch
         self.task.begin_epoch(epoch, self.get_model())
 
-        # reset dummy batch
-        self._dummy_batch = None
-
         if self.tpu:
             import torch_xla.core.xla_model as xm
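
For context, below is a minimal, hypothetical sketch of the dummy-batch lifecycle the summary describes. It is not fairseq's actual Trainer: only the _dummy_batch attribute and the method names begin_epoch, get_train_iterator, and get_valid_iterator come from the diff and summary; the bodies, the stand-in iterators, and the train_step logic are illustrative assumptions. The point it shows is that once the iterator getters clear the cached dummy batch themselves, an additional reset in begin_epoch is redundant.

# sketch.py - illustrative only; not fairseq code
class Trainer:
    def __init__(self):
        # Cached sample used as a placeholder batch; rebuilt lazily per epoch.
        self._dummy_batch = None

    def get_train_iterator(self, epoch):
        # Clearing the cache here is what makes the begin_epoch reset unnecessary.
        self._dummy_batch = None
        return iter(range(3))  # stand-in for the real epoch batch iterator

    def get_valid_iterator(self, subset):
        self._dummy_batch = None
        return iter(range(2))  # stand-in for the real validation iterator

    def begin_epoch(self, epoch):
        # After the patch: per-epoch task setup only, no dummy-batch handling.
        pass

    def train_step(self, sample):
        if self._dummy_batch is None:
            # Lazily capture the first real sample of the epoch as the dummy batch.
            self._dummy_batch = sample
        return sample


if __name__ == "__main__":
    trainer = Trainer()
    itr = trainer.get_train_iterator(epoch=1)
    trainer.begin_epoch(epoch=1)
    for sample in itr:
        trainer.train_step(sample)
    print("dummy batch after epoch:", trainer._dummy_batch)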