Commit
Revert "debug"
This reverts commit a6e6101.

Revert "debug"

This reverts commit 5ddeaec.

debug

debug

Revert "debug"

This reverts commit 605be74.

Revert "Revert "debug""

This reverts commit a7612d5.

debug

x

x

x

s

tol

x

tol
awaelchli committed Jul 1, 2021
1 parent 88ca10d commit c712b62
Showing 3 changed files with 5 additions and 8 deletions.
4 changes: 2 additions & 2 deletions benchmarks/test_basic_parity.py
@@ -45,8 +45,8 @@ def assert_parity_absolute(pl_values, pt_values, norm_by: float = 1, max_diff: f
 @pytest.mark.parametrize(
     'cls_model,max_diff_speed,max_diff_memory',
     [
-        (ParityModuleRNN, 0.05, 0.0),
-        (ParityModuleMNIST, 0.25, 0.0),  # todo: lower this thr
+        (ParityModuleRNN, 0.05, 0.001),
+        (ParityModuleMNIST, 0.25, 0.001),  # todo: lower this thr
     ]
 )
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
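The only functional change in this file is relaxing max_diff_memory from 0.0 to 0.001, so the GPU-memory parity check tolerates a tiny gap instead of demanding exact equality. As a rough illustration only (this is not the repository's assert_parity_absolute, and the helper name and comparison are assumptions), such an absolute-tolerance check could look like:

    import numpy as np

    def assert_memory_parity(pl_memory, pt_memory, max_diff: float = 0.001):
        # Hypothetical helper: compare memory measurements from the Lightning
        # and plain-PyTorch runs and fail once the absolute gap exceeds max_diff.
        diff = np.abs(np.asarray(pl_memory) - np.asarray(pt_memory))
        assert (diff <= max_diff).all(), f"memory parity off by {diff.max():.4f}"

With max_diff=0.0 any nonzero measurement difference fails the test; 0.001 presumably leaves headroom for allocator jitter between otherwise equivalent runs.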
6 changes: 3 additions & 3 deletions pytorch_lightning/loops/epoch/evaluation_epoch_loop.py
@@ -100,9 +100,6 @@ def advance(
         if batch is None:
             raise StopIteration
 
-        assert self.num_dataloaders is not None
-        self.trainer.logger_connector.on_evaluation_batch_start(batch, batch_idx, dataloader_idx, self.num_dataloaders)
-
         with self.trainer.profiler.profile("evaluation_batch_to_device"):
             batch = self.trainer.accelerator.batch_to_device(batch, dataloader_idx=dataloader_idx)
@@ -175,6 +172,9 @@ def on_evaluation_batch_start(self, batch: Any, batch_idx: int, dataloader_idx:
         """
         self.trainer.logger_connector.on_batch_start()
 
+        assert self.num_dataloaders is not None
+        self.trainer.logger_connector.on_evaluation_batch_start(batch, batch_idx, dataloader_idx, self.num_dataloaders)
+
         if self.trainer.testing:
             self.trainer.call_hook("on_test_batch_start", batch, batch_idx, dataloader_idx)
         else:
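Net effect in this file: the logger connector's evaluation batch-start bookkeeping moves out of advance(), where it ran before the batch was transferred to the device, and into on_evaluation_batch_start(), next to the other batch-start hooks. A simplified sketch of the resulting method body, recomposed from the two hunks above (type annotations and surrounding code elided):

    def on_evaluation_batch_start(self, batch, batch_idx, dataloader_idx):
        # Logger bookkeeping now runs here, right before the user-facing hooks,
        # instead of inside advance() ahead of batch_to_device().
        self.trainer.logger_connector.on_batch_start()

        assert self.num_dataloaders is not None
        self.trainer.logger_connector.on_evaluation_batch_start(
            batch, batch_idx, dataloader_idx, self.num_dataloaders
        )

        if self.trainer.testing:
            self.trainer.call_hook("on_test_batch_start", batch, batch_idx, dataloader_idx)
        else:
            self.trainer.call_hook("on_validation_batch_start", batch, batch_idx, dataloader_idx)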
3 changes: 0 additions & 3 deletions pytorch_lightning/loops/epoch/training_epoch_loop.py
@@ -115,13 +115,11 @@ def advance(self, dataloader_iter: Iterator, **kwargs: Any) -> None:
         # TRAINING_STEP + TRAINING_STEP_END
         # ------------------------------------
         with self.trainer.profiler.profile("training_batch_to_device"):
-            print("before run", self.iteration_count, torch.cuda.memory_allocated())
             batch = self.trainer.accelerator.batch_to_device(batch, dataloader_idx=self._dataloader_idx)
 
         with self.trainer.profiler.profile("run_training_batch"):
             batch_output = self.batch_loop.run(batch, self.iteration_count, self._dataloader_idx)
             self.batches_seen += 1
-        print("after run", self.iteration_count, torch.cuda.memory_allocated())
 
         # when returning -1 from train_step, we end epoch early
         if batch_output.signal == -1:
@@ -157,7 +155,6 @@ def on_advance_end(self):
         Raises:
             StopIteration: if :attr:`done` evaluates to ``True`` to finish this epoch
         """
-        print("advance end", self.iteration_count, torch.cuda.memory_allocated())
         # -----------------------------------------
         # VALIDATE IF NEEDED + CHECKPOINT CALLBACK
         # -----------------------------------------
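The removed lines were temporary instrumentation that printed torch.cuda.memory_allocated() around batch transfer and the training step. If similar tracing is ever needed again, a small standalone helper (a sketch, not part of Lightning) keeps it out of the loop code:

    import torch

    def log_cuda_memory(tag: str, step: int) -> None:
        # Print currently allocated CUDA memory (in MB) for quick leak hunting.
        # No-op on CPU-only machines.
        if torch.cuda.is_available():
            allocated_mb = torch.cuda.memory_allocated() / 1024 ** 2
            print(f"{tag} step={step} allocated={allocated_mb:.1f} MB")

For example, call log_cuda_memory("before run", batch_idx) ahead of batch_to_device and a matching call after the training step, mirroring what the reverted debug commits did inline.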
