From 5c554a1131040f11e3afe38d65d5130aa6e39255 Mon Sep 17 00:00:00 2001
From: Shubham Agarwal
Date: Sun, 8 Mar 2020 19:58:44 +0000
Subject: [PATCH 01/15] SA: for #958: set torch cuda device when finding root

---
 pytorch_lightning/trainer/distrib_parts.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py
index 0a629a6f21c62..c318df9a2863b 100644
--- a/pytorch_lightning/trainer/distrib_parts.py
+++ b/pytorch_lightning/trainer/distrib_parts.py
@@ -640,4 +640,10 @@ def determine_root_gpu_device(gpus):
     # set root gpu
     root_gpu = gpus[0]
 
+    # set cuda device to root gpu
+    # related to https://github.com/PyTorchLightning/pytorch-lightning/issues/958
+    # Refer solution: https://github.com/pytorch/pytorch/issues/9871#issuecomment-408304190
+    root_device = torch.device("cuda", root_gpu)
+    torch.cuda.set_device(root_device)
+
     return root_gpu

From 2f17b2f25f40a6c81328ede73056319155d16cbb Mon Sep 17 00:00:00 2001
From: Shubham Agarwal
Date: Sun, 8 Mar 2020 20:06:05 +0000
Subject: [PATCH 02/15] SA: for #958: removing root gpu hack in trainer/evaluation_loop

---
 pytorch_lightning/trainer/evaluation_loop.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index 1ca088ebbc720..c074af4bee55c 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -421,9 +421,13 @@ def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test_mode:
         # single GPU data transfer
         if self.single_gpu:
             # for single GPU put inputs on gpu manually
-            root_gpu = 0
+
             if isinstance(self.data_parallel_device_ids, list):
                 root_gpu = self.data_parallel_device_ids[0]
+            else:
+                raise RuntimeError(
+                    'Expected `data_parallel_device_ids` as a list, cannot determine root gpu.'
+                )
             batch = self.transfer_batch_to_gpu(batch, root_gpu)
             args[0] = batch
 

From 6d895055987647c93ff42a54fc81702cad911a97 Mon Sep 17 00:00:00 2001
From: Shubham Agarwal
Date: Sun, 8 Mar 2020 20:07:57 +0000
Subject: [PATCH 03/15] SA: setting torch cuda device

---
 pytorch_lightning/trainer/evaluation_loop.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index c074af4bee55c..071be073ba756 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -424,6 +424,12 @@ def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test_mode:
 
             if isinstance(self.data_parallel_device_ids, list):
                 root_gpu = self.data_parallel_device_ids[0]
+
+                # set cuda device to root gpu
+                # related to https://github.com/PyTorchLightning/pytorch-lightning/issues/958
+                # Refer solution: https://github.com/pytorch/pytorch/issues/9871#issuecomment-408304190
+                root_device = torch.device("cuda", root_gpu)
+                torch.cuda.set_device(root_device)
             else:
                 raise RuntimeError(
                     'Expected `data_parallel_device_ids` as a list, cannot determine root gpu.'
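Patches 01 and 03 share one mechanism: they pin the process-wide default CUDA device to the root GPU before any tensors are moved (patch 02 first makes the root-GPU lookup explicit), so that later transfers issued without an explicit device index land on the same GPU. A minimal sketch of the idea, assuming a CUDA build of PyTorch and a machine with at least two GPUs; the index 1 is purely illustrative, in the patches it comes from `gpus[0]` / `data_parallel_device_ids[0]`:

    import torch

    # A bare `.cuda()` targets the *current* device, which defaults to cuda:0
    # even when training was requested on e.g. gpus=[1]; that mismatch is the
    # failure behind issue #958.
    root_gpu = 1  # illustrative root-GPU index
    torch.cuda.set_device(torch.device("cuda", root_gpu))

    # After pinning, transfers without an explicit index follow the root GPU.
    batch = torch.randn(2, 3).cuda()
    assert batch.device == torch.device("cuda", root_gpu)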
From 54e9a5e1c6ca316576a2eecccb2e0c6ca4f685fe Mon Sep 17 00:00:00 2001
From: Shubham Agarwal
Date: Sun, 8 Mar 2020 20:27:11 +0000
Subject: [PATCH 04/15] comment line too long

---
 pytorch_lightning/trainer/evaluation_loop.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index 071be073ba756..da0c16d0e4f52 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -427,7 +427,7 @@ def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test_mode:
 
                 # set cuda device to root gpu
                 # related to https://github.com/PyTorchLightning/pytorch-lightning/issues/958
-                # Refer solution: https://github.com/pytorch/pytorch/issues/9871#issuecomment-408304190
+                # Refer: https://github.com/pytorch/pytorch/issues/9871#issuecomment-408304190
                 root_device = torch.device("cuda", root_gpu)
                 torch.cuda.set_device(root_device)
             else:

From 83c291dbc8bbc29c8666dc072ca1e1108cfb752c Mon Sep 17 00:00:00 2001
From: Shubham Agarwal
Date: Mon, 9 Mar 2020 09:30:57 +0000
Subject: [PATCH 05/15] check if root gpu exists or available

---
 pytorch_lightning/trainer/distrib_parts.py   | 3 ++-
 pytorch_lightning/trainer/evaluation_loop.py | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py
index c318df9a2863b..39cdf88b1f600 100644
--- a/pytorch_lightning/trainer/distrib_parts.py
+++ b/pytorch_lightning/trainer/distrib_parts.py
@@ -643,7 +643,8 @@ def determine_root_gpu_device(gpus):
     # set cuda device to root gpu
     # related to https://github.com/PyTorchLightning/pytorch-lightning/issues/958
     # Refer solution: https://github.com/pytorch/pytorch/issues/9871#issuecomment-408304190
-    root_device = torch.device("cuda", root_gpu)
+    # root_device = torch.device("cuda", root_gpu)
+    root_device = (torch.device("cuda", root_gpu) if root_gpu >= 0 else torch.device("cpu"))
     torch.cuda.set_device(root_device)
 
     return root_gpu
diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index da0c16d0e4f52..91037753b7637 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -428,7 +428,8 @@ def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test_mode:
 
                 # set cuda device to root gpu
                 # related to https://github.com/PyTorchLightning/pytorch-lightning/issues/958
                 # Refer: https://github.com/pytorch/pytorch/issues/9871#issuecomment-408304190
-                root_device = torch.device("cuda", root_gpu)
+                root_device = (torch.device("cuda", root_gpu)
+                               if root_gpu >= 0 else torch.device("cpu"))
                 torch.cuda.set_device(root_device)
             else:

From 9473303da39598ba7767674727766a74f6377a37 Mon Sep 17 00:00:00 2001
From: Shubham Agarwal
Date: Thu, 12 Mar 2020 16:42:43 +0000
Subject: [PATCH 11/15] Related to #609. Filter params for tensorboard logging.

---
 pytorch_lightning/loggers/tensorboard.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
index 9be1d82b7669a..e7707578766c0 100644
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -109,8 +109,17 @@ def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
                 " hyperparameter logging."
             )
         else:
+            # See https://github.com/PyTorchLightning/pytorch-lightning/pull/609#issuecomment-598253152
+            # Passing only those params which tensorboard allows here:
+            # https://github.com/pytorch/pytorch/blob/master/torch/utils/tensorboard/summary.py#L134
+            from six import string_types
             from torch.utils.tensorboard.summary import hparams
-            exp, ssi, sei = hparams(params, {})
+            tensorboard_params = {}
+            for k, v in params.items():
+                if isinstance(v, int) or isinstance(v, float) or isinstance(v, string_types) or isinstance(
+                        v, bool) or isinstance(v, torch.Tensor):
+                    tensorboard_params[k] = v
+            exp, ssi, sei = hparams(tensorboard_params, {})
             writer = self.experiment._get_file_writer()
             writer.add_summary(exp)
             writer.add_summary(ssi)
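The whitelist introduced above can be exercised in isolation. A rough standalone sketch of the same filtering; the name `filter_tensorboard_params` is illustrative and not part of the patch, and the accepted types mirror the check in the `summary.py` line cited in the comments:

    import torch
    from six import string_types

    def filter_tensorboard_params(params):
        # tensorboard's `hparams()` helper only handles int, float, string,
        # bool and torch.Tensor values; other types (lists, dicts, None, ...)
        # break hyperparameter logging, so those entries are dropped.
        return {k: v for k, v in params.items()
                if isinstance(v, (int, float, string_types, bool, torch.Tensor))}

    # e.g. {'lr': 0.01, 'layers': [64, 64]} -> {'lr': 0.01}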
From 10a5b05e3530aa504b6114e34b1f52f428f932f3 Mon Sep 17 00:00:00 2001
From: Shubham Agarwal
Date: Thu, 12 Mar 2020 16:48:11 +0000
Subject: [PATCH 12/15] comment line too long

---
 pytorch_lightning/loggers/tensorboard.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
index e7707578766c0..c23b32ddb7dae 100644
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -109,15 +109,16 @@ def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
                 " hyperparameter logging."
             )
         else:
-            # See https://github.com/PyTorchLightning/pytorch-lightning/pull/609#issuecomment-598253152
+            # See https://github.com/PyTorchLightning/pytorch-lightning/ \
+            # pull/609#issuecomment-598253152
             # Passing only those params which tensorboard allows here:
             # https://github.com/pytorch/pytorch/blob/master/torch/utils/tensorboard/summary.py#L134
             from six import string_types
             from torch.utils.tensorboard.summary import hparams
             tensorboard_params = {}
             for k, v in params.items():
-                if isinstance(v, int) or isinstance(v, float) or isinstance(v, string_types) or isinstance(
-                        v, bool) or isinstance(v, torch.Tensor):
+                if isinstance(v, int) or isinstance(v, float) or isinstance(v, string_types) \
+                        or isinstance(v, bool) or isinstance(v, torch.Tensor):
                     tensorboard_params[k] = v
             exp, ssi, sei = hparams(tensorboard_params, {})
             writer = self.experiment._get_file_writer()

From dd8a7c2d54656bd8b19dee6e0917825aab542448 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Wed, 18 Mar 2020 17:51:09 +0100
Subject: [PATCH 13/15] Apply suggestions from code review

---
 pytorch_lightning/loggers/tensorboard.py     | 5 +----
 pytorch_lightning/trainer/distrib_parts.py   | 3 ---
 pytorch_lightning/trainer/evaluation_loop.py | 2 --
 3 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
index c23b32ddb7dae..4593cd22e6beb 100644
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -109,16 +109,13 @@ def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
                 " hyperparameter logging."
             )
         else:
-            # See https://github.com/PyTorchLightning/pytorch-lightning/ \
-            # pull/609#issuecomment-598253152
             # Passing only those params which tensorboard allows here:
             # https://github.com/pytorch/pytorch/blob/master/torch/utils/tensorboard/summary.py#L134
             from six import string_types
             from torch.utils.tensorboard.summary import hparams
             tensorboard_params = {}
             for k, v in params.items():
-                if isinstance(v, int) or isinstance(v, float) or isinstance(v, string_types) \
-                        or isinstance(v, bool) or isinstance(v, torch.Tensor):
+                if isinstance(v, (int, float, string_types, bool, torch.Tensor)):
                     tensorboard_params[k] = v
             exp, ssi, sei = hparams(tensorboard_params, {})
             writer = self.experiment._get_file_writer()
diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py
index 39cdf88b1f600..0237a1975ccee 100644
--- a/pytorch_lightning/trainer/distrib_parts.py
+++ b/pytorch_lightning/trainer/distrib_parts.py
@@ -641,9 +641,6 @@ def determine_root_gpu_device(gpus):
     root_gpu = gpus[0]
 
     # set cuda device to root gpu
-    # related to https://github.com/PyTorchLightning/pytorch-lightning/issues/958
-    # Refer solution: https://github.com/pytorch/pytorch/issues/9871#issuecomment-408304190
-    # root_device = torch.device("cuda", root_gpu)
     root_device = (torch.device("cuda", root_gpu) if root_gpu >= 0 else torch.device("cpu"))
     torch.cuda.set_device(root_device)
 
diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index 91037753b7637..b6e595fb7228d 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -426,8 +426,6 @@ def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test_mode:
                 root_gpu = self.data_parallel_device_ids[0]
 
                 # set cuda device to root gpu
-                # related to https://github.com/PyTorchLightning/pytorch-lightning/issues/958
-                # Refer: https://github.com/pytorch/pytorch/issues/9871#issuecomment-408304190
                 root_device = (torch.device("cuda", root_gpu)
                                if root_gpu >= 0 else torch.device("cpu"))
                 torch.cuda.set_device(root_device)
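One caveat in the form this patch settles on: `torch.cuda.set_device` expects a CUDA device or index, so the `torch.device("cpu")` fallback would raise a ValueError rather than act as a no-op if `root_gpu` were ever negative. A defensive sketch of the same pinning step, as an illustration rather than code from this PR, would guard the call instead of constructing a CPU device:

    import torch

    def pin_root_device(root_gpu):
        # Pin the default CUDA device only when the index really refers to a
        # CUDA device; torch.cuda.set_device rejects non-CUDA devices.
        if root_gpu is not None and root_gpu >= 0 and torch.cuda.is_available():
            torch.cuda.set_device(torch.device("cuda", root_gpu))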
From 2acd85db201225fb11117220dd9c3049aee1b2af Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Wed, 25 Mar 2020 18:02:45 +0100
Subject: [PATCH 14/15] revert to master

---
 pytorch_lightning/loggers/tensorboard.py | 40 ++++++++++--------------
 1 file changed, 16 insertions(+), 24 deletions(-)

diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
index 4593cd22e6beb..2ef6a55bcaadc 100644
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -8,7 +8,7 @@
 from pkg_resources import parse_version
 from torch.utils.tensorboard import SummaryWriter
 
-from .base import LightningLoggerBase, rank_zero_only
+from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only
 
 
 class TensorBoardLogger(LightningLoggerBase):
@@ -17,28 +17,24 @@ class TensorBoardLogger(LightningLoggerBase):
     Log to local file system in TensorBoard format
 
     Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to
-    `os.path.join(save_dir, name, version)`
+    ``os.path.join(save_dir, name, version)``
 
-    .. _tf-logger:
+    Example:
+        .. code-block:: python
 
-    Example
-    ------------------
-
-    .. code-block:: python
-
-        logger = TensorBoardLogger("tb_logs", name="my_model")
-        trainer = Trainer(logger=logger)
-        trainer.train(model)
+            logger = TensorBoardLogger("tb_logs", name="my_model")
+            trainer = Trainer(logger=logger)
+            trainer.train(model)
 
     Args:
-        save_dir (str): Save directory
-        name (str): Experiment name. Defaults to "default". If it is the empty string then no per-experiment
+        save_dir: Save directory
+        name: Experiment name. Defaults to "default". If it is the empty string then no per-experiment
             subdirectory is used.
-        version (int|str): Experiment version. If version is not specified the logger inspects the save
+        version: Experiment version. If version is not specified the logger inspects the save
            directory for existing versions, then automatically assigns the next available version.
            If it is a string then it is used as the run-specific subdirectory name,
            otherwise version_${version} is used.
-        \**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.
+        \**kwargs: Other arguments are passed directly to the :class:`SummaryWriter` constructor.
 
     """
     NAME_CSV_TAGS = 'meta_tags.csv'
@@ -101,6 +97,8 @@ def experiment(self) -> SummaryWriter:
     @rank_zero_only
     def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
         params = self._convert_params(params)
+        params = self._flatten_dict(params)
+        sanitized_params = self._sanitize_params(params)
 
         if parse_version(torch.__version__) < parse_version("1.3.0"):
             warn(
@@ -109,21 +107,15 @@ def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
                 " hyperparameter logging."
             )
         else:
-            # Passing only those params which tensorboard allows here:
-            # https://github.com/pytorch/pytorch/blob/master/torch/utils/tensorboard/summary.py#L134
-            from six import string_types
             from torch.utils.tensorboard.summary import hparams
-            tensorboard_params = {}
-            for k, v in params.items():
-                if isinstance(v, (int, float, string_types, bool, torch.Tensor)):
-                    tensorboard_params[k] = v
-            exp, ssi, sei = hparams(tensorboard_params, {})
+            exp, ssi, sei = hparams(sanitized_params, {})
             writer = self.experiment._get_file_writer()
             writer.add_summary(exp)
             writer.add_summary(ssi)
             writer.add_summary(sei)
 
+        # some alternative should be added
-        self.tags.update(params)
+        self.tags.update(sanitized_params)

From 08bb5743fe10dddb843ea6cc1f46e9de57e4f17c Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Wed, 25 Mar 2020 18:04:25 +0100
Subject: [PATCH 15/15] revert to origin

---
 pytorch_lightning/loggers/tensorboard.py | 33 ++++++++++++------------
 1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/pytorch_lightning/loggers/tensorboard.py b/pytorch_lightning/loggers/tensorboard.py
index 2ef6a55bcaadc..9be1d82b7669a 100644
--- a/pytorch_lightning/loggers/tensorboard.py
+++ b/pytorch_lightning/loggers/tensorboard.py
@@ -8,7 +8,7 @@
 from pkg_resources import parse_version
 from torch.utils.tensorboard import SummaryWriter
 
-from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only
+from .base import LightningLoggerBase, rank_zero_only
 
 
 class TensorBoardLogger(LightningLoggerBase):
@@ -17,24 +17,28 @@ class TensorBoardLogger(LightningLoggerBase):
     Log to local file system in TensorBoard format
 
     Implemented using :class:`torch.utils.tensorboard.SummaryWriter`. Logs are saved to
-    ``os.path.join(save_dir, name, version)``
+    `os.path.join(save_dir, name, version)`
 
-    Example:
-        .. code-block:: python
-
-            logger = TensorBoardLogger("tb_logs", name="my_model")
-            trainer = Trainer(logger=logger)
-            trainer.train(model)
+    .. _tf-logger:
+
+    Example
+    ------------------
+
+    .. code-block:: python
+
+        logger = TensorBoardLogger("tb_logs", name="my_model")
+        trainer = Trainer(logger=logger)
+        trainer.train(model)
 
     Args:
-        save_dir: Save directory
-        name: Experiment name. Defaults to "default". If it is the empty string then no per-experiment
+        save_dir (str): Save directory
+        name (str): Experiment name. Defaults to "default". If it is the empty string then no per-experiment
            subdirectory is used.
-        version: Experiment version. If version is not specified the logger inspects the save
+        version (int|str): Experiment version. If version is not specified the logger inspects the save
            directory for existing versions, then automatically assigns the next available version.
            If it is a string then it is used as the run-specific subdirectory name,
           otherwise version_${version} is used.
-        \**kwargs: Other arguments are passed directly to the :class:`SummaryWriter` constructor.
+        \**kwargs (dict): Other arguments are passed directly to the :class:`SummaryWriter` constructor.
 
     """
     NAME_CSV_TAGS = 'meta_tags.csv'
@@ -97,8 +101,6 @@ def experiment(self) -> SummaryWriter:
     @rank_zero_only
     def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
         params = self._convert_params(params)
-        params = self._flatten_dict(params)
-        sanitized_params = self._sanitize_params(params)
 
         if parse_version(torch.__version__) < parse_version("1.3.0"):
             warn(
@@ -108,14 +110,13 @@ def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
             )
         else:
             from torch.utils.tensorboard.summary import hparams
-            exp, ssi, sei = hparams(sanitized_params, {})
+            exp, ssi, sei = hparams(params, {})
             writer = self.experiment._get_file_writer()
             writer.add_summary(exp)
             writer.add_summary(ssi)
             writer.add_summary(sei)
 
-        # some alternative should be added
-        self.tags.update(sanitized_params)
+        self.tags.update(params)
 
     @rank_zero_only
     def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None: