From 80a4104a5645b1ab06d2b998b1e4d80aa6c82de5 Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Sun, 25 Sep 2022 17:17:24 -0700 Subject: [PATCH 1/9] fix documentation to match pytorch-lightning --- darts/models/forecasting/block_rnn_model.py | 6 +++--- darts/models/forecasting/nbeats.py | 6 +++--- darts/models/forecasting/nhits.py | 6 +++--- darts/models/forecasting/rnn_model.py | 6 +++--- darts/models/forecasting/tcn_model.py | 6 +++--- darts/models/forecasting/tft_model.py | 6 +++--- darts/models/forecasting/torch_forecasting_model.py | 6 +++--- darts/models/forecasting/transformer_model.py | 6 +++--- docs/userguide/timeseries.md | 6 +++--- 9 files changed, 27 insertions(+), 27 deletions(-) diff --git a/darts/models/forecasting/block_rnn_model.py b/darts/models/forecasting/block_rnn_model.py index db475e7ad7..d663fc0a4e 100644 --- a/darts/models/forecasting/block_rnn_model.py +++ b/darts/models/forecasting/block_rnn_model.py @@ -222,13 +222,13 @@ def __init__( .. deprecated:: v0.17.0 ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your + Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS. + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and diff --git a/darts/models/forecasting/nbeats.py b/darts/models/forecasting/nbeats.py index 2715a969a7..a7dee22e37 100644 --- a/darts/models/forecasting/nbeats.py +++ b/darts/models/forecasting/nbeats.py @@ -642,13 +642,13 @@ def __init__( .. deprecated:: v0.17.0 ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your + Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS. + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and diff --git a/darts/models/forecasting/nhits.py b/darts/models/forecasting/nhits.py index 5309500890..953f668bed 100644 --- a/darts/models/forecasting/nhits.py +++ b/darts/models/forecasting/nhits.py @@ -578,13 +578,13 @@ def __init__( .. deprecated:: v0.17.0 ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your + Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your ``pl_trainer_kwargs`` dict. 
Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS. + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and diff --git a/darts/models/forecasting/rnn_model.py b/darts/models/forecasting/rnn_model.py index e687433b4c..9ab8037509 100644 --- a/darts/models/forecasting/rnn_model.py +++ b/darts/models/forecasting/rnn_model.py @@ -300,13 +300,13 @@ def __init__( .. deprecated:: v0.17.0 ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your + Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS. + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and diff --git a/darts/models/forecasting/tcn_model.py b/darts/models/forecasting/tcn_model.py index 0d5ec74a47..d518e59c9a 100644 --- a/darts/models/forecasting/tcn_model.py +++ b/darts/models/forecasting/tcn_model.py @@ -343,13 +343,13 @@ def __init__( .. deprecated:: v0.17.0 ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your + Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS. + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and diff --git a/darts/models/forecasting/tft_model.py b/darts/models/forecasting/tft_model.py index b86bcef4da..cadf9dd8ba 100644 --- a/darts/models/forecasting/tft_model.py +++ b/darts/models/forecasting/tft_model.py @@ -782,13 +782,13 @@ def __init__( .. deprecated:: v0.17.0 ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your + Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your ``pl_trainer_kwargs`` dict. 
Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS. + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and diff --git a/darts/models/forecasting/torch_forecasting_model.py b/darts/models/forecasting/torch_forecasting_model.py index b22f091ef7..d1eb47c0a3 100644 --- a/darts/models/forecasting/torch_forecasting_model.py +++ b/darts/models/forecasting/torch_forecasting_model.py @@ -170,13 +170,13 @@ def __init__( .. deprecated:: v0.17.0 ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your + Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS. + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and diff --git a/darts/models/forecasting/transformer_model.py b/darts/models/forecasting/transformer_model.py index d6d8328e24..563306d608 100644 --- a/darts/models/forecasting/transformer_model.py +++ b/darts/models/forecasting/transformer_model.py @@ -431,13 +431,13 @@ def __init__( .. deprecated:: v0.17.0 ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "gpus", "auto_select_gpus"`` in your + Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "gpus": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "gpus": -1, "auto_select_gpus": True}`` to use all available GPUS. + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and diff --git a/docs/userguide/timeseries.md b/docs/userguide/timeseries.md index 9f8d1b484e..7faeb66234 100644 --- a/docs/userguide/timeseries.md +++ b/docs/userguide/timeseries.md @@ -14,7 +14,7 @@ Using a dedicated type to represent time series (instead of say, Pandas DataFram ## Multivariate time series -vs- multiple time series We distinguish univariate from multivariate series: -* A **Multivariate** series contain multiple dimensions (i.e. multiple values per time step). +* A **multivariate** series contain multiple dimensions (i.e. multiple values per time step). 
* A **univariate** series contains only one dimension (i.e., single scalar value for each time step). Sometimes the dimensions are called *components*. A single `TimeSeries` object can be either univariate (if it has a single component), or multivariate (if it has multiple components). In a multivariate series, all components share the same time axis. I.e., they all share the same time stamps. @@ -60,7 +60,7 @@ More methods are documented in the [TimeSeries API documentation](https://unit8c Furthermore, it is possible to concatenate series along different axes using the function `concatenate()`. `axis=0` corresponds to time, `axis=1` corresponds to component and `axis=2` correspond to stochastic sample dimensions. For instance: ```python -from dart import concatenate +from darts import concatenate my_multivariate_series = concatenate([series1, series2, ...], axis=1) ``` @@ -97,7 +97,7 @@ Optionally, `TimeSeries` objects can contain a hierarchy, which specifies how it For instance, the following hierarchy means that the two components `"a"` and `"b"` add up to `"total"`: ```python -hierarchy = {"a": ["total"], "b", ["total"]} +hierarchy = {"a": ["total"], "b": ["total"]} ``` Hierarchies can be used for posthoc forecast reconciliation. Darts offers several reconciliation transformers (usable with `fit()`/`transform()`) - see the [corresponding API documentation](https://unit8co.github.io/darts/generated_api/darts.dataprocessing.transformers.reconciliation.html). 
From a7accf5e77f5b7f232ba8b1f6a61e495286121e6 Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Mon, 26 Sep 2022 10:34:12 -0700 Subject: [PATCH 2/9] fixed broken link --- darts/models/forecasting/block_rnn_model.py | 2 +- darts/models/forecasting/nbeats.py | 2 +- darts/models/forecasting/nhits.py | 2 +- darts/models/forecasting/rnn_model.py | 2 +- darts/models/forecasting/tcn_model.py | 2 +- darts/models/forecasting/tft_model.py | 2 +- darts/models/forecasting/transformer_model.py | 2 +- docs/userguide/gpu_and_tpu_usage.md | 3 ++- 8 files changed, 9 insertions(+), 8 deletions(-) diff --git a/darts/models/forecasting/block_rnn_model.py b/darts/models/forecasting/block_rnn_model.py index d663fc0a4e..446425ca7e 100644 --- a/darts/models/forecasting/block_rnn_model.py +++ b/darts/models/forecasting/block_rnn_model.py @@ -232,7 +232,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. 
diff --git a/darts/models/forecasting/nbeats.py b/darts/models/forecasting/nbeats.py index d32376c115..da9755e886 100644 --- a/darts/models/forecasting/nbeats.py +++ b/darts/models/forecasting/nbeats.py @@ -652,7 +652,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. diff --git a/darts/models/forecasting/nhits.py b/darts/models/forecasting/nhits.py index 953f668bed..eac4cf73fc 100644 --- a/darts/models/forecasting/nhits.py +++ b/darts/models/forecasting/nhits.py @@ -588,7 +588,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. 
diff --git a/darts/models/forecasting/rnn_model.py b/darts/models/forecasting/rnn_model.py index 9035a3b857..a5dae35b86 100644 --- a/darts/models/forecasting/rnn_model.py +++ b/darts/models/forecasting/rnn_model.py @@ -311,7 +311,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. diff --git a/darts/models/forecasting/tcn_model.py b/darts/models/forecasting/tcn_model.py index d518e59c9a..26f3f9f8c8 100644 --- a/darts/models/forecasting/tcn_model.py +++ b/darts/models/forecasting/tcn_model.py @@ -353,7 +353,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. 
diff --git a/darts/models/forecasting/tft_model.py b/darts/models/forecasting/tft_model.py index cadf9dd8ba..d501dedcf4 100644 --- a/darts/models/forecasting/tft_model.py +++ b/darts/models/forecasting/tft_model.py @@ -792,7 +792,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. diff --git a/darts/models/forecasting/transformer_model.py b/darts/models/forecasting/transformer_model.py index 18d1d81325..6132224454 100644 --- a/darts/models/forecasting/transformer_model.py +++ b/darts/models/forecasting/transformer_model.py @@ -441,7 +441,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. diff --git a/docs/userguide/gpu_and_tpu_usage.md b/docs/userguide/gpu_and_tpu_usage.md index 1877863b28..c765b1a452 100644 --- a/docs/userguide/gpu_and_tpu_usage.md +++ b/docs/userguide/gpu_and_tpu_usage.md @@ -83,6 +83,7 @@ Now the model is ready to start predicting, which won't be shown here since it's ## Use a GPU GPUs can dramatically improve the performance of your model in terms of processing time. 
By using an Accelerator in the [Pytorch Lightning Trainer](https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#accelerator), we can enjoy the benefits of a GPU. We only need to instruct our model to use our machine's GPU through PyTorch Lightning Trainer parameters, which are expressed as the `pl_trainer_kwargs` dictionary, like this: + ```python my_model = RNNModel( model="RNN", @@ -90,7 +91,7 @@ my_model = RNNModel( force_reset=True, pl_trainer_kwargs={ "accelerator": "gpu", - "gpus": [0] + "devices": [0] }, ) ``` From b13dfb1a6fa7ba40658483959650f9d4c1b93c4f Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Mon, 26 Sep 2022 10:35:22 -0700 Subject: [PATCH 3/9] added pytorch lightning 1.7 param check --- darts/models/forecasting/torch_forecasting_model.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/darts/models/forecasting/torch_forecasting_model.py b/darts/models/forecasting/torch_forecasting_model.py index 446703a3eb..b7f7dd4548 100644 --- a/darts/models/forecasting/torch_forecasting_model.py +++ b/darts/models/forecasting/torch_forecasting_model.py @@ -81,6 +81,10 @@ logger = get_logger(__name__) +# Check whether we are running pytorch-lightning >= 1.7.0 or not: +tokens = pl.__version__.split(".") +pl_170_or_above = int(tokens[0]) >= 1 and int(tokens[1]) >= 7 + def _get_checkpoint_folder(work_dir, model_name): return os.path.join(work_dir, model_name, CHECKPOINTS_FOLDER) @@ -181,7 +185,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html#select-gpu-devices + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. 
@@ -324,7 +328,6 @@ def __init__( # setup trainer parameters from model creation parameters self.trainer_params = { "accelerator": accelerator, - "gpus": gpus, "auto_select_gpus": auto_select_gpus, "logger": model_logger, "max_epochs": n_epochs, @@ -332,6 +335,10 @@ def __init__( "enable_checkpointing": save_checkpoints, "callbacks": [cb for cb in [checkpoint_callback] if cb is not None], } + if pl_170_or_above: + self.trainer_params["devices"] = gpus + else: + self.trainer_params["gpus"] = gpus # update trainer parameters with user defined `pl_trainer_kwargs` if pl_trainer_kwargs is not None: From 8564adb6907ca61e0af339b5dc67aa6c91506f04 Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Mon, 26 Sep 2022 11:05:50 -0700 Subject: [PATCH 4/9] updated a few more files --- CONTRIBUTING.md | 10 +++++----- .../forecasting/torch_forecasting_model.py | 20 +++++++++---------- .../test_torch_forecasting_model.py | 4 ++-- docs/userguide/gpu_and_tpu_usage.md | 2 +- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cb70a15eab..25df10c37c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,7 +53,7 @@ and discuss it with some of the core team. * Additionally you can generate an xml report and use VSCode Coverage gutter to identify untested lines with `./coverage.sh xml` 9. If your contribution introduces a non-negligible change, add it to `CHANGELOG.md` under the "Unreleased" section. - You can already refer to the pull request. In addition, for tracking contributions we are happy if you provide + You can already refer to the pull request. In addition, for tracking contributions we are happy if you provide your full name (if you want to) and link to your Github handle. Example: ``` - Added new feature XYZ. 
[#001](https://https://github.com/unit8co/darts/pull/001) @@ -77,8 +77,8 @@ To ensure you don't need to worry about formatting and linting when contributing ### Development environment on Mac with Apple Silicon M1 processor (arm64 architecture) -Please follow the procedure described in [INSTALL.md](https://github.com/unit8co/darts/blob/master/INSTALL.md#test-environment-appple-m1-processor) -to set up a x_64 emulated environment. For the development environment, instead of installing Darts with -`pip install darts`, instead go to the darts cloned repo location and install the packages with: `pip install -r requirements/dev-all.txt`. +Please follow the procedure described in [INSTALL.md](https://github.com/unit8co/darts/blob/master/INSTALL.md#test-environment-appple-m1-processor) +to set up a x_64 emulated environment. For the development environment, instead of installing Darts with +`pip install darts`, instead go to the darts cloned repo location and install the packages with: `pip install -r requirements/dev-all.txt`. If necessary, follow the same steps to setup libomp for lightgbm. -Finally, verify your overall environment setup by successfully running all unitTests with gradlew or pytest. \ No newline at end of file +Finally, verify your overall environment setup by successfully running all unitTests with gradlew or pytest. 
diff --git a/darts/models/forecasting/torch_forecasting_model.py b/darts/models/forecasting/torch_forecasting_model.py index b7f7dd4548..3f1794b085 100644 --- a/darts/models/forecasting/torch_forecasting_model.py +++ b/darts/models/forecasting/torch_forecasting_model.py @@ -301,7 +301,7 @@ def __init__( pass # TODO: remove below in the next version ======> - accelerator, gpus, auto_select_gpus = self._extract_torch_devices( + accelerator, devices, auto_select_gpus = self._extract_torch_devices( torch_device_str ) # TODO: until here <====== @@ -336,9 +336,9 @@ def __init__( "callbacks": [cb for cb in [checkpoint_callback] if cb is not None], } if pl_170_or_above: - self.trainer_params["devices"] = gpus + self.trainer_params["devices"] = devices else: - self.trainer_params["gpus"] = gpus + self.trainer_params["gpus"] = devices # update trainer parameters with user defined `pl_trainer_kwargs` if pl_trainer_kwargs is not None: @@ -367,7 +367,7 @@ def _extract_torch_devices( Returns ------- Tuple - (accelerator, gpus, auto_select_gpus) + (accelerator, devices, auto_select_gpus) """ if torch_device_str is None: @@ -376,7 +376,7 @@ def _extract_torch_devices( device_warning = ( "`torch_device_str` is deprecated and will be removed in a coming Darts version. For full support " "of all torch devices, use PyTorch-Lightnings trainer flags and pass them inside " - "`pl_trainer_kwargs`. Flags of interest are {`accelerator`, `gpus`, `auto_select_gpus`, `devices`}. " + "`pl_trainer_kwargs`. Flags of interest are {`accelerator`, `devices`, `auto_select_gpus`}. 
" "For more information, visit " "https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags" ) @@ -395,16 +395,16 @@ def _extract_torch_devices( ) device_split = torch_device_str.split(":") - gpus = None + devices = None auto_select_gpus = False accelerator = "gpu" if device_split[0] == "cuda" else device_split[0] if len(device_split) == 2 and accelerator == "gpu": - gpus = device_split[1] - gpus = [int(gpus)] + devices = device_split[1] + devices = [int(devices)] elif len(device_split) == 1: if accelerator == "gpu": - gpus = -1 + devices = -1 auto_select_gpus = True else: raise_if( @@ -412,7 +412,7 @@ def _extract_torch_devices( f"unknown torch_device_str `{torch_device_str}`. " + device_warning, logger, ) - return accelerator, gpus, auto_select_gpus + return accelerator, devices, auto_select_gpus @classmethod def _validate_model_params(cls, **kwargs): diff --git a/darts/tests/models/forecasting/test_torch_forecasting_model.py b/darts/tests/models/forecasting/test_torch_forecasting_model.py index bb15d4bd80..5ca51925be 100644 --- a/darts/tests/models/forecasting/test_torch_forecasting_model.py +++ b/darts/tests/models/forecasting/test_torch_forecasting_model.py @@ -338,11 +338,11 @@ def test_devices(self): ] for torch_device, settings in torch_devices: - accelerator, gpus, auto_select_gpus = settings + accelerator, devices, auto_select_gpus = settings model = RNNModel(12, "RNN", 10, 10, torch_device_str=torch_device) self.assertEqual(model.trainer_params["accelerator"], accelerator) - self.assertEqual(model.trainer_params["gpus"], gpus) + self.assertEqual(model.trainer_params["devices"], devices) self.assertEqual( model.trainer_params["auto_select_gpus"], auto_select_gpus ) diff --git a/docs/userguide/gpu_and_tpu_usage.md b/docs/userguide/gpu_and_tpu_usage.md index c765b1a452..37dab5e8e1 100644 --- a/docs/userguide/gpu_and_tpu_usage.md +++ b/docs/userguide/gpu_and_tpu_usage.md @@ -171,4 +171,4 @@ Epoch 299: 100% 8/8 [00:00<00:00, 
8.52it/s, loss=0.00285, v_num=logs] ``` -From the output we can see that our model is using 4 TPUs. \ No newline at end of file +From the output we can see that our model is using 4 TPUs. From 1f50570b9578ab9ad1fdaf783de01727b403264d Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Tue, 27 Sep 2022 14:47:08 -0700 Subject: [PATCH 5/9] removed torch_device_str --- CHANGELOG.md | 1 + darts/models/forecasting/block_rnn_model.py | 29 +++--- darts/models/forecasting/nbeats.py | 29 +++--- darts/models/forecasting/nhits.py | 29 +++--- darts/models/forecasting/rnn_model.py | 29 +++--- darts/models/forecasting/tcn_model.py | 30 +++--- darts/models/forecasting/tft_model.py | 29 +++--- .../forecasting/torch_forecasting_model.py | 98 +++---------------- darts/models/forecasting/transformer_model.py | 29 +++--- .../test_torch_forecasting_model.py | 19 ---- 10 files changed, 90 insertions(+), 232 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ece63d5fe..034210391c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Darts is still in an early development phase, and we cannot always guarantee bac - Added support for retraining model(s) every `n` iteration and on custom condition in `historical_forecasts` method of `ForecastingModel` abstract class. Addressed issues [#135](https://github.com/unit8co/darts/issues/135) and [#623](https://github.com/unit8co/darts/issues/623) by [Francesco Bruzzesi](https://github.com/fbruzzesi). - New LayerNorm alternatives, RMSNorm and LayerNormNoBias [#1113](https://github.com/unit8co/darts/issues/1113) by [Greg DeVos](https://github.com/gdevos010). 
- Fixed type hinting for ExponentialSmoothing model [#1185](https://https://github.com/unit8co/darts/pull/1185) by [Rijk van der Meulen](https://github.com/rijkvandermeulen) +- 🔴 `torch_device_str` has been removed from all torch models in favor of Pytorch Lightning's `pl_trainer_kwargs` method [#1244](https://github.com/unit8co/darts/pull/1244) by [Greg DeVos](https://github.com/gdevos010). [Full Changelog](https://github.com/unit8co/darts/compare/0.21.0...master) diff --git a/darts/models/forecasting/block_rnn_model.py b/darts/models/forecasting/block_rnn_model.py index 446425ca7e..6be6b7d5af 100644 --- a/darts/models/forecasting/block_rnn_model.py +++ b/darts/models/forecasting/block_rnn_model.py @@ -215,24 +215,6 @@ def __init__( nr_epochs_val_period Number of epochs to wait before evaluating the validation loss (if a validation ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``. - torch_device_str - Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None`` - which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use - GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only. - - .. deprecated:: v0.17.0 - ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your - ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` - dict: - - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
- - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. @@ -274,6 +256,17 @@ def __init__( object. Check the `PL Trainer documentation `_ for more information about the supported kwargs. Default: ``None``. + Running on GPU(s) is also possible using ``pl_trainer_kwargs`` by specifying keys ``"accelerator", + "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` + dict: + + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/nbeats.py b/darts/models/forecasting/nbeats.py index da9755e886..d65e020039 100644 --- a/darts/models/forecasting/nbeats.py +++ b/darts/models/forecasting/nbeats.py @@ -635,24 +635,6 @@ def __init__( nr_epochs_val_period Number of epochs to wait before evaluating the validation loss (if a validation ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``. - torch_device_str - Optionally, a string indicating the torch device to use. 
By default, ``torch_device_str`` is ``None`` - which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use - GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only. - - .. deprecated:: v0.17.0 - ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your - ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` - dict: - - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. @@ -694,6 +676,17 @@ def __init__( object. Check the `PL Trainer documentation `_ for more information about the supported kwargs. Default: ``None``. + Running on GPU(s) is also possible using ``pl_trainer_kwargs`` by specifying keys ``"accelerator", + "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` + dict: + + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
+ + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/nhits.py b/darts/models/forecasting/nhits.py index eac4cf73fc..0533bdae97 100644 --- a/darts/models/forecasting/nhits.py +++ b/darts/models/forecasting/nhits.py @@ -571,24 +571,6 @@ def __init__( nr_epochs_val_period Number of epochs to wait before evaluating the validation loss (if a validation ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``. - torch_device_str - Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None`` - which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use - GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only. - - .. deprecated:: v0.17.0 - ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your - ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` - dict: - - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
- - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. @@ -630,6 +612,17 @@ def __init__( object. Check the `PL Trainer documentation `_ for more information about the supported kwargs. Default: ``None``. + Running on GPU(s) is also possible using ``pl_trainer_kwargs`` by specifying keys ``"accelerator", + "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` + dict: + + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/rnn_model.py b/darts/models/forecasting/rnn_model.py index a5dae35b86..45b75d52e2 100644 --- a/darts/models/forecasting/rnn_model.py +++ b/darts/models/forecasting/rnn_model.py @@ -294,24 +294,6 @@ def __init__( nr_epochs_val_period Number of epochs to wait before evaluating the validation loss (if a validation ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``. 
- torch_device_str - Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None`` - which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use - GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only. - - .. deprecated:: v0.17.0 - ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your - ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` - dict: - - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. @@ -353,6 +335,17 @@ def __init__( object. Check the `PL Trainer documentation `_ for more information about the supported kwargs. Default: ``None``. + Running on GPU(s) is also possible using ``pl_trainer_kwargs`` by specifying keys ``"accelerator", + "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` + dict: + + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
+ + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/tcn_model.py b/darts/models/forecasting/tcn_model.py index 26f3f9f8c8..259bd8492b 100644 --- a/darts/models/forecasting/tcn_model.py +++ b/darts/models/forecasting/tcn_model.py @@ -336,24 +336,6 @@ def __init__( nr_epochs_val_period Number of epochs to wait before evaluating the validation loss (if a validation ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``. - torch_device_str - Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None`` - which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use - GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only. - - .. deprecated:: v0.17.0 - ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your - ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` - dict: - - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
- - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. @@ -395,6 +377,17 @@ def __init__( object. Check the `PL Trainer documentation `_ for more information about the supported kwargs. Default: ``None``. + Running on GPU(s) is also possible using ``pl_trainer_kwargs`` by specifying keys ``"accelerator", + "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` + dict: + + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/tft_model.py b/darts/models/forecasting/tft_model.py index d501dedcf4..23b74e2d33 100644 --- a/darts/models/forecasting/tft_model.py +++ b/darts/models/forecasting/tft_model.py @@ -775,24 +775,6 @@ def __init__( nr_epochs_val_period Number of epochs to wait before evaluating the validation loss (if a validation ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``.
- torch_device_str - Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None`` - which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use - GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only. - - .. deprecated:: v0.17.0 - ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your - ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` - dict: - - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. @@ -834,6 +816,17 @@ def __init__( object. Check the `PL Trainer documentation `_ for more information about the supported kwargs. Default: ``None``. + Running on GPU(s) is also possible using ``pl_trainer_kwargs`` by specifying keys ``"accelerator", + "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` + dict: + + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
+ + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/torch_forecasting_model.py b/darts/models/forecasting/torch_forecasting_model.py index 3f1794b085..41aa39013a 100644 --- a/darts/models/forecasting/torch_forecasting_model.py +++ b/darts/models/forecasting/torch_forecasting_model.py @@ -127,7 +127,6 @@ def __init__( work_dir: str = os.path.join(os.getcwd(), DEFAULT_DARTS_FOLDER), log_tensorboard: bool = False, nr_epochs_val_period: int = 1, - torch_device_str: Optional[str] = None, force_reset: bool = False, save_checkpoints: bool = False, add_encoders: Optional[dict] = None, @@ -168,24 +167,6 @@ def __init__( nr_epochs_val_period Number of epochs to wait before evaluating the validation loss (if a validation ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``. - torch_device_str - Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None`` - which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use - GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only. - - .. deprecated:: v0.17.0 - ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your - ``pl_trainer_kwargs`` dict. 
Some examples for setting the devices inside the ``pl_trainer_kwargs`` - dict: - - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. @@ -227,6 +208,17 @@ def __init__( object. Check the `PL Trainer documentation `_ for more information about the supported kwargs. Default: ``None``. + Running on GPU(s) is also possible using ``pl_trainer_kwargs`` by specifying keys ``"accelerator", + "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` + dict: + + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. 
The model will stop training early if the validation loss `val_loss` does not improve beyond @@ -300,12 +292,6 @@ def __init__( else: pass - # TODO: remove below in the next version ======> - accelerator, devices, auto_select_gpus = self._extract_torch_devices( - torch_device_str - ) - # TODO: until here <====== - # save best epoch on val_loss and last epoch under 'darts_logs/model_name/checkpoints/' if save_checkpoints: checkpoint_callback = pl.callbacks.ModelCheckpoint( @@ -327,18 +313,12 @@ def __init__( # setup trainer parameters from model creation parameters self.trainer_params = { - "accelerator": accelerator, - "auto_select_gpus": auto_select_gpus, "logger": model_logger, "max_epochs": n_epochs, "check_val_every_n_epoch": nr_epochs_val_period, "enable_checkpointing": save_checkpoints, "callbacks": [cb for cb in [checkpoint_callback] if cb is not None], } - if pl_170_or_above: - self.trainer_params["devices"] = devices - else: - self.trainer_params["gpus"] = devices # update trainer parameters with user defined `pl_trainer_kwargs` if pl_trainer_kwargs is not None: @@ -358,62 +338,6 @@ def __init__( # pl_module_params must be set in __init__ method of TorchForecastingModel subclass self.pl_module_params: Optional[dict] = None - @staticmethod - def _extract_torch_devices( - torch_device_str, - ) -> Tuple[str, Optional[Union[list, int]], bool]: - """This method handles the deprecated `torch_device_str` and should be removed in a future Darts version. - - Returns - ------- - Tuple - (accelerator, devices, auto_select_gpus) - """ - - if torch_device_str is None: - return "cpu", None, False - - device_warning = ( - "`torch_device_str` is deprecated and will be removed in a coming Darts version. For full support " - "of all torch devices, use PyTorch-Lightnings trainer flags and pass them inside " - "`pl_trainer_kwargs`. Flags of interest are {`accelerator`, `devices`, `auto_select_gpus`}. 
" - "For more information, visit " - "https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags" - ) - raise_deprecation_warning(device_warning, logger) - # check torch device - raise_if_not( - any( - [ - device_str in torch_device_str - for device_str in ["cuda", "cpu", "auto"] - ] - ), - f"unknown torch_device_str `{torch_device_str}`. String must contain one of `('cuda', 'cpu', 'auto') " - + device_warning, - logger, - ) - device_split = torch_device_str.split(":") - - devices = None - auto_select_gpus = False - accelerator = "gpu" if device_split[0] == "cuda" else device_split[0] - - if len(device_split) == 2 and accelerator == "gpu": - devices = device_split[1] - devices = [int(devices)] - elif len(device_split) == 1: - if accelerator == "gpu": - devices = -1 - auto_select_gpus = True - else: - raise_if( - True, - f"unknown torch_device_str `{torch_device_str}`. " + device_warning, - logger, - ) - return accelerator, devices, auto_select_gpus - @classmethod def _validate_model_params(cls, **kwargs): """validate that parameters used at model creation are part of :class:`TorchForecastingModel`, diff --git a/darts/models/forecasting/transformer_model.py b/darts/models/forecasting/transformer_model.py index 6132224454..301b699f6c 100644 --- a/darts/models/forecasting/transformer_model.py +++ b/darts/models/forecasting/transformer_model.py @@ -424,24 +424,6 @@ def __init__( nr_epochs_val_period Number of epochs to wait before evaluating the validation loss (if a validation ``TimeSeries`` is passed to the :func:`fit()` method). Default: ``1``. - torch_device_str - Optionally, a string indicating the torch device to use. By default, ``torch_device_str`` is ``None`` - which will run on CPU. Set it to ``"cuda"`` to use all available GPUs or ``"cuda:i"`` to only use - GPU ``i`` (``i`` must be an integer). For example "cuda:0" will use the first GPU only. - - .. 
deprecated:: v0.17.0 - ``torch_device_str`` has been deprecated in v0.17.0 and will be removed in a future version. - Instead, specify this with keys ``"accelerator", "devices", "auto_select_gpus"`` in your - ``pl_trainer_kwargs`` dict. Some examples for setting the devices inside the ``pl_trainer_kwargs`` - dict: - - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus force_reset If set to ``True``, any previously-existing model with the same name will be reset (all checkpoints will be discarded). Default: ``False``. @@ -483,6 +465,17 @@ def __init__( object. Check the `PL Trainer documentation `_ for more information about the supported kwargs. Default: ``None``. + Running on GPU(s) is also possible using ``pl_trainer_kwargs`` by specifying keys ``"accelerator", + "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` + dict: + + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. 
The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/tests/models/forecasting/test_torch_forecasting_model.py b/darts/tests/models/forecasting/test_torch_forecasting_model.py index 5ca51925be..9a9fa8f249 100644 --- a/darts/tests/models/forecasting/test_torch_forecasting_model.py +++ b/darts/tests/models/forecasting/test_torch_forecasting_model.py @@ -328,25 +328,6 @@ def test_lr_schedulers(self): # should not raise an error model.fit(self.series, epochs=1) - def test_devices(self): - torch_devices = [ - (None, ("cpu", None, False)), - ("cpu", ("cpu", None, False)), - ("cuda:0", ("gpu", [0], False)), - ("cuda", ("gpu", -1, True)), - ("auto", ("auto", None, False)), - ] - - for torch_device, settings in torch_devices: - accelerator, devices, auto_select_gpus = settings - model = RNNModel(12, "RNN", 10, 10, torch_device_str=torch_device) - - self.assertEqual(model.trainer_params["accelerator"], accelerator) - self.assertEqual(model.trainer_params["devices"], devices) - self.assertEqual( - model.trainer_params["auto_select_gpus"], auto_select_gpus - ) - def test_wrong_model_creation_params(self): valid_kwarg = {"pl_trainer_kwargs": {}} invalid_kwarg = {"some_invalid_kwarg": None} From 0f11407635e5c9d38e279ed0e5ac3792bc2d2c9a Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Wed, 28 Sep 2022 07:18:10 -0700 Subject: [PATCH 6/9] Update darts/models/forecasting/block_rnn_model.py Co-authored-by: Dennis Bader --- darts/models/forecasting/block_rnn_model.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/darts/models/forecasting/block_rnn_model.py b/darts/models/forecasting/block_rnn_model.py index 6be6b7d5af..f64c0271c8 100644 --- a/darts/models/forecasting/block_rnn_model.py +++ b/darts/models/forecasting/block_rnn_model.py @@ -260,13 +260,15 @@ def __init__( "devices", and "auto_select_gpus"``. 
Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. 
The model will stop training early if the validation loss `val_loss` does not improve beyond From 6b12050294aa7ec7f7334482c252e9361bc4f343 Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Wed, 28 Sep 2022 09:11:19 -0700 Subject: [PATCH 7/9] formatting --- darts/models/forecasting/nbeats.py | 14 ++++++++------ darts/models/forecasting/nhits.py | 14 ++++++++------ darts/models/forecasting/rnn_model.py | 14 ++++++++------ darts/models/forecasting/tcn_model.py | 14 ++++++++------ darts/models/forecasting/tft_model.py | 14 ++++++++------ .../models/forecasting/torch_forecasting_model.py | 14 ++++++++------ darts/models/forecasting/transformer_model.py | 14 ++++++++------ 7 files changed, 56 insertions(+), 42 deletions(-) diff --git a/darts/models/forecasting/nbeats.py b/darts/models/forecasting/nbeats.py index d65e020039..0b3ae2fb84 100644 --- a/darts/models/forecasting/nbeats.py +++ b/darts/models/forecasting/nbeats.py @@ -680,13 +680,15 @@ def __init__( "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
+ + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/nhits.py b/darts/models/forecasting/nhits.py index 0533bdae97..540fbe565d 100644 --- a/darts/models/forecasting/nhits.py +++ b/darts/models/forecasting/nhits.py @@ -616,13 +616,15 @@ def __init__( "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. 
The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/rnn_model.py b/darts/models/forecasting/rnn_model.py index 45b75d52e2..429313f711 100644 --- a/darts/models/forecasting/rnn_model.py +++ b/darts/models/forecasting/rnn_model.py @@ -339,13 +339,15 @@ def __init__( "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. 
The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/tcn_model.py b/darts/models/forecasting/tcn_model.py index 259bd8492b..e5bd23ac5b 100644 --- a/darts/models/forecasting/tcn_model.py +++ b/darts/models/forecasting/tcn_model.py @@ -382,12 +382,14 @@ def __init__( dict: - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/tft_model.py b/darts/models/forecasting/tft_model.py index 23b74e2d33..a2ad856bd2 100644 --- a/darts/models/forecasting/tft_model.py +++ b/darts/models/forecasting/tft_model.py @@ -820,13 +820,15 @@ def __init__( "devices", and "auto_select_gpus"``.
Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. - For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/torch_forecasting_model.py b/darts/models/forecasting/torch_forecasting_model.py index 41aa39013a..d5542b1ed6 100644 --- a/darts/models/forecasting/torch_forecasting_model.py +++ b/darts/models/forecasting/torch_forecasting_model.py @@ -212,13 +212,15 @@ def __init__( "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
- For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond diff --git a/darts/models/forecasting/transformer_model.py b/darts/models/forecasting/transformer_model.py index 301b699f6c..223014ed32 100644 --- a/darts/models/forecasting/transformer_model.py +++ b/darts/models/forecasting/transformer_model.py @@ -469,13 +469,15 @@ def __init__( "devices", and "auto_select_gpus"``. Some examples for setting the devices inside the ``pl_trainer_kwargs`` dict: - - ``{"accelerator": "cpu"}`` for CPU, - - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), - - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. 
- For more info, see here: - https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and - https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + - ``{"accelerator": "cpu"}`` for CPU, + - ``{"accelerator": "gpu", "devices": [i]}`` to use only GPU ``i`` (``i`` must be an integer), + - ``{"accelerator": "gpu", "devices": -1, "auto_select_gpus": True}`` to use all available GPUS. + + For more info, see here: + https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and + https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. The model will stop training early if the validation loss `val_loss` does not improve beyond From f31da9e2131333e533d9edd9a27e08c72be8c088 Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Wed, 28 Sep 2022 09:18:53 -0700 Subject: [PATCH 8/9] formatting --- darts/models/forecasting/block_rnn_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/darts/models/forecasting/block_rnn_model.py b/darts/models/forecasting/block_rnn_model.py index f64c0271c8..a90dfa198e 100644 --- a/darts/models/forecasting/block_rnn_model.py +++ b/darts/models/forecasting/block_rnn_model.py @@ -268,7 +268,7 @@ def __init__( For more info, see here: https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#trainer-flags , and https://pytorch-lightning.readthedocs.io/en/stable/accelerators/gpu_basic.html#train-on-multiple-gpus - + With parameter ``"callbacks"`` you can add custom or PyTorch-Lightning built-in callbacks to Darts' :class:`TorchForecastingModel`. Below is an example for adding EarlyStopping to the training process. 
The model will stop training early if the validation loss `val_loss` does not improve beyond From 46ff8c66c3335f02b7b51b2018176c4a17e38b57 Mon Sep 17 00:00:00 2001 From: Greg DeVos Date: Wed, 28 Sep 2022 11:53:14 -0700 Subject: [PATCH 9/9] cleanup --- darts/models/forecasting/torch_forecasting_model.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/darts/models/forecasting/torch_forecasting_model.py b/darts/models/forecasting/torch_forecasting_model.py index d5542b1ed6..452540043d 100644 --- a/darts/models/forecasting/torch_forecasting_model.py +++ b/darts/models/forecasting/torch_forecasting_model.py @@ -81,10 +81,6 @@ logger = get_logger(__name__) -# Check whether we are running pytorch-lightning >= 1.7.0 or not: -tokens = pl.__version__.split(".") -pl_170_or_above = int(tokens[0]) >= 1 and int(tokens[1]) >= 7 - def _get_checkpoint_folder(work_dir, model_name): return os.path.join(work_dir, model_name, CHECKPOINTS_FOLDER)