diff --git a/darts/models/forecasting/block_rnn_model.py b/darts/models/forecasting/block_rnn_model.py
index db41dd9455..36cf6e210d 100644
--- a/darts/models/forecasting/block_rnn_model.py
+++ b/darts/models/forecasting/block_rnn_model.py
@@ -65,16 +65,6 @@ def __init__(
             The fraction of neurons that are dropped in all-but-last RNN layers.
         **kwargs
             all parameters required for :class:`darts.model.forecasting_models.PLForecastingModule` base class.
-
-        Inputs
-        ------
-        x of shape `(batch_size, input_chunk_length, input_size, nr_params)`
-            Tensor containing the features of the input sequence.
-
-        Outputs
-        -------
-        y of shape `(batch_size, output_chunk_length, target_size, nr_params)`
-            Tensor containing the prediction at the last time step of the sequence.
         """
         super().__init__(**kwargs)
 
@@ -91,6 +81,21 @@ def __init__(
     @io_processor
     @abstractmethod
     def forward(self, x_in: Tuple) -> torch.Tensor:
+        """BlockRNN Module forward.
+
+        Parameters
+        ----------
+        x_in
+            Tuple of Tensors containing the features of the input sequence. The tuple has elements
+            (past target, historic future covariates, future covariates, static covariates).
+            The shape of the past target is `(batch_size, input_length, input_size)`.
+
+        Returns
+        -------
+        torch.Tensor
+            The BlockRNN output Tensor with shape `(batch_size, output_chunk_length, target_size, nr_params)`.
+            It contains the prediction at the last time step of the sequence.
+        """
         pass
 
 
diff --git a/darts/models/forecasting/pl_forecasting_module.py b/darts/models/forecasting/pl_forecasting_module.py
index c58ffd57e3..7ade7eaac9 100644
--- a/darts/models/forecasting/pl_forecasting_module.py
+++ b/darts/models/forecasting/pl_forecasting_module.py
@@ -3,6 +3,7 @@
 """
 
 from abc import ABC, abstractmethod
+from functools import wraps
 from typing import Any, Dict, Optional, Sequence, Tuple, Union
 
 import pytorch_lightning as pl
@@ -42,6 +43,7 @@ def forward(self, *args, **kwargs)
         normalizes batch input target features, and inverse transform the forward output back to the original
         scale
     """
+    @wraps(forward)
     def forward_wrapper(self, *args, **kwargs):
         if not self.use_reversible_instance_norm:
             return forward(self, *args, **kwargs)
diff --git a/darts/models/forecasting/rnn_model.py b/darts/models/forecasting/rnn_model.py
index e0f5d43629..16ef18015e 100644
--- a/darts/models/forecasting/rnn_model.py
+++ b/darts/models/forecasting/rnn_model.py
@@ -63,18 +63,6 @@ def __init__(
             The fraction of neurons that are dropped in all-but-last RNN layers.
         **kwargs
             all parameters required for :class:`darts.model.forecasting_models.PLForecastingModule` base class.
-
-        Inputs
-        ------
-        x of shape `(batch_size, input_length, input_size)`
-            Tensor containing the features of the input sequence. The `input_length` is not fixed.
-
-        Outputs
-        -------
-        y of shape `(batch_size, output_chunk_length, target_size, nr_params)`
-            Tensor containing the outputs of the RNN at every time step of the input sequence.
-            During training the whole tensor is used as output, whereas during prediction we only use y[:, -1, :].
-            However, this module always returns the whole Tensor.
         """
         # RNNModule doesn't really need input and output_chunk_length for PLModule
         super().__init__(**kwargs)
@@ -92,6 +80,25 @@ def __init__(
     def forward(
         self, x_in: Tuple, h: Optional[torch.Tensor] = None
     ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """RNN Module forward.
+
+        Parameters
+        ----------
+        x_in
+            Tuple of Tensors containing the features of the input sequence. The tuple has elements (past target,
+            historic future covariates, future covariates, static covariates). The shape of the past target is
+            `(batch_size, input_length, input_size)`.
+        h
+            Optionally, the hidden state.
+
+        Returns
+        -------
+        Tuple[torch.Tensor, torch.Tensor]
+            Tuple of Tensors with elements (RNN output, hidden state). The RNN output Tensor has shape
+            `(batch_size, output_chunk_length, target_size, nr_params)`. It contains the outputs at every
+            time step of the input sequence. During training the whole tensor is used as output, whereas during
+            prediction we only use y[:, -1, :]. However, this module always returns the whole Tensor.
+        """
         pass
 
     def _produce_train_output(self, input_batch: Tuple) -> torch.Tensor:
diff --git a/docs/Makefile b/docs/Makefile
index 68435389c9..a155c7f7df 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -7,6 +7,7 @@ SPHINXOPTS    ?=
 SPHINXBUILD   ?= sphinx-build
 SOURCEDIR     = source
 USERGUIDEDIR  = userguide
+GENAPIDIR     = generated_api
 BUILDDIR      = build
 
 # Put it first so that "make" without argument is like "make help".
@@ -50,6 +51,7 @@ generate-userguide:
 generate-api:
 	@echo "[Makefile] generating API using sphinx-apidoc..."
 	@sphinx-apidoc -e -f --templatedir templates -o "$(SOURCEDIR)/generated_api" ../darts ../darts/logging.py ../darts/tests/*
+	@cp -r $(GENAPIDIR)/*.rst "$(SOURCEDIR)/generated_api/"
 
 html:
 	# Note: this target has to be called "html" because its name is given as an argument to Sphinx
diff --git a/docs/generated_api/darts.models.forecasting.block_rnn_model.rst b/docs/generated_api/darts.models.forecasting.block_rnn_model.rst
new file mode 100644
index 0000000000..149cc6f3ae
--- /dev/null
+++ b/docs/generated_api/darts.models.forecasting.block_rnn_model.rst
@@ -0,0 +1,10 @@
+Block Recurrent Neural Networks
+-------------------------------
+
+.. autoclass:: darts.models.forecasting.block_rnn_model.BlockRNNModel
+    :members:
+    :exclude-members:
+
+.. autoclass:: darts.models.forecasting.block_rnn_model.CustomBlockRNNModule
+    :members:
+    :no-inherited-members:
diff --git a/docs/generated_api/darts.models.forecasting.rnn_model.rst b/docs/generated_api/darts.models.forecasting.rnn_model.rst
new file mode 100644
index 0000000000..31ecf93cbd
--- /dev/null
+++ b/docs/generated_api/darts.models.forecasting.rnn_model.rst
@@ -0,0 +1,10 @@
+Recurrent Neural Networks
+-------------------------
+
+.. autoclass:: darts.models.forecasting.rnn_model.RNNModel
+    :members:
+    :exclude-members:
+
+.. autoclass:: darts.models.forecasting.rnn_model.CustomRNNModule
+    :members:
+    :no-inherited-members:
diff --git a/docs/templates/module.rst_t b/docs/templates/module.rst_t
index d6ffb054f5..1477837371 100644
--- a/docs/templates/module.rst_t
+++ b/docs/templates/module.rst_t
@@ -1,4 +1,3 @@
-
 .. automodule:: {{ qualname }}
    :members:
    :undoc-members:
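Note (not part of the diff above): the `forward` contract documented in the new docstrings is easiest to see with a small example. Below is a minimal, hypothetical sketch of a user-defined block module. The class name `TinyLSTMModule`, the constructor pass-through, and the use of attributes such as `self.input_size`, `self.hidden_dim`, `self.num_layers`, `self.target_size`, `self.nr_params`, and `self.output_chunk_length` are assumptions about what `CustomBlockRNNModule` / `PLForecastingModule` store, not something this change introduces.

```python
from typing import Tuple

import torch
import torch.nn as nn

from darts.models.forecasting.block_rnn_model import CustomBlockRNNModule
from darts.models.forecasting.pl_forecasting_module import io_processor


class TinyLSTMModule(CustomBlockRNNModule):
    """Hypothetical custom block module: LSTM encoder + linear projection head."""

    def __init__(self, **kwargs):
        # Assumption: the base class stores input_size, hidden_dim, num_layers,
        # target_size, nr_params and output_chunk_length as attributes.
        super().__init__(**kwargs)
        self.rnn = nn.LSTM(
            self.input_size, self.hidden_dim, self.num_layers, batch_first=True
        )
        self.fc = nn.Linear(
            self.hidden_dim,
            self.output_chunk_length * self.target_size * self.nr_params,
        )

    @io_processor
    def forward(self, x_in: Tuple) -> torch.Tensor:
        # First tuple element: the past target (and past covariates), with
        # shape (batch_size, input_length, input_size) as documented above.
        x = x_in[0]
        _, (hidden, _) = self.rnn(x)
        # Project the last layer's hidden state and reshape to the documented
        # output shape (batch_size, output_chunk_length, target_size, nr_params).
        out = self.fc(hidden[-1])
        return out.view(
            x.shape[0], self.output_chunk_length, self.target_size, self.nr_params
        )
```

Such a subclass would be passed to `BlockRNNModel` via its `model` argument in place of a string identifier, assuming custom module classes are accepted there. Decorating the override with `@io_processor` mirrors the built-in modules shown in the diff; that decorator's `forward_wrapper` now carries `functools.wraps(forward)`, so the docstrings added here survive the wrapping and are picked up by the new Sphinx pages.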