From 16922f7c9ad1fc01f56fc8ee7d1830f7064df574 Mon Sep 17 00:00:00 2001 From: Jirka Date: Fri, 24 Feb 2023 08:35:18 +0100 Subject: [PATCH 1/9] update docs imports --- .../accelerators/accelerator_prepare.rst | 2 +- .../accelerators/gpu_intermediate.rst | 6 +- .../source-pytorch/accelerators/hpu_basic.rst | 2 +- .../accelerators/hpu_intermediate.rst | 14 +- .../accelerators/ipu_advanced.rst | 12 +- .../source-pytorch/accelerators/ipu_basic.rst | 4 +- .../accelerators/ipu_intermediate.rst | 10 +- .../accelerators/tpu_advanced.rst | 4 +- docs/source-pytorch/accelerators/tpu_faq.rst | 6 +- .../accelerators/tpu_intermediate.rst | 4 +- .../advanced/model_parallel.rst | 80 ++++---- .../advanced/post_training_quantization.rst | 2 +- .../advanced/pruning_quantization.rst | 4 +- .../advanced/strategy_registry.rst | 2 +- .../advanced/third_party/colossalai.rst | 2 +- .../advanced/training_tricks.rst | 30 +-- docs/source-pytorch/api_references.rst | 28 +-- .../cli/lightning_cli_advanced.rst | 6 +- .../cli/lightning_cli_advanced_2.rst | 4 +- .../cli/lightning_cli_advanced_3.rst | 36 ++-- .../cli/lightning_cli_expert.rst | 20 +- docs/source-pytorch/cli/lightning_cli_faq.rst | 6 +- .../cli/lightning_cli_intermediate.rst | 10 +- .../cli/lightning_cli_intermediate_2.rst | 18 +- .../clouds/cluster_advanced.rst | 6 +- docs/source-pytorch/clouds/cluster_expert.rst | 4 +- docs/source-pytorch/common/checkpointing.rst | 2 +- .../common/checkpointing_advanced.rst | 4 +- .../common/checkpointing_expert.rst | 26 +-- .../common/checkpointing_intermediate.rst | 24 +-- .../common/checkpointing_migration.rst | 4 +- docs/source-pytorch/common/child_modules.rst | 2 +- docs/source-pytorch/common/console_logs.rst | 4 +- docs/source-pytorch/common/early_stopping.rst | 24 +-- .../common/evaluation_intermediate.rst | 16 +- .../common/gradient_accumulation.rst | 6 +- .../common/lightning_module.rst | 194 +++++++++--------- docs/source-pytorch/common/optimization.rst | 16 +- .../common/precision_expert.rst | 2 +- docs/source-pytorch/common/progress_bar.rst | 46 ++--- docs/source-pytorch/common/remote_fs.rst | 2 +- docs/source-pytorch/common/trainer.rst | 62 +++--- docs/source-pytorch/conf.py | 32 +-- docs/source-pytorch/data/datamodule.rst | 48 ++--- docs/source-pytorch/debug/debugging_basic.rst | 16 +- .../debug/debugging_intermediate.rst | 4 +- .../deploy/production_advanced.rst | 6 +- .../deploy/production_advanced_2.rst | 2 +- .../deploy/production_basic.rst | 4 +- docs/source-pytorch/ecosystem/bolts.rst | 2 +- .../ecosystem/community_examples.rst | 6 +- .../source-pytorch/extensions/accelerator.rst | 6 +- docs/source-pytorch/extensions/callbacks.rst | 86 ++++---- .../extensions/callbacks_state.rst | 8 +- .../extensions/entry_points.rst | 4 +- docs/source-pytorch/extensions/logging.rst | 46 ++--- docs/source-pytorch/extensions/plugins.rst | 8 +- docs/source-pytorch/extensions/strategy.rst | 26 +-- docs/source-pytorch/guides/data.rst | 34 +-- docs/source-pytorch/guides/speed.rst | 20 +- .../model/manual_optimization.rst | 20 +- .../model/train_model_basic.rst | 2 +- docs/source-pytorch/starter/converting.rst | 10 +- docs/source-pytorch/starter/introduction.rst | 2 +- docs/source-pytorch/starter/style_guide.rst | 12 +- .../tuning/profiler_advanced.rst | 4 +- docs/source-pytorch/tuning/profiler_basic.rst | 8 +- .../source-pytorch/tuning/profiler_expert.rst | 8 +- .../tuning/profiler_intermediate.rst | 12 +- .../visualize/experiment_managers.rst | 2 +- .../visualize/logging_advanced.rst | 10 +- .../visualize/logging_expert.rst | 
24 +-- .../visualize/logging_intermediate.rst | 2 +- .../visualize/supported_exp_managers.rst | 34 +-- 74 files changed, 632 insertions(+), 632 deletions(-) diff --git a/docs/source-pytorch/accelerators/accelerator_prepare.rst b/docs/source-pytorch/accelerators/accelerator_prepare.rst index f1da6867a0eee..403544370bc2b 100644 --- a/docs/source-pytorch/accelerators/accelerator_prepare.rst +++ b/docs/source-pytorch/accelerators/accelerator_prepare.rst @@ -50,7 +50,7 @@ This will make your code scale to any arbitrary number of GPUs or TPUs with Ligh z = torch.Tensor(2, 3) z = z.to(x) -The :class:`~pytorch_lightning.core.module.LightningModule` knows what device it is on. You can access the reference via ``self.device``. +The :class:`~lightning.pytorch.core.module.LightningModule` knows what device it is on. You can access the reference via ``self.device``. Sometimes it is necessary to store tensors as module attributes. However, if they are not parameters they will remain on the CPU even if the module gets moved to a new device. To prevent that and remain device agnostic, register the tensor as a buffer in your modules' ``__init__`` method with :meth:`~torch.nn.Module.register_buffer`. diff --git a/docs/source-pytorch/accelerators/gpu_intermediate.rst b/docs/source-pytorch/accelerators/gpu_intermediate.rst index df7f0ac632449..e3199d31c57e9 100644 --- a/docs/source-pytorch/accelerators/gpu_intermediate.rst +++ b/docs/source-pytorch/accelerators/gpu_intermediate.rst @@ -228,9 +228,9 @@ DDP can also be used with 1 GPU, but there's no reason to do so other than debug Implement Your Own Distributed (DDP) training ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you need your own way to init PyTorch DDP you can override :meth:`pytorch_lightning.strategies.ddp.DDPStrategy.setup_distributed`. +If you need your own way to init PyTorch DDP you can override :meth:`lightning.pytorch.strategies.ddp.DDPStrategy.setup_distributed`. -If you also need to use your own DDP implementation, override :meth:`pytorch_lightning.strategies.ddp.DDPStrategy.configure_ddp`. +If you also need to use your own DDP implementation, override :meth:`lightning.pytorch.strategies.ddp.DDPStrategy.configure_ddp`. ---------- @@ -279,7 +279,7 @@ Lightning allows explicitly specifying the backend via the `process_group_backen .. code-block:: python - from pytorch_lightning.strategies import DDPStrategy + from lightning.pytorch.strategies import DDPStrategy # Explicitly specify the process group backend if you choose to ddp = DDPStrategy(process_group_backend="nccl") diff --git a/docs/source-pytorch/accelerators/hpu_basic.rst b/docs/source-pytorch/accelerators/hpu_basic.rst index 28e4c93996f11..cf18fe119fa0b 100644 --- a/docs/source-pytorch/accelerators/hpu_basic.rst +++ b/docs/source-pytorch/accelerators/hpu_basic.rst @@ -46,7 +46,7 @@ To enable PyTorch Lightning to utilize the HPU accelerator, simply provide ``acc The ``devices>1`` parameter with HPUs enables the Habana accelerator for distributed training. -It uses :class:`~pytorch_lightning.strategies.hpu_parallel.HPUParallelStrategy` internally which is based on DDP +It uses :class:`~lightning.pytorch.strategies.hpu_parallel.HPUParallelStrategy` internally which is based on DDP strategy with the addition of Habana's collective communication library (HCCL) to support scale-up within a node and scale-out across multiple nodes. 
diff --git a/docs/source-pytorch/accelerators/hpu_intermediate.rst b/docs/source-pytorch/accelerators/hpu_intermediate.rst index 3ef5a6f2bb485..8c21f8b549f5c 100644 --- a/docs/source-pytorch/accelerators/hpu_intermediate.rst +++ b/docs/source-pytorch/accelerators/hpu_intermediate.rst @@ -23,21 +23,21 @@ By default, HPU training will use 32-bit precision. To enable mixed precision, s Customize Mixed Precision ------------------------- -Internally, :class:`~pytorch_lightning.plugins.precision.hpu.HPUPrecisionPlugin` uses the Habana Mixed Precision (HMP) package to enable mixed precision training. +Internally, :class:`~lightning.pytorch.plugins.precision.hpu.HPUPrecisionPlugin` uses the Habana Mixed Precision (HMP) package to enable mixed precision training. You can execute the ops in FP32 or BF16 precision. The HMP package modifies the Python operators to add the appropriate cast operations for the arguments before execution. The default settings enable users to enable mixed precision training with minimal code easily. In addition to the default settings in HMP, users also have the option of overriding these defaults and providing their -BF16 and FP32 operator lists by passing them as parameter to :class:`~pytorch_lightning.plugins.precision.hpu.HPUPrecisionPlugin`. +BF16 and FP32 operator lists by passing them as parameter to :class:`~lightning.pytorch.plugins.precision.hpu.HPUPrecisionPlugin`. The below snippet shows an example model using MNIST with a single Habana Gaudi device and making use of HMP by overriding the default parameters. This enables advanced users to provide their own BF16 and FP32 operator list instead of using the HMP defaults. .. code-block:: python - import pytorch_lightning as pl - from pytorch_lightning.plugins import HPUPrecisionPlugin + import lightning.pytorch as pl + from lightning.pytorch.plugins import HPUPrecisionPlugin # Initialize a trainer with HPU accelerator for HPU strategy for single device, # with mixed precision using overidden HMP settings @@ -72,7 +72,7 @@ For more details, please refer to `PyTorch Mixed Precision Training on Gaudi `_ function as a string to select which weights to prune (`random_unstructured `_, `RandomStructured `_, etc) or implement your own by subclassing `BasePruningMethod `_. .. code-block:: python - from pytorch_lightning.callbacks import ModelPruning + from lightning.pytorch.callbacks import ModelPruning # set the amount to be the fraction of parameters to prune trainer = Trainer(callbacks=[ModelPruning("l1_unstructured", amount=0.5)]) diff --git a/docs/source-pytorch/advanced/strategy_registry.rst b/docs/source-pytorch/advanced/strategy_registry.rst index 914db517eb121..af56da2aee4e4 100644 --- a/docs/source-pytorch/advanced/strategy_registry.rst +++ b/docs/source-pytorch/advanced/strategy_registry.rst @@ -25,7 +25,7 @@ Additionally, you can pass your custom registered training strategies to the ``s .. code-block:: python - from pytorch_lightning.strategies import DDPStrategy, StrategyRegistry, CheckpointIO + from lightning.pytorch.strategies import DDPStrategy, StrategyRegistry, CheckpointIO class CustomCheckpointIO(CheckpointIO): diff --git a/docs/source-pytorch/advanced/third_party/colossalai.rst b/docs/source-pytorch/advanced/third_party/colossalai.rst index 5223bdc0ad60d..4132e142eaa47 100644 --- a/docs/source-pytorch/advanced/third_party/colossalai.rst +++ b/docs/source-pytorch/advanced/third_party/colossalai.rst @@ -55,7 +55,7 @@ See a full example of a benchmark with the a `GPT-2 model `__ by the PyTorch team. -.. 
seealso:: The :class:`~pytorch_lightning.callbacks.StochasticWeightAveraging` callback +.. seealso:: The :class:`~lightning.pytorch.callbacks.StochasticWeightAveraging` callback .. testcode:: @@ -78,11 +78,11 @@ Auto-scaling of batch size can be enabled to find the largest batch size that fi memory. Large batch size often yields a better estimation of the gradients, but may also result in longer training time. Inspired by https://github.com/BlackHC/toma. -.. seealso:: :class:`~pytorch_lightning.tuner.tuning.Tuner` +.. seealso:: :class:`~lightning.pytorch.tuner.tuning.Tuner` .. code-block:: python - from pytorch_lightning.tuner import Tuner + from lightning.pytorch.tuner import Tuner # Create a tuner for the trainer trainer = Trainer(...) @@ -178,13 +178,13 @@ The algorithm in short works by: Customizing Batch Size Finder ============================= -1. You can also customize the :class:`~pytorch_lightning.callbacks.batch_size_finder.BatchSizeFinder` callback to run +1. You can also customize the :class:`~lightning.pytorch.callbacks.batch_size_finder.BatchSizeFinder` callback to run at different epochs. This feature is useful while fine-tuning models since you can't always use the same batch size after unfreezing the backbone. .. code-block:: python - from pytorch_lightning.callbacks import BatchSizeFinder + from lightning.pytorch.callbacks import BatchSizeFinder class FineTuneBatchSizeFinder(BatchSizeFinder): @@ -208,7 +208,7 @@ Customizing Batch Size Finder .. code-block:: python - from pytorch_lightning.callbacks import BatchSizeFinder + from lightning.pytorch.callbacks import BatchSizeFinder class EvalBatchSizeFinder(BatchSizeFinder): @@ -263,7 +263,7 @@ Using Lightning's built-in LR finder To enable the learning rate finder, your :doc:`lightning module <../common/lightning_module>` needs to have a ``learning_rate`` or ``lr`` attribute (or as a field in your ``hparams`` i.e. -``hparams.learning_rate`` or ``hparams.lr``). Then, create the :class:`~pytorch_lightning.tuner.tuning.Tuner` via ``tuner = Tuner(trainer)`` +``hparams.learning_rate`` or ``hparams.lr``). Then, create the :class:`~lightning.pytorch.tuner.tuning.Tuner` via ``tuner = Tuner(trainer)`` and call ``tuner.lr_find(model)`` to run the LR finder. The suggested ``learning_rate`` will be written to the console and will be automatically set to your :doc:`lightning module <../common/lightning_module>`, which can be accessed @@ -271,7 +271,7 @@ via ``self.learning_rate`` or ``self.lr``. .. code-block:: python - from pytorch_lightning.tuner import Tuner + from lightning.pytorch.tuner import Tuner class LitModel(LightningModule): @@ -344,11 +344,11 @@ This is the point returned py ``lr_finder.suggestion()``. Customizing Learning Rate Finder ================================ -You can also customize the :class:`~pytorch_lightning.callbacks.lr_finder.LearningRateFinder` callback to run at different epochs. This feature is useful while fine-tuning models. +You can also customize the :class:`~lightning.pytorch.callbacks.lr_finder.LearningRateFinder` callback to run at different epochs. This feature is useful while fine-tuning models. .. 
code-block:: python - from pytorch_lightning.callbacks import LearningRateFinder + from lightning.pytorch.callbacks import LearningRateFinder class FineTuneLearningRateFinder(LearningRateFinder): @@ -388,7 +388,7 @@ Refer to :doc:`Advanced GPU Optimized Training <../advanced/model_parallel>` for Sharing Datasets Across Process Boundaries ****************************************** -The :class:`~pytorch_lightning.core.datamodule.LightningDataModule` class provides an organized way to decouple data loading from training logic, with :meth:`~pytorch_lightning.core.hooks.DataHooks.prepare_data` being used for downloading and pre-processing the dataset on a single process, and :meth:`~pytorch_lightning.core.hooks.DataHooks.setup` loading the pre-processed data for each process individually: +The :class:`~lightning.pytorch.core.datamodule.LightningDataModule` class provides an organized way to decouple data loading from training logic, with :meth:`~lightning.pytorch.core.hooks.DataHooks.prepare_data` being used for downloading and pre-processing the dataset on a single process, and :meth:`~lightning.pytorch.core.hooks.DataHooks.setup` loading the pre-processed data for each process individually: .. code-block:: python @@ -406,7 +406,7 @@ However, for in-memory datasets, that means that each process will hold a (redun For example, when training Graph Neural Networks, a common strategy is to load the entire graph into CPU memory for fast access to the entire graph structure and its features, and to then perform neighbor sampling to obtain mini-batches that fit onto the GPU. A simple way to prevent redundant dataset replicas is to rely on :obj:`torch.multiprocessing` to share the `data automatically between spawned processes via shared memory `_. -For this, all data pre-loading should be done on the main process inside :meth:`DataModule.__init__`. As a result, all tensor-data will get automatically shared when using the :class:`~pytorch_lightning.plugins.strategies.ddp_spawn.DDPSpawnStrategy` strategy. +For this, all data pre-loading should be done on the main process inside :meth:`DataModule.__init__`. As a result, all tensor-data will get automatically shared when using the :class:`~lightning.pytorch.plugins.strategies.ddp_spawn.DDPSpawnStrategy` strategy. .. warning:: @@ -429,4 +429,4 @@ For this, all data pre-loading should be done on the main process inside :meth:` trainer = Trainer(accelerator="gpu", devices=2, strategy="ddp_spawn") trainer.fit(model, datamodule) -See the `graph-level `_ and `node-level `_ prediction examples in PyTorch Geometric for practical use-cases. +See the `graph-level `_ and `node-level `_ prediction examples in PyTorch Geometric for practical use-cases. diff --git a/docs/source-pytorch/api_references.rst b/docs/source-pytorch/api_references.rst index 8187a74ff49fd..b8d499854f08a 100644 --- a/docs/source-pytorch/api_references.rst +++ b/docs/source-pytorch/api_references.rst @@ -3,7 +3,7 @@ accelerators ------------ -.. currentmodule:: pytorch_lightning.accelerators +.. currentmodule:: lightning.pytorch.accelerators .. autosummary:: :toctree: api @@ -20,7 +20,7 @@ accelerators callbacks --------- -.. currentmodule:: pytorch_lightning.callbacks +.. currentmodule:: lightning.pytorch.callbacks .. autosummary:: :toctree: api @@ -52,7 +52,7 @@ callbacks cli ----- -.. currentmodule:: pytorch_lightning.cli +.. currentmodule:: lightning.pytorch.cli .. autosummary:: :toctree: api @@ -66,7 +66,7 @@ cli core ---- -.. currentmodule:: pytorch_lightning.core +.. 
currentmodule:: lightning.pytorch.core .. autosummary:: :toctree: api @@ -86,7 +86,7 @@ core loggers ------- -.. currentmodule:: pytorch_lightning.loggers +.. currentmodule:: lightning.pytorch.loggers .. autosummary:: :toctree: api @@ -106,7 +106,7 @@ plugins precision """"""""" -.. currentmodule:: pytorch_lightning.plugins.precision +.. currentmodule:: lightning.pytorch.plugins.precision .. autosummary:: :toctree: api @@ -126,7 +126,7 @@ precision environments """""""""""" -.. currentmodule:: pytorch_lightning.plugins.environments +.. currentmodule:: lightning.pytorch.plugins.environments .. autosummary:: :toctree: api @@ -145,7 +145,7 @@ environments io "" -.. currentmodule:: pytorch_lightning.plugins.io +.. currentmodule:: lightning.pytorch.plugins.io .. autosummary:: :toctree: api @@ -162,7 +162,7 @@ io others """""" -.. currentmodule:: pytorch_lightning.plugins +.. currentmodule:: lightning.pytorch.plugins .. autosummary:: :toctree: api @@ -175,7 +175,7 @@ others profiler -------- -.. currentmodule:: pytorch_lightning.profilers +.. currentmodule:: lightning.pytorch.profilers .. autosummary:: :toctree: api @@ -192,7 +192,7 @@ profiler trainer ------- -.. currentmodule:: pytorch_lightning.trainer.trainer +.. currentmodule:: lightning.pytorch.trainer.trainer .. autosummary:: :toctree: api @@ -204,7 +204,7 @@ trainer strategies ---------- -.. currentmodule:: pytorch_lightning.strategies +.. currentmodule:: lightning.pytorch.strategies .. autosummary:: :toctree: api @@ -227,7 +227,7 @@ strategies tuner ----- -.. currentmodule:: pytorch_lightning.tuner.tuning +.. currentmodule:: lightning.pytorch.tuner.tuning .. autosummary:: :toctree: api @@ -239,7 +239,7 @@ tuner utilities --------- -.. currentmodule:: pytorch_lightning.utilities +.. currentmodule:: lightning.pytorch.utilities .. autosummary:: :toctree: api diff --git a/docs/source-pytorch/cli/lightning_cli_advanced.rst b/docs/source-pytorch/cli/lightning_cli_advanced.rst index 1534078c9c4d6..da9bec2ea4212 100644 --- a/docs/source-pytorch/cli/lightning_cli_advanced.rst +++ b/docs/source-pytorch/cli/lightning_cli_advanced.rst @@ -9,7 +9,7 @@ Configure hyperparameters from the CLI (Advanced) As a project becomes more complex, the number of configurable options becomes very large, making it inconvenient to control through individual command line arguments. To address this, CLIs implemented using -:class:`~pytorch_lightning.cli.LightningCLI` always support receiving input from configuration files. The default format +:class:`~lightning.pytorch.cli.LightningCLI` always support receiving input from configuration files. The default format used for config files is YAML. .. tip:: @@ -48,7 +48,7 @@ respective log directory a ``config.yaml`` file. These files can be used to triv python main.py fit --config lightning_logs/version_7/config.yaml -The automatic saving of the config is done by the special callback :class:`~pytorch_lightning.cli.SaveConfigCallback`. +The automatic saving of the config is done by the special callback :class:`~lightning.pytorch.cli.SaveConfigCallback`. This callback is automatically added to the ``Trainer``. To disable the save of the config, instantiate ``LightningCLI`` with ``save_config_callback=None``. @@ -107,7 +107,7 @@ which generates a config like: trainer: ... 
model: - class_path: pytorch_lightning.demos.boring_classes.DemoModel + class_path: lightning.pytorch.demos.boring_classes.DemoModel init_args: out_dim: 10 learning_rate: 0.02 diff --git a/docs/source-pytorch/cli/lightning_cli_advanced_2.rst b/docs/source-pytorch/cli/lightning_cli_advanced_2.rst index bd5136a1f4ca7..749ce9e49de0e 100644 --- a/docs/source-pytorch/cli/lightning_cli_advanced_2.rst +++ b/docs/source-pytorch/cli/lightning_cli_advanced_2.rst @@ -6,8 +6,8 @@ import torch from unittest import mock from typing import List - import pytorch_lightning.cli as pl_cli - from pytorch_lightning import LightningModule, LightningDataModule, Trainer, Callback + import lightning.pytorch.cli as pl_cli + from lightning.pytorch import LightningModule, LightningDataModule, Trainer, Callback class NoFitTrainer(Trainer): diff --git a/docs/source-pytorch/cli/lightning_cli_advanced_3.rst b/docs/source-pytorch/cli/lightning_cli_advanced_3.rst index b4f63289e2995..06e5d708d87f4 100644 --- a/docs/source-pytorch/cli/lightning_cli_advanced_3.rst +++ b/docs/source-pytorch/cli/lightning_cli_advanced_3.rst @@ -6,8 +6,8 @@ import torch from unittest import mock from typing import List - import pytorch_lightning.cli as pl_cli - from pytorch_lightning import LightningModule, LightningDataModule, Trainer, Callback + import lightning.pytorch.cli as pl_cli + from lightning.pytorch import LightningModule, LightningDataModule, Trainer, Callback class NoFitTrainer(Trainer): @@ -69,9 +69,9 @@ to subclass the CLI, but still, use the CLI's instantiation and argument parsing Trainer Callbacks and arguments with class type ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A very important argument of the :class:`~pytorch_lightning.trainer.trainer.Trainer` class is the ``callbacks``. In +A very important argument of the :class:`~lightning.pytorch.trainer.trainer.Trainer` class is the ``callbacks``. In contrast to simpler arguments that take numbers or strings, ``callbacks`` expects a list of instances of subclasses of -:class:`~pytorch_lightning.callbacks.Callback`. To specify this kind of argument in a config file, each callback must be +:class:`~lightning.pytorch.callbacks.Callback`. To specify this kind of argument in a config file, each callback must be given as a dictionary, including a ``class_path`` entry with an import path of the class and optionally an ``init_args`` entry with arguments to use to instantiate. Therefore, a simple configuration file that defines two callbacks is the following: @@ -80,18 +80,18 @@ following: trainer: callbacks: - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: patience: 5 - - class_path: pytorch_lightning.callbacks.LearningRateMonitor + - class_path: lightning.pytorch.callbacks.LearningRateMonitor init_args: ... -Similar to the callbacks, any parameter in :class:`~pytorch_lightning.trainer.trainer.Trainer` and user extended -:class:`~pytorch_lightning.core.module.LightningModule` and -:class:`~pytorch_lightning.core.datamodule.LightningDataModule` classes that have as type hint a class, can be +Similar to the callbacks, any parameter in :class:`~lightning.pytorch.trainer.trainer.Trainer` and user extended +:class:`~lightning.pytorch.core.module.LightningModule` and +:class:`~lightning.pytorch.core.datamodule.LightningDataModule` classes that have as type hint a class, can be configured the same way using ``class_path`` and ``init_args``. 
If the package that defines a subclass is imported -before the :class:`~pytorch_lightning.cli.LightningCLI` class is run, the name can be used instead of the full import +before the :class:`~lightning.pytorch.cli.LightningCLI` class is run, the name can be used instead of the full import path. From command line the syntax is the following: @@ -120,7 +120,7 @@ callback appended. Here is an example: .. note:: - Serialized config files (e.g. ``--print_config`` or :class:`~pytorch_lightning.cli.SaveConfigCallback`) always have + Serialized config files (e.g. ``--print_config`` or :class:`~lightning.pytorch.cli.SaveConfigCallback`) always have the full ``class_path``, even when class name shorthand notation is used in the command line or in input config files. @@ -152,14 +152,14 @@ A possible config file could be as follows: ... trainer: callbacks: - - class_path: pytorch_lightning.callbacks.EarlyStopping + - class_path: lightning.pytorch.callbacks.EarlyStopping init_args: patience: 5 ... Only model classes that are a subclass of ``MyModelBaseClass`` would be allowed, and similarly, only subclasses of -``MyDataModuleBaseClass``. If as base classes :class:`~pytorch_lightning.core.module.LightningModule` and -:class:`~pytorch_lightning.core.datamodule.LightningDataModule` is given, then the CLI would allow any lightning module +``MyDataModuleBaseClass``. If as base classes :class:`~lightning.pytorch.core.module.LightningModule` and +:class:`~lightning.pytorch.core.datamodule.LightningDataModule` is given, then the CLI would allow any lightning module and data module. .. tip:: @@ -257,7 +257,7 @@ Multiple optimizers and schedulers By default, the CLIs support multiple optimizers and/or learning schedulers, automatically implementing ``configure_optimizers``. This behavior can be disabled by providing ``auto_configure_optimizers=False`` on -instantiation of :class:`~pytorch_lightning.cli.LightningCLI`. This would be required for example to support multiple +instantiation of :class:`~lightning.pytorch.cli.LightningCLI`. This would be required for example to support multiple optimizers, for each selecting a particular optimizer class. Similar to multiple submodules, this can be done via `dependency injection `__. Unlike the submodules, it is not possible to expect an instance of a class, because optimizers require the module's parameters to optimize, which are only @@ -308,7 +308,7 @@ that uses dependency injection for an optimizer and a learning scheduler is: .. code-block:: python - from pytorch_lightning.cli import OptimizerCallable, LRSchedulerCallable, LightningCLI + from lightning.pytorch.cli import OptimizerCallable, LRSchedulerCallable, LightningCLI class MyModel(LightningModule): @@ -338,14 +338,14 @@ OptimizerCallable = lambda p: torch.optim.SGD(p, lr=0.01)``. Run from Python ^^^^^^^^^^^^^^^ -Even though the :class:`~pytorch_lightning.cli.LightningCLI` class is designed to help in the implementation of command +Even though the :class:`~lightning.pytorch.cli.LightningCLI` class is designed to help in the implementation of command line tools, for some use cases it is desired to run directly from Python. To allow this there is the ``args`` parameter. An example could be to first implement a normal CLI script, but adding an ``args`` parameter with default ``None`` to the main function as follows: .. 
code:: python - from pytorch_lightning.cli import ArgsType, LightningCLI + from lightning.pytorch.cli import ArgsType, LightningCLI def cli_main(args: ArgsType = None): diff --git a/docs/source-pytorch/cli/lightning_cli_expert.rst b/docs/source-pytorch/cli/lightning_cli_expert.rst index 94c28242d380c..105b7e2a80217 100644 --- a/docs/source-pytorch/cli/lightning_cli_expert.rst +++ b/docs/source-pytorch/cli/lightning_cli_expert.rst @@ -6,8 +6,8 @@ import torch from unittest import mock from typing import List - import pytorch_lightning.cli as pl_cli - from pytorch_lightning import LightningModule, LightningDataModule, Trainer, Callback + import lightning.pytorch.cli as pl_cli + from lightning.pytorch import LightningModule, LightningDataModule, Trainer, Callback class NoFitTrainer(Trainer): @@ -62,22 +62,22 @@ Configure hyperparameters from the CLI (Expert) Customize the LightningCLI ************************** -The init parameters of the :class:`~pytorch_lightning.cli.LightningCLI` class can be used to customize some things, +The init parameters of the :class:`~lightning.pytorch.cli.LightningCLI` class can be used to customize some things, e.g., the description of the tool, enabling parsing of environment variables, and additional arguments to instantiate the trainer and configuration parser. Nevertheless, the init arguments are not enough for many use cases. For this reason, the class is designed so that it can be extended to customize different parts of the command line tool. The argument parser class used by -:class:`~pytorch_lightning.cli.LightningCLI` is :class:`~pytorch_lightning.cli.LightningArgumentParser`, which is an +:class:`~lightning.pytorch.cli.LightningCLI` is :class:`~lightning.pytorch.cli.LightningArgumentParser`, which is an extension of python's argparse, thus adding arguments can be done using the :func:`add_argument` method. In contrast to argparse, it has additional methods to add arguments. For example :func:`add_class_arguments` add all arguments from the init of a class. For more details, see the `respective documentation `_. -The :class:`~pytorch_lightning.cli.LightningCLI` class has the -:meth:`~pytorch_lightning.cli.LightningCLI.add_arguments_to_parser` method can be implemented to include more arguments. +The :class:`~lightning.pytorch.cli.LightningCLI` class has the +:meth:`~lightning.pytorch.cli.LightningCLI.add_arguments_to_parser` method can be implemented to include more arguments. After parsing, the configuration is stored in the ``config`` attribute of the class instance. The -:class:`~pytorch_lightning.cli.LightningCLI` class also has two methods that can be used to run code before and after +:class:`~lightning.pytorch.cli.LightningCLI` class also has two methods that can be used to run code before and after the trainer runs: ``before_`` and ``after_``. A realistic example of this would be to send an email before and after the execution. The code for the ``fit`` subcommand would be something like this: @@ -102,7 +102,7 @@ trainer class can be found in ``self.config['fit']['trainer']``. .. tip:: - Have a look at the :class:`~pytorch_lightning.cli.LightningCLI` class API reference to learn about other methods + Have a look at the :class:`~lightning.pytorch.cli.LightningCLI` class API reference to learn about other methods that can be extended to customize a CLI. ---- @@ -118,7 +118,7 @@ implemented as follows: .. 
testcode:: - from pytorch_lightning.callbacks import EarlyStopping + from lightning.pytorch.callbacks import EarlyStopping class MyLightningCLI(LightningCLI): @@ -211,7 +211,7 @@ A more compact version that avoids writing a dictionary would be: **************** Argument linking **************** -Another case in which it might be desired to extend :class:`~pytorch_lightning.cli.LightningCLI` is that the model and +Another case in which it might be desired to extend :class:`~lightning.pytorch.cli.LightningCLI` is that the model and data module depends on a common parameter. For example, in some cases, both classes require to know the ``batch_size``. It is a burden and error-prone to give the same value twice in a config file. To avoid this, the parser can be configured so that a value is only given once and then propagated accordingly. With a tool implemented like the one diff --git a/docs/source-pytorch/cli/lightning_cli_faq.rst b/docs/source-pytorch/cli/lightning_cli_faq.rst index 29a679a85871f..6afaacf67863c 100644 --- a/docs/source-pytorch/cli/lightning_cli_faq.rst +++ b/docs/source-pytorch/cli/lightning_cli_faq.rst @@ -78,9 +78,9 @@ use a subcommand as follows: What is the relation between LightningCLI and argparse? ******************************************************* -:class:`~pytorch_lightning.cli.LightningCLI` makes use of `jsonargparse `__ +:class:`~lightning.pytorch.cli.LightningCLI` makes use of `jsonargparse `__ which is an extension of `argparse `__. Due to this, -:class:`~pytorch_lightning.cli.LightningCLI` follows the same arguments style as many POSIX command line tools. Long +:class:`~lightning.pytorch.cli.LightningCLI` follows the same arguments style as many POSIX command line tools. Long options are prefixed with two dashes and its corresponding values are separated by space or an equal sign, as ``--option value`` or ``--option=value``. Command line options are parsed from left to right, therefore if a setting appears multiple times, the value most to the right will override the previous ones. @@ -91,7 +91,7 @@ multiple times, the value most to the right will override the previous ones. What is the override order of LightningCLI? ******************************************* -The final configuration of CLIs implemented with :class:`~pytorch_lightning.cli.LightningCLI` can depend on default +The final configuration of CLIs implemented with :class:`~lightning.pytorch.cli.LightningCLI` can depend on default config files (if defined), environment variables (if enabled) and command line arguments. The override order between these is the following: diff --git a/docs/source-pytorch/cli/lightning_cli_intermediate.rst b/docs/source-pytorch/cli/lightning_cli_intermediate.rst index 34d54fb56ae97..dc81b4191e6f8 100644 --- a/docs/source-pytorch/cli/lightning_cli_intermediate.rst +++ b/docs/source-pytorch/cli/lightning_cli_intermediate.rst @@ -13,7 +13,7 @@ Configure hyperparameters from the CLI (Intermediate) LightningCLI requirements ************************* -The :class:`~pytorch_lightning.cli.LightningCLI` class is designed to significantly ease the implementation of CLIs. To +The :class:`~lightning.pytorch.cli.LightningCLI` class is designed to significantly ease the implementation of CLIs. To use this class, an additional Python requirement is necessary than the minimal installation of Lightning provides. 
To enable, either install all extras: @@ -32,16 +32,16 @@ or if only interested in ``LightningCLI``, just install jsonargparse: ****************** Implementing a CLI ****************** -Implementing a CLI is as simple as instantiating a :class:`~pytorch_lightning.cli.LightningCLI` object giving as +Implementing a CLI is as simple as instantiating a :class:`~lightning.pytorch.cli.LightningCLI` object giving as arguments classes for a ``LightningModule`` and optionally a ``LightningDataModule``: .. code:: python # main.py - from pytorch_lightning.cli import LightningCLI + from lightning.pytorch.cli import LightningCLI # simple demo classes for your convenience - from pytorch_lightning.demos.boring_classes import DemoModel, BoringDataModule + from lightning.pytorch.demos.boring_classes import DemoModel, BoringDataModule def cli_main(): @@ -131,7 +131,7 @@ View all available options with the ``--help`` argument given after the subcomma (type: int, default: 10) --model.learning_rate LEARNING_RATE (type: float, default: 0.02) - : + : --data CONFIG Path to a configuration file. --data.data_dir DATA_DIR (type: str, default: ./) diff --git a/docs/source-pytorch/cli/lightning_cli_intermediate_2.rst b/docs/source-pytorch/cli/lightning_cli_intermediate_2.rst index c60c339607281..d484928a0059e 100644 --- a/docs/source-pytorch/cli/lightning_cli_intermediate_2.rst +++ b/docs/source-pytorch/cli/lightning_cli_intermediate_2.rst @@ -56,8 +56,8 @@ To support multiple models, when instantiating ``LightningCLI`` omit the ``model .. code:: python # main.py - from pytorch_lightning.cli import LightningCLI - from pytorch_lightning.demos.boring_classes import DemoModel + from lightning.pytorch.cli import LightningCLI + from lightning.pytorch.demos.boring_classes import DemoModel class Model1(DemoModel): @@ -100,8 +100,8 @@ To support multiple data modules, when instantiating ``LightningCLI`` omit the ` # main.py import torch - from pytorch_lightning.cli import LightningCLI - from pytorch_lightning.demos.boring_classes import BoringDataModule + from lightning.pytorch.cli import LightningCLI + from lightning.pytorch.demos.boring_classes import BoringDataModule class FakeDataset1(BoringDataModule): @@ -156,8 +156,8 @@ Furthermore, any custom subclass of :class:`torch.optim.Optimizer` can be used a # main.py import torch - from pytorch_lightning.cli import LightningCLI - from pytorch_lightning.demos.boring_classes import DemoModel, BoringDataModule + from lightning.pytorch.cli import LightningCLI + from lightning.pytorch.demos.boring_classes import DemoModel, BoringDataModule class LitAdam(torch.optim.Adam): @@ -207,8 +207,8 @@ Furthermore, any custom subclass of ``torch.optim.lr_scheduler.LRScheduler`` can # main.py import torch - from pytorch_lightning.cli import LightningCLI - from pytorch_lightning.demos.boring_classes import DemoModel, BoringDataModule + from lightning.pytorch.cli import LightningCLI + from lightning.pytorch.demos.boring_classes import DemoModel, BoringDataModule class LitLRScheduler(torch.optim.lr_scheduler.CosineAnnealingLR): @@ -237,7 +237,7 @@ is run. To select classes from any package by using only the class name, import .. 
code:: python - from pytorch_lightning.cli import LightningCLI + from lightning.pytorch.cli import LightningCLI import my_code.models # noqa: F401 import my_code.data_modules # noqa: F401 import my_code.optimizers # noqa: F401 diff --git a/docs/source-pytorch/clouds/cluster_advanced.rst b/docs/source-pytorch/clouds/cluster_advanced.rst index 8b812536e4744..526e6c7cc8783 100644 --- a/docs/source-pytorch/clouds/cluster_advanced.rst +++ b/docs/source-pytorch/clouds/cluster_advanced.rst @@ -116,17 +116,17 @@ You can change this signal if your environment requires the use of a different o #SBATCH --signal=SIGHUP@90 -Then, when you make your trainer, pass the `requeue_signal` option to the :class:`~pytorch_lightning.plugins.environments.slurm_environment.SLURMEnvironment` plugin: +Then, when you make your trainer, pass the `requeue_signal` option to the :class:`~lightning.pytorch.plugins.environments.slurm_environment.SLURMEnvironment` plugin: .. code-block:: python trainer = Trainer(plugins=[SLURMEnvironment(requeue_signal=signal.SIGHUP)]) -If auto-resubmit is not desired, it can be turned off in the :class:`~pytorch_lightning.plugins.environments.slurm_environment.SLURMEnvironment` plugin: +If auto-resubmit is not desired, it can be turned off in the :class:`~lightning.pytorch.plugins.environments.slurm_environment.SLURMEnvironment` plugin: .. code-block:: python - from pytorch_lightning.plugins.environments import SLURMEnvironment + from lightning.pytorch.plugins.environments import SLURMEnvironment trainer = Trainer(plugins=[SLURMEnvironment(auto_requeue=False)]) diff --git a/docs/source-pytorch/clouds/cluster_expert.rst b/docs/source-pytorch/clouds/cluster_expert.rst index a66a876dc208f..4306b3f1d866e 100644 --- a/docs/source-pytorch/clouds/cluster_expert.rst +++ b/docs/source-pytorch/clouds/cluster_expert.rst @@ -15,12 +15,12 @@ Integrate your own cluster Lightning provides an interface for providing your own definition of a cluster environment. It mainly consists of parsing the right environment variables to access information such as world size, global and local rank (process id), and node rank (node id). Here is an example of a custom -:class:`~pytorch_lightning.plugins.environments.cluster_environment.ClusterEnvironment`: +:class:`~lightning.pytorch.plugins.environments.cluster_environment.ClusterEnvironment`: .. code-block:: python import os - from pytorch_lightning.plugins.environments import ClusterEnvironment + from lightning.pytorch.plugins.environments import ClusterEnvironment class MyClusterEnvironment(ClusterEnvironment): diff --git a/docs/source-pytorch/common/checkpointing.rst b/docs/source-pytorch/common/checkpointing.rst index c1552fa153263..fdb4e85df6855 100644 --- a/docs/source-pytorch/common/checkpointing.rst +++ b/docs/source-pytorch/common/checkpointing.rst @@ -69,7 +69,7 @@ Checkpointing :header: ModelCheckpoint API :description: Dig into the ModelCheckpoint API :col_css: col-md-4 - :button_link: ../api/pytorch_lightning.callbacks.ModelCheckpoint.html + :button_link: ../api/lightning.pytorch.callbacks.ModelCheckpoint.html :height: 150 .. 
raw:: html diff --git a/docs/source-pytorch/common/checkpointing_advanced.rst b/docs/source-pytorch/common/checkpointing_advanced.rst index 3ef5bf6b778f1..80b10e1618308 100644 --- a/docs/source-pytorch/common/checkpointing_advanced.rst +++ b/docs/source-pytorch/common/checkpointing_advanced.rst @@ -52,7 +52,7 @@ Checkpoints can also save the state of :doc:`datamodules <../extensions/datamodu **************************** Modify a checkpoint anywhere **************************** -When you need to change the components of a checkpoint before saving or loading, use the :meth:`~pytorch_lightning.core.hooks.CheckpointHooks.on_save_checkpoint` and :meth:`~pytorch_lightning.core.hooks.CheckpointHooks.on_load_checkpoint` of your ``LightningModule``. +When you need to change the components of a checkpoint before saving or loading, use the :meth:`~lightning.pytorch.core.hooks.CheckpointHooks.on_save_checkpoint` and :meth:`~lightning.pytorch.core.hooks.CheckpointHooks.on_load_checkpoint` of your ``LightningModule``. .. code:: python @@ -63,7 +63,7 @@ When you need to change the components of a checkpoint before saving or loading, def on_load_checkpoint(self, checkpoint): my_cool_pickable_object = checkpoint["something_cool_i_want_to_save"] -Use the above approach when you need to couple this behavior to your LightningModule for reproducibility reasons. Otherwise, Callbacks also have the :meth:`~pytorch_lightning.callbacks.callback.Callback.on_save_checkpoint` and :meth:`~pytorch_lightning.callbacks.callback.Callback.on_load_checkpoint` which you should use instead: +Use the above approach when you need to couple this behavior to your LightningModule for reproducibility reasons. Otherwise, Callbacks also have the :meth:`~lightning.pytorch.callbacks.callback.Callback.on_save_checkpoint` and :meth:`~lightning.pytorch.callbacks.callback.Callback.on_load_checkpoint` which you should use instead: .. code:: python diff --git a/docs/source-pytorch/common/checkpointing_expert.rst b/docs/source-pytorch/common/checkpointing_expert.rst index 20511d3a3c97c..f1f92bdd0c548 100644 --- a/docs/source-pytorch/common/checkpointing_expert.rst +++ b/docs/source-pytorch/common/checkpointing_expert.rst @@ -23,8 +23,8 @@ Customize Checkpointing Lightning supports modifying the checkpointing save/load functionality through the ``CheckpointIO``. This encapsulates the save/load logic -that is managed by the ``Strategy``. ``CheckpointIO`` is different from :meth:`~pytorch_lightning.core.hooks.CheckpointHooks.on_save_checkpoint` -and :meth:`~pytorch_lightning.core.hooks.CheckpointHooks.on_load_checkpoint` methods as it determines how the checkpoint is saved/loaded to storage rather than +that is managed by the ``Strategy``. ``CheckpointIO`` is different from :meth:`~lightning.pytorch.core.hooks.CheckpointHooks.on_save_checkpoint` +and :meth:`~lightning.pytorch.core.hooks.CheckpointHooks.on_load_checkpoint` methods as it determines how the checkpoint is saved/loaded to storage rather than what's saved in the checkpoint. @@ -40,14 +40,14 @@ Built-in Checkpoint IO Plugins * - Plugin - Description - * - :class:`~pytorch_lightning.plugins.io.TorchCheckpointIO` + * - :class:`~lightning.pytorch.plugins.io.TorchCheckpointIO` - CheckpointIO that utilizes :func:`torch.save` and :func:`torch.load` to save and load checkpoints respectively, common for most use cases. 
- * - :class:`~pytorch_lightning.plugins.io.XLACheckpointIO` + * - :class:`~lightning.pytorch.plugins.io.XLACheckpointIO` - CheckpointIO that utilizes :func:`xm.save` to save checkpoints for TPU training strategies. - * - :class:`~pytorch_lightning.plugins.io.HPUCheckpointIO` + * - :class:`~lightning.pytorch.plugins.io.HPUCheckpointIO` - CheckpointIO to save checkpoints for HPU training strategies. - * - :class:`~pytorch_lightning.plugins.io.AsyncCheckpointIO` + * - :class:`~lightning.pytorch.plugins.io.AsyncCheckpointIO` - ``AsyncCheckpointIO`` enables saving the checkpoints asynchronously in a thread. @@ -59,10 +59,10 @@ Custom Checkpoint IO Plugin .. code-block:: python - from pytorch_lightning import Trainer - from pytorch_lightning.callbacks import ModelCheckpoint - from pytorch_lightning.plugins import CheckpointIO - from pytorch_lightning.strategies import SingleDeviceStrategy + from lightning.pytorch import Trainer + from lightning.pytorch.callbacks import ModelCheckpoint + from lightning.pytorch.plugins import CheckpointIO + from lightning.pytorch.strategies import SingleDeviceStrategy class CustomCheckpointIO(CheckpointIO): @@ -109,11 +109,11 @@ Asynchronous Checkpointing This is currently an experimental plugin/feature and API changes are to be expected. To enable saving the checkpoints asynchronously without blocking your training, you can configure -:class:`~pytorch_lightning.plugins.io.async_plugin.AsyncCheckpointIO` plugin to ``Trainer``. +:class:`~lightning.pytorch.plugins.io.async_plugin.AsyncCheckpointIO` plugin to ``Trainer``. .. code-block:: python - from pytorch_lightning.plugins.io import AsyncCheckpointIO + from lightning.pytorch.plugins.io import AsyncCheckpointIO async_ckpt_io = AsyncCheckpointIO() @@ -126,7 +126,7 @@ But if you want the plugin to use your own custom base ``CheckpointIO`` and want .. code-block:: python - from pytorch_lightning.plugins.io import AsyncCheckpointIO + from lightning.pytorch.plugins.io import AsyncCheckpointIO base_ckpt_io = MyCustomCheckpointIO() async_ckpt_io = AsyncCheckpointIO(checkpoint_io=base_ckpt_io) diff --git a/docs/source-pytorch/common/checkpointing_intermediate.rst b/docs/source-pytorch/common/checkpointing_intermediate.rst index e20ad6884618c..5c4e483e36f7e 100644 --- a/docs/source-pytorch/common/checkpointing_intermediate.rst +++ b/docs/source-pytorch/common/checkpointing_intermediate.rst @@ -12,11 +12,11 @@ Customize checkpointing behavior (intermediate) ***************************** Modify checkpointing behavior ***************************** -For fine-grained control over checkpointing behavior, use the :class:`~pytorch_lightning.callbacks.ModelCheckpoint` object +For fine-grained control over checkpointing behavior, use the :class:`~lightning.pytorch.callbacks.ModelCheckpoint` object .. code-block:: python - from pytorch_lightning.callbacks import ModelCheckpoint + from lightning.pytorch.callbacks import ModelCheckpoint checkpoint_callback = ModelCheckpoint(dirpath="my/path/", save_top_k=2, monitor="val_loss") trainer = Trainer(callbacks=[checkpoint_callback]) @@ -40,7 +40,7 @@ Any value that has been logged via *self.log* in the LightningModule can be moni ***************************** Save checkpoints by condition ***************************** -To save checkpoints based on a (*when/which/what/where*) condition (for example *when* the validation_loss is lower) modify the :class:`~pytorch_lightning.callbacks.ModelCheckpoint` properties. 
+To save checkpoints based on a (*when/which/what/where*) condition (for example *when* the validation_loss is lower) modify the :class:`~lightning.pytorch.callbacks.ModelCheckpoint` properties. When ==== @@ -61,7 +61,7 @@ Which .. testcode:: - from pytorch_lightning.callbacks import ModelCheckpoint + from lightning.pytorch.callbacks import ModelCheckpoint # saves top-K checkpoints based on "val_loss" metric @@ -89,7 +89,7 @@ Which .. testcode:: - from pytorch_lightning.callbacks import ModelCheckpoint + from lightning.pytorch.callbacks import ModelCheckpoint class LitAutoEncoder(LightningModule): @@ -120,13 +120,13 @@ What Where ===== -- By default, the ``ModelCheckpoint`` will save files into the ``Trainer.log_dir``. It gives you the ability to specify the ``dirpath`` and ``filename`` for your checkpoints. Filename can also be dynamic so you can inject the metrics that are being logged using :meth:`~pytorch_lightning.core.module.LightningModule.log`. +- By default, the ``ModelCheckpoint`` will save files into the ``Trainer.log_dir``. It gives you the ability to specify the ``dirpath`` and ``filename`` for your checkpoints. Filename can also be dynamic so you can inject the metrics that are being logged using :meth:`~lightning.pytorch.core.module.LightningModule.log`. | .. testcode:: - from pytorch_lightning.callbacks import ModelCheckpoint + from lightning.pytorch.callbacks import ModelCheckpoint # saves a file like: my/path/sample-mnist-epoch=02-val_loss=0.32.ckpt @@ -137,7 +137,7 @@ Where | -The :class:`~pytorch_lightning.callbacks.ModelCheckpoint` callback is very robust and should cover 99% of the use-cases. If you find a use-case that is not configured yet, feel free to open an issue with a feature request on GitHub +The :class:`~lightning.pytorch.callbacks.ModelCheckpoint` callback is very robust and should cover 99% of the use-cases. If you find a use-case that is not configured yet, feel free to open an issue with a feature request on GitHub and the Lightning Team will be happy to integrate/help integrate it. ---- @@ -146,8 +146,8 @@ and the Lightning Team will be happy to integrate/help integrate it. Save checkpoints manually ************************* -You can manually save checkpoints and restore your model from the checkpointed state using :meth:`~pytorch_lightning.trainer.trainer.Trainer.save_checkpoint` -and :meth:`~pytorch_lightning.core.saving.ModelIO.load_from_checkpoint`. +You can manually save checkpoints and restore your model from the checkpointed state using :meth:`~lightning.pytorch.trainer.trainer.Trainer.save_checkpoint` +and :meth:`~lightning.pytorch.core.saving.ModelIO.load_from_checkpoint`. .. code-block:: python @@ -170,6 +170,6 @@ In distributed training cases where a model is running across many machines, Lig # Saves only on the main process trainer.save_checkpoint("example.ckpt") -Not using :meth:`~pytorch_lightning.trainer.trainer.Trainer.save_checkpoint` can lead to unexpected behavior and potential deadlock. Using other saving functions will result in all devices attempting to save the checkpoint. As a result, we highly recommend using the Trainer's save functionality. -If using custom saving functions cannot be avoided, we recommend using the :func:`~pytorch_lightning.utilities.rank_zero.rank_zero_only` decorator to ensure saving occurs only on the main process. 
Note that this will only work if all ranks hold the exact same state and won't work when using +Not using :meth:`~lightning.pytorch.trainer.trainer.Trainer.save_checkpoint` can lead to unexpected behavior and potential deadlock. Using other saving functions will result in all devices attempting to save the checkpoint. As a result, we highly recommend using the Trainer's save functionality. +If using custom saving functions cannot be avoided, we recommend using the :func:`~lightning.pytorch.utilities.rank_zero.rank_zero_only` decorator to ensure saving occurs only on the main process. Note that this will only work if all ranks hold the exact same state and won't work when using model parallel distributed strategies such as deepspeed or sharded training. diff --git a/docs/source-pytorch/common/checkpointing_migration.rst b/docs/source-pytorch/common/checkpointing_migration.rst index d04b24f4c60b9..5c536cc739da0 100644 --- a/docs/source-pytorch/common/checkpointing_migration.rst +++ b/docs/source-pytorch/common/checkpointing_migration.rst @@ -41,11 +41,11 @@ You can upgrade checkpoint files permanently with the following command .. code-block:: - python -m pytorch_lightning.utilities.upgrade_checkpoint path/to/model.ckpt + python -m lightning.pytorch.utilities.upgrade_checkpoint path/to/model.ckpt or a folder with multiple files: .. code-block:: - python -m pytorch_lightning.utilities.upgrade_checkpoint /path/to/checkpoints/folder + python -m lightning.pytorch.utilities.upgrade_checkpoint /path/to/checkpoints/folder diff --git a/docs/source-pytorch/common/child_modules.rst b/docs/source-pytorch/common/child_modules.rst index 94d1c524204ff..ab6e395786f23 100644 --- a/docs/source-pytorch/common/child_modules.rst +++ b/docs/source-pytorch/common/child_modules.rst @@ -61,7 +61,7 @@ and we can train this using the ``Trainer``: trainer = Trainer() trainer.fit(lightning_module, train_dataloader, val_dataloader) -And remember that the forward method should define the practical use of a :class:`~pytorch_lightning.core.module.LightningModule`. +And remember that the forward method should define the practical use of a :class:`~lightning.pytorch.core.module.LightningModule`. In this case, we want to use the ``LitAutoEncoder`` to extract image representations: .. code-block:: python diff --git a/docs/source-pytorch/common/console_logs.rst b/docs/source-pytorch/common/console_logs.rst index 6761432378c2b..210f14b7a0fe0 100644 --- a/docs/source-pytorch/common/console_logs.rst +++ b/docs/source-pytorch/common/console_logs.rst @@ -17,10 +17,10 @@ or redirect output for certain modules to log files: import logging # configure logging at the root level of Lightning - logging.getLogger("pytorch_lightning").setLevel(logging.ERROR) + logging.getLogger("lightning.pytorch").setLevel(logging.ERROR) # configure logging on module level, redirect to file - logger = logging.getLogger("pytorch_lightning.core") + logger = logging.getLogger("lightning.pytorch.core") logger.addHandler(logging.FileHandler("core.log")) Read more about custom Python logging `here `_. diff --git a/docs/source-pytorch/common/early_stopping.rst b/docs/source-pytorch/common/early_stopping.rst index d63df50298114..9e9358d1fc34d 100644 --- a/docs/source-pytorch/common/early_stopping.rst +++ b/docs/source-pytorch/common/early_stopping.rst @@ -1,6 +1,6 @@ .. testsetup:: * - from pytorch_lightning.callbacks.early_stopping import EarlyStopping + from lightning.pytorch.callbacks.early_stopping import EarlyStopping .. 
_early_stopping: @@ -20,7 +20,7 @@ Early Stopping Stopping an Epoch Early *********************** -You can stop and skip the rest of the current epoch early by overriding :meth:`~pytorch_lightning.core.hooks.ModelHooks.on_train_batch_start` to return ``-1`` when some condition is met. +You can stop and skip the rest of the current epoch early by overriding :meth:`~lightning.pytorch.core.hooks.ModelHooks.on_train_batch_start` to return ``-1`` when some condition is met. If you do this repeatedly, for every epoch you had originally requested, then this will stop your entire training. @@ -29,19 +29,19 @@ If you do this repeatedly, for every epoch you had originally requested, then th EarlyStopping Callback ********************** -The :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback can be used to monitor a metric and stop the training when no improvement is observed. +The :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callback can be used to monitor a metric and stop the training when no improvement is observed. To enable it: -- Import :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback. -- Log the metric you want to monitor using :meth:`~pytorch_lightning.core.module.LightningModule.log` method. +- Import :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callback. +- Log the metric you want to monitor using :meth:`~lightning.pytorch.core.module.LightningModule.log` method. - Init the callback, and set ``monitor`` to the logged metric of your choice. - Set the ``mode`` based on the metric needs to be monitored. -- Pass the :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback to the :class:`~pytorch_lightning.trainer.trainer.Trainer` callbacks flag. +- Pass the :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callback to the :class:`~lightning.pytorch.trainer.trainer.Trainer` callbacks flag. .. code-block:: python - from pytorch_lightning.callbacks.early_stopping import EarlyStopping + from lightning.pytorch.callbacks.early_stopping import EarlyStopping class LitModel(LightningModule): @@ -73,7 +73,7 @@ Additional parameters that stop training at extreme points: training-specific hooks on epoch-level. -In case you need early stopping in a different part of training, subclass :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` +In case you need early stopping in a different part of training, subclass :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` and change where it is called: .. testcode:: @@ -88,11 +88,11 @@ and change where it is called: self._run_early_stopping_check(trainer) .. note:: - The :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback runs + The :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callback runs at the end of every validation epoch by default. However, the frequency of validation - can be modified by setting various parameters in the :class:`~pytorch_lightning.trainer.trainer.Trainer`, - for example :paramref:`~pytorch_lightning.trainer.trainer.Trainer.check_val_every_n_epoch` - and :paramref:`~pytorch_lightning.trainer.trainer.Trainer.val_check_interval`. + can be modified by setting various parameters in the :class:`~lightning.pytorch.trainer.trainer.Trainer`, + for example :paramref:`~lightning.pytorch.trainer.trainer.Trainer.check_val_every_n_epoch` + and :paramref:`~lightning.pytorch.trainer.trainer.Trainer.val_check_interval`. 
It must be noted that the ``patience`` parameter counts the number of validation checks with no improvement, and not the number of training epochs. Therefore, with parameters ``check_val_every_n_epoch=10`` and ``patience=3``, the trainer diff --git a/docs/source-pytorch/common/evaluation_intermediate.rst b/docs/source-pytorch/common/evaluation_intermediate.rst index 1366e6f940852..91f018cb8ea0e 100644 --- a/docs/source-pytorch/common/evaluation_intermediate.rst +++ b/docs/source-pytorch/common/evaluation_intermediate.rst @@ -22,12 +22,12 @@ Testing ******* Lightning allows the user to test their models with any compatible test dataloaders. This can be done before/after training -and is completely agnostic to :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit` call. The logic used here is defined under -:meth:`~pytorch_lightning.core.module.LightningModule.test_step`. +and is completely agnostic to :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit` call. The logic used here is defined under +:meth:`~lightning.pytorch.core.module.LightningModule.test_step`. Testing is performed using the ``Trainer`` object's ``.test()`` method. -.. automethod:: pytorch_lightning.trainer.Trainer.test +.. automethod:: lightning.pytorch.trainer.Trainer.test :noindex: @@ -102,7 +102,7 @@ running the test set (ie: 16-bit, dp, ddp, etc...) Test with Additional DataLoaders ================================ -You can still run inference on a test dataset even if the :meth:`~pytorch_lightning.core.hooks.DataHooks.test_dataloader` method hasn't been +You can still run inference on a test dataset even if the :meth:`~lightning.pytorch.core.hooks.DataHooks.test_dataloader` method hasn't been defined within your :doc:`lightning module <../common/lightning_module>` instance. This would be the case when your test data is not available at the time your model was declared. @@ -141,13 +141,13 @@ Validation ********** Lightning allows the user to validate their models with any compatible ``val dataloaders``. This can be done before/after training. -The logic associated to the validation is defined within the :meth:`~pytorch_lightning.core.module.LightningModule.validation_step`. +The logic associated to the validation is defined within the :meth:`~lightning.pytorch.core.module.LightningModule.validation_step`. -Apart from this ``.validate`` has same API as ``.test``, but would rely respectively on :meth:`~pytorch_lightning.core.module.LightningModule.validation_step` and :meth:`~pytorch_lightning.core.module.LightningModule.test_step`. +Apart from this ``.validate`` has same API as ``.test``, but would rely respectively on :meth:`~lightning.pytorch.core.module.LightningModule.validation_step` and :meth:`~lightning.pytorch.core.module.LightningModule.test_step`. .. note:: ``.validate`` method uses the same validation logic being used under validation happening within - :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit` call. + :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit` call. .. warning:: @@ -156,5 +156,5 @@ Apart from this ``.validate`` has same API as ``.test``, but would rely respecti make sure all devices have same batch size in case of uneven inputs. This is helpful to make sure benchmarking for research papers is done the right way. -.. automethod:: pytorch_lightning.trainer.Trainer.validate +.. 
automethod:: lightning.pytorch.trainer.Trainer.validate :noindex: diff --git a/docs/source-pytorch/common/gradient_accumulation.rst b/docs/source-pytorch/common/gradient_accumulation.rst index cf3e2bb2f5da4..cc74b2c84500b 100644 --- a/docs/source-pytorch/common/gradient_accumulation.rst +++ b/docs/source-pytorch/common/gradient_accumulation.rst @@ -9,7 +9,7 @@ effective batch size is increased but there is no memory overhead. step, the effective batch size on each device will remain ``N*K`` but right before the ``optimizer.step()``, the gradient sync will make the effective batch size as ``P*N*K``. For DP, since the batch is split across devices, the final effective batch size will be ``N*K``. -.. seealso:: :class:`~pytorch_lightning.trainer.trainer.Trainer` +.. seealso:: :class:`~lightning.pytorch.trainer.trainer.Trainer` .. testcode:: @@ -19,12 +19,12 @@ effective batch size is increased but there is no memory overhead. # Accumulate gradients for 7 batches trainer = Trainer(accumulate_grad_batches=7) -Optionally, you can make the ``accumulate_grad_batches`` value change over time by using the :class:`~pytorch_lightning.callbacks.gradient_accumulation_scheduler.GradientAccumulationScheduler`. +Optionally, you can make the ``accumulate_grad_batches`` value change over time by using the :class:`~lightning.pytorch.callbacks.gradient_accumulation_scheduler.GradientAccumulationScheduler`. Pass in a scheduling dictionary, where the key represents the epoch at which the value for gradient accumulation should be updated. .. testcode:: - from pytorch_lightning.callbacks import GradientAccumulationScheduler + from lightning.pytorch.callbacks import GradientAccumulationScheduler # till 5th epoch, it will accumulate every 8 batches. From 5th epoch # till 9th epoch it will accumulate every 4 batches and after that no accumulation diff --git a/docs/source-pytorch/common/lightning_module.rst b/docs/source-pytorch/common/lightning_module.rst index 6abc3349ffb38..c13b0768b4f4a 100644 --- a/docs/source-pytorch/common/lightning_module.rst +++ b/docs/source-pytorch/common/lightning_module.rst @@ -95,7 +95,7 @@ Here are the only required methods. .. code-block:: python - import pytorch_lightning as pl + import lightning.pytorch as pl import torch.nn as nn import torch.nn.functional as F @@ -159,7 +159,7 @@ Training Training Loop ============= -To activate the training loop, override the :meth:`~pytorch_lightning.core.module.LightningModule.training_step` method. +To activate the training loop, override the :meth:`~lightning.pytorch.core.module.LightningModule.training_step` method. .. code-block:: python @@ -198,7 +198,7 @@ Under the hood, Lightning does the following (pseudocode): Train Epoch-level Metrics ========================= -If you want to calculate epoch-level metrics and log them, use :meth:`~pytorch_lightning.core.module.LightningModule.log`. +If you want to calculate epoch-level metrics and log them, use :meth:`~lightning.pytorch.core.module.LightningModule.log`. .. code-block:: python @@ -212,7 +212,7 @@ If you want to calculate epoch-level metrics and log them, use :meth:`~pytorch_l self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True) return loss -The :meth:`~pytorch_lightning.core.module.LightningModule.log` method automatically reduces the +The :meth:`~lightning.pytorch.core.module.LightningModule.log` method automatically reduces the requested metrics across a complete epoch and devices. Here's the pseudocode of what it does under the hood: .. 
code-block:: python @@ -236,8 +236,8 @@ requested metrics across a complete epoch and devices. Here's the pseudocode of Train Epoch-level Operations ============================ -In the case that you need to make use of all the outputs from each :meth:`~pytorch_lightning.LightningModule.training_step`, -override the :meth:`~pytorch_lightning.LightningModule.on_training_epoch_end` method. +In the case that you need to make use of all the outputs from each :meth:`~lightning.pytorch.LightningModule.training_step`, +override the :meth:`~lightning.pytorch.LightningModule.on_training_epoch_end` method. .. code-block:: python @@ -271,7 +271,7 @@ Validation Validation Loop =============== -To activate the validation loop while training, override the :meth:`~pytorch_lightning.core.module.LightningModule.validation_step` method. +To activate the validation loop while training, override the :meth:`~lightning.pytorch.core.module.LightningModule.validation_step` method. .. code-block:: python @@ -306,8 +306,8 @@ Under the hood, Lightning does the following (pseudocode): torch.set_grad_enabled(True) model.train() -You can also run just the validation loop on your validation dataloaders by overriding :meth:`~pytorch_lightning.core.module.LightningModule.validation_step` -and calling :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`. +You can also run just the validation loop on your validation dataloaders by overriding :meth:`~lightning.pytorch.core.module.LightningModule.validation_step` +and calling :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate`. .. code-block:: python @@ -327,9 +327,9 @@ and calling :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`. Validation Epoch-level Metrics ============================== -In the case that you need to make use of all the outputs from each :meth:`~pytorch_lightning.LightningModule.validation_step`, -override the :meth:`~pytorch_lightning.LightningModule.on_validation_epoch_end` method. -Note that this method is called before :meth:`~pytorch_lightning.LightningModule.on_train_epoch_end`. +In the case that you need to make use of all the outputs from each :meth:`~lightning.pytorch.LightningModule.validation_step`, +override the :meth:`~lightning.pytorch.LightningModule.on_validation_epoch_end` method. +Note that this method is called before :meth:`~lightning.pytorch.LightningModule.on_train_epoch_end`. .. code-block:: python @@ -363,9 +363,9 @@ Test Loop ========= The process for enabling a test loop is the same as the process for enabling a validation loop. Please refer to -the section above for details. For this you need to override the :meth:`~pytorch_lightning.core.module.LightningModule.test_step` method. +the section above for details. For this you need to override the :meth:`~lightning.pytorch.core.module.LightningModule.test_step` method. -The only difference is that the test loop is only called when :meth:`~pytorch_lightning.trainer.trainer.Trainer.test` is used. +The only difference is that the test loop is only called when :meth:`~lightning.pytorch.trainer.trainer.Trainer.test` is used. .. code-block:: python @@ -410,9 +410,9 @@ Inference Prediction Loop =============== -By default, the :meth:`~pytorch_lightning.core.module.LightningModule.predict_step` method runs the -:meth:`~pytorch_lightning.core.module.LightningModule.forward` method. In order to customize this behaviour, -simply override the :meth:`~pytorch_lightning.core.module.LightningModule.predict_step` method. 
+By default, the :meth:`~lightning.pytorch.core.module.LightningModule.predict_step` method runs the +:meth:`~lightning.pytorch.core.module.LightningModule.forward` method. In order to customize this behaviour, +simply override the :meth:`~lightning.pytorch.core.module.LightningModule.predict_step` method. For the example let's override ``predict_step`` and try out `Monte Carlo Dropout `_: @@ -496,7 +496,7 @@ such as text generation: return decoded In the case where you want to scale your inference, you should be using -:meth:`~pytorch_lightning.core.module.LightningModule.predict_step`. +:meth:`~lightning.pytorch.core.module.LightningModule.predict_step`. .. code-block:: python @@ -622,8 +622,8 @@ improve readability and reproducibility. save_hyperparameters ==================== -Use :meth:`~pytorch_lightning.core.module.LightningModule.save_hyperparameters` within your -:class:`~pytorch_lightning.core.module.LightningModule`'s ``__init__`` method. It will enable Lightning to store all the +Use :meth:`~lightning.pytorch.core.module.LightningModule.save_hyperparameters` within your +:class:`~lightning.pytorch.core.module.LightningModule`'s ``__init__`` method. It will enable Lightning to store all the provided arguments under the ``self.hparams`` attribute. These hyperparameters will also be stored within the model checkpoint, which simplifies model re-instantiation after training. @@ -670,8 +670,8 @@ load_from_checkpoint ==================== LightningModules that have hyperparameters automatically saved with -:meth:`~pytorch_lightning.core.module.LightningModule.save_hyperparameters` can conveniently be loaded and instantiated -directly from a checkpoint with :meth:`~pytorch_lightning.core.module.LightningModule.load_from_checkpoint`: +:meth:`~lightning.pytorch.core.module.LightningModule.save_hyperparameters` can conveniently be loaded and instantiated +directly from a checkpoint with :meth:`~lightning.pytorch.core.module.LightningModule.load_from_checkpoint`: .. code-block:: python @@ -709,31 +709,31 @@ Methods all_gather ~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.all_gather +.. automethod:: lightning.pytorch.core.module.LightningModule.all_gather :noindex: configure_callbacks ~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.configure_callbacks +.. automethod:: lightning.pytorch.core.module.LightningModule.configure_callbacks :noindex: configure_optimizers ~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.configure_optimizers +.. automethod:: lightning.pytorch.core.module.LightningModule.configure_optimizers :noindex: forward ~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.forward +.. automethod:: lightning.pytorch.core.module.LightningModule.forward :noindex: freeze ~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.freeze +.. automethod:: lightning.pytorch.core.module.LightningModule.freeze :noindex: .. _lm-log: @@ -741,97 +741,97 @@ freeze log ~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.log +.. automethod:: lightning.pytorch.core.module.LightningModule.log :noindex: log_dict ~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.log_dict +.. automethod:: lightning.pytorch.core.module.LightningModule.log_dict :noindex: lr_schedulers ~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.lr_schedulers +.. 
automethod:: lightning.pytorch.core.module.LightningModule.lr_schedulers :noindex: manual_backward ~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.manual_backward +.. automethod:: lightning.pytorch.core.module.LightningModule.manual_backward :noindex: optimizers ~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.optimizers +.. automethod:: lightning.pytorch.core.module.LightningModule.optimizers :noindex: print ~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.print +.. automethod:: lightning.pytorch.core.module.LightningModule.print :noindex: predict_step ~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.predict_step +.. automethod:: lightning.pytorch.core.module.LightningModule.predict_step :noindex: save_hyperparameters ~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.save_hyperparameters +.. automethod:: lightning.pytorch.core.module.LightningModule.save_hyperparameters :noindex: toggle_optimizer ~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.toggle_optimizer +.. automethod:: lightning.pytorch.core.module.LightningModule.toggle_optimizer :noindex: test_step ~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.test_step +.. automethod:: lightning.pytorch.core.module.LightningModule.test_step :noindex: to_onnx ~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.to_onnx +.. automethod:: lightning.pytorch.core.module.LightningModule.to_onnx :noindex: to_torchscript ~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.to_torchscript +.. automethod:: lightning.pytorch.core.module.LightningModule.to_torchscript :noindex: training_step ~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.training_step +.. automethod:: lightning.pytorch.core.module.LightningModule.training_step :noindex: unfreeze ~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.unfreeze +.. automethod:: lightning.pytorch.core.module.LightningModule.unfreeze :noindex: untoggle_optimizer ~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.untoggle_optimizer +.. automethod:: lightning.pytorch.core.module.LightningModule.untoggle_optimizer :noindex: validation_step ~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.validation_step +.. automethod:: lightning.pytorch.core.module.LightningModule.validation_step :noindex: ----------- @@ -891,7 +891,7 @@ hparams ~~~~~~~ The arguments passed through ``LightningModule.__init__()`` and saved by calling -:meth:`~pytorch_lightning.core.mixins.hparams_mixin.HyperparametersMixin.save_hyperparameters` could be accessed by the ``hparams`` attribute. +:meth:`~lightning.pytorch.core.mixins.hparams_mixin.HyperparametersMixin.save_hyperparameters` could be accessed by the ``hparams`` attribute. .. code-block:: python @@ -1046,7 +1046,7 @@ Set and access example_input_array, which basically represents a single batch. Hooks ===== -This is the pseudocode to describe the structure of :meth:`~pytorch_lightning.trainer.Trainer.fit`. +This is the pseudocode to describe the structure of :meth:`~lightning.pytorch.trainer.Trainer.fit`. The inputs and outputs of each function are not represented for simplicity. Please check each function's API reference for more information. @@ -1140,311 +1140,311 @@ for more information. backward ~~~~~~~~ -.. 
automethod:: pytorch_lightning.core.module.LightningModule.backward +.. automethod:: lightning.pytorch.core.module.LightningModule.backward :noindex: on_before_backward ~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_before_backward +.. automethod:: lightning.pytorch.core.module.LightningModule.on_before_backward :noindex: on_after_backward ~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_after_backward +.. automethod:: lightning.pytorch.core.module.LightningModule.on_after_backward :noindex: on_before_zero_grad ~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_before_zero_grad +.. automethod:: lightning.pytorch.core.module.LightningModule.on_before_zero_grad :noindex: on_fit_start ~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_fit_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_fit_start :noindex: on_fit_end ~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_fit_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_fit_end :noindex: on_load_checkpoint ~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_load_checkpoint +.. automethod:: lightning.pytorch.core.module.LightningModule.on_load_checkpoint :noindex: on_save_checkpoint ~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_save_checkpoint +.. automethod:: lightning.pytorch.core.module.LightningModule.on_save_checkpoint :noindex: load_from_checkpoint ~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.load_from_checkpoint +.. automethod:: lightning.pytorch.core.module.LightningModule.load_from_checkpoint :noindex: on_train_start ~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_train_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_train_start :noindex: on_train_end ~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_train_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_train_end :noindex: on_validation_start ~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_validation_start :noindex: on_validation_end ~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_validation_end :noindex: on_test_batch_start ~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_test_batch_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_test_batch_start :noindex: on_test_batch_end ~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_test_batch_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_test_batch_end :noindex: on_test_epoch_start ~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_test_epoch_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_test_epoch_start :noindex: on_test_epoch_end ~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_test_epoch_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_test_epoch_end :noindex: on_test_start ~~~~~~~~~~~~~ -.. 
automethod:: pytorch_lightning.core.module.LightningModule.on_test_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_test_start :noindex: on_test_end ~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_test_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_test_end :noindex: on_predict_batch_start ~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_predict_batch_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_predict_batch_start :noindex: on_predict_batch_end ~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_predict_batch_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_predict_batch_end :noindex: on_predict_epoch_start ~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_predict_epoch_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_predict_epoch_start :noindex: on_predict_epoch_end ~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_predict_epoch_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_predict_epoch_end :noindex: on_predict_start ~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_predict_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_predict_start :noindex: on_predict_end ~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_predict_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_predict_end :noindex: on_train_batch_start ~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_train_batch_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_train_batch_start :noindex: on_train_batch_end ~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_train_batch_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_train_batch_end :noindex: on_train_epoch_start ~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_train_epoch_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_train_epoch_start :noindex: on_train_epoch_end ~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_train_epoch_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_train_epoch_end :noindex: on_validation_batch_start ~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_batch_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_validation_batch_start :noindex: on_validation_batch_end ~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_batch_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_validation_batch_end :noindex: on_validation_epoch_start ~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_epoch_start +.. automethod:: lightning.pytorch.core.module.LightningModule.on_validation_epoch_start :noindex: on_validation_epoch_end ~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_epoch_end +.. automethod:: lightning.pytorch.core.module.LightningModule.on_validation_epoch_end :noindex: configure_sharded_model ~~~~~~~~~~~~~~~~~~~~~~~ -.. 
automethod:: pytorch_lightning.core.module.LightningModule.configure_sharded_model +.. automethod:: lightning.pytorch.core.module.LightningModule.configure_sharded_model :noindex: on_validation_model_eval ~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_model_eval +.. automethod:: lightning.pytorch.core.module.LightningModule.on_validation_model_eval :noindex: on_validation_model_train ~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_validation_model_train +.. automethod:: lightning.pytorch.core.module.LightningModule.on_validation_model_train :noindex: on_test_model_eval ~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_test_model_eval +.. automethod:: lightning.pytorch.core.module.LightningModule.on_test_model_eval :noindex: on_test_model_train ~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_test_model_train +.. automethod:: lightning.pytorch.core.module.LightningModule.on_test_model_train :noindex: on_before_optimizer_step ~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_before_optimizer_step +.. automethod:: lightning.pytorch.core.module.LightningModule.on_before_optimizer_step :noindex: configure_gradient_clipping ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.configure_gradient_clipping +.. automethod:: lightning.pytorch.core.module.LightningModule.configure_gradient_clipping :noindex: optimizer_step ~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.optimizer_step +.. automethod:: lightning.pytorch.core.module.LightningModule.optimizer_step :noindex: optimizer_zero_grad ~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.optimizer_zero_grad +.. automethod:: lightning.pytorch.core.module.LightningModule.optimizer_zero_grad :noindex: prepare_data ~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.prepare_data +.. automethod:: lightning.pytorch.core.module.LightningModule.prepare_data :noindex: setup ~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.setup +.. automethod:: lightning.pytorch.core.module.LightningModule.setup :noindex: teardown ~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.teardown +.. automethod:: lightning.pytorch.core.module.LightningModule.teardown :noindex: train_dataloader ~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.train_dataloader +.. automethod:: lightning.pytorch.core.module.LightningModule.train_dataloader :noindex: val_dataloader ~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.val_dataloader +.. automethod:: lightning.pytorch.core.module.LightningModule.val_dataloader :noindex: test_dataloader ~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.test_dataloader +.. automethod:: lightning.pytorch.core.module.LightningModule.test_dataloader :noindex: predict_dataloader ~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.predict_dataloader +.. automethod:: lightning.pytorch.core.module.LightningModule.predict_dataloader :noindex: transfer_batch_to_device ~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.transfer_batch_to_device +.. 
automethod:: lightning.pytorch.core.module.LightningModule.transfer_batch_to_device :noindex: on_before_batch_transfer ~~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_before_batch_transfer +.. automethod:: lightning.pytorch.core.module.LightningModule.on_before_batch_transfer :noindex: on_after_batch_transfer ~~~~~~~~~~~~~~~~~~~~~~~ -.. automethod:: pytorch_lightning.core.module.LightningModule.on_after_batch_transfer +.. automethod:: lightning.pytorch.core.module.LightningModule.on_after_batch_transfer :noindex: diff --git a/docs/source-pytorch/common/optimization.rst b/docs/source-pytorch/common/optimization.rst index 4b7954eb8271b..d29415b64d9ab 100644 --- a/docs/source-pytorch/common/optimization.rst +++ b/docs/source-pytorch/common/optimization.rst @@ -68,8 +68,8 @@ Gradient Accumulation Access your Own Optimizer ========================= -The provided ``optimizer`` is a :class:`~pytorch_lightning.core.optimizer.LightningOptimizer` object wrapping your own optimizer -configured in your :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers`. +The provided ``optimizer`` is a :class:`~lightning.pytorch.core.optimizer.LightningOptimizer` object wrapping your own optimizer +configured in your :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers`. You can access your own optimizer with ``optimizer.optimizer``. However, if you use your own optimizer to perform a step, Lightning won't be able to support accelerators, precision and profiling for you. @@ -107,7 +107,7 @@ Bring your own Custom Learning Rate Schedulers Lightning allows using custom learning rate schedulers that aren't available in `PyTorch natively `_. One good example is `Timm Schedulers `_. When using custom learning rate schedulers -relying on a different API from Native PyTorch ones, you should override the :meth:`~pytorch_lightning.core.module.LightningModule.lr_scheduler_step` with your desired logic. +relying on a different API from Native PyTorch ones, you should override the :meth:`~lightning.pytorch.core.module.LightningModule.lr_scheduler_step` with your desired logic. If you are using native PyTorch schedulers, there is no need to override this hook since Lightning will handle it automatically by default. .. code-block:: python @@ -131,17 +131,17 @@ Configure Gradient Clipping =========================== To configure custom gradient clipping, consider overriding -the :meth:`~pytorch_lightning.core.module.LightningModule.configure_gradient_clipping` method. +the :meth:`~lightning.pytorch.core.module.LightningModule.configure_gradient_clipping` method. The attributes ``gradient_clip_val`` and ``gradient_clip_algorithm`` from Trainer will be passed in the respective arguments here and Lightning will handle gradient clipping for you. In case you want to set different values for your arguments of your choice and let Lightning handle the gradient clipping, you can -use the inbuilt :meth:`~pytorch_lightning.core.module.LightningModule.clip_gradients` method and pass +use the inbuilt :meth:`~lightning.pytorch.core.module.LightningModule.clip_gradients` method and pass the arguments along with your optimizer. .. warning:: - Make sure to not override :meth:`~pytorch_lightning.core.module.LightningModule.clip_gradients` + Make sure to not override :meth:`~lightning.pytorch.core.module.LightningModule.clip_gradients` method. 
If you want to customize gradient clipping, consider using - :meth:`~pytorch_lightning.core.module.LightningModule.configure_gradient_clipping` method. + :meth:`~lightning.pytorch.core.module.LightningModule.configure_gradient_clipping` method. For example, here we will apply a stronger gradient clipping after a certain number of epochs: @@ -158,7 +158,7 @@ For example, here we will apply a stronger gradient clipping after a certain num Total Stepping Batches ====================== -You can use built-in trainer property :paramref:`~pytorch_lightning.trainer.trainer.Trainer.estimated_stepping_batches` to compute +You can use built-in trainer property :paramref:`~lightning.pytorch.trainer.trainer.Trainer.estimated_stepping_batches` to compute total number of stepping batches for the complete training. The property is computed considering gradient accumulation factor and distributed setting into consideration so you don't have to derive it manually. One good example where this can be helpful is while using :class:`~torch.optim.lr_scheduler.OneCycleLR` scheduler, which requires pre-computed ``total_steps`` during initialization. diff --git a/docs/source-pytorch/common/precision_expert.rst b/docs/source-pytorch/common/precision_expert.rst index 7a6c2dada1c17..e63f355f0d048 100644 --- a/docs/source-pytorch/common/precision_expert.rst +++ b/docs/source-pytorch/common/precision_expert.rst @@ -12,7 +12,7 @@ N-Bit Precision (Expert) Precision Plugins ***************** -You can also customize and pass your own Precision Plugin by subclassing the :class:`~pytorch_lightning.plugins.precision.precision_plugin.PrecisionPlugin` class. +You can also customize and pass your own Precision Plugin by subclassing the :class:`~lightning.pytorch.plugins.precision.precision_plugin.PrecisionPlugin` class. - Perform pre and post backward/optimizer step operations such as scaling gradients. - Provide context managers for forward, training_step, etc. diff --git a/docs/source-pytorch/common/progress_bar.rst b/docs/source-pytorch/common/progress_bar.rst index e03c6491edc3e..a3ddd77dbb9c6 100644 --- a/docs/source-pytorch/common/progress_bar.rst +++ b/docs/source-pytorch/common/progress_bar.rst @@ -1,6 +1,6 @@ .. testsetup:: * - from pytorch_lightning.trainer.trainer import Trainer + from lightning.pytorch.trainer.trainer import Trainer .. _progress_bar: @@ -8,36 +8,36 @@ Customize the progress bar ========================== -Lightning supports two different types of progress bars (`tqdm `_ and `rich `_). :class:`~pytorch_lightning.callbacks.TQDMProgressBar` is used by default, -but you can override it by passing a custom :class:`~pytorch_lightning.callbacks.TQDMProgressBar` or :class:`~pytorch_lightning.callbacks.RichProgressBar` to the ``callbacks`` argument of the :class:`~pytorch_lightning.trainer.trainer.Trainer`. +Lightning supports two different types of progress bars (`tqdm `_ and `rich `_). :class:`~lightning.pytorch.callbacks.TQDMProgressBar` is used by default, +but you can override it by passing a custom :class:`~lightning.pytorch.callbacks.TQDMProgressBar` or :class:`~lightning.pytorch.callbacks.RichProgressBar` to the ``callbacks`` argument of the :class:`~lightning.pytorch.trainer.trainer.Trainer`. -You could also use the :class:`~pytorch_lightning.callbacks.ProgressBarBase` class to implement your own progress bar. +You could also use the :class:`~lightning.pytorch.callbacks.ProgressBarBase` class to implement your own progress bar. 
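As a rough sketch, a bare-bones text-only progress bar built on ``ProgressBarBase`` could look like the one below; it assumes the base class provides the ``train_batch_idx`` and ``total_train_batches`` properties and requires ``disable`` to be implemented, and the printing logic is purely illustrative:

.. code-block:: python

    import sys

    from lightning.pytorch import Trainer
    from lightning.pytorch.callbacks import ProgressBarBase


    class PrintingProgressBar(ProgressBarBase):
        def __init__(self):
            super().__init__()
            self.enable = True

        def disable(self):
            self.enable = False

        def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
            super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx)
            # train_batch_idx / total_train_batches are assumed to come from ProgressBarBase
            percent = (self.train_batch_idx / self.total_train_batches) * 100
            sys.stdout.flush()
            sys.stdout.write(f"{percent:.01f}% complete \r")


    trainer = Trainer(callbacks=[PrintingProgressBar()])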
------------- TQDMProgressBar --------------- -The :class:`~pytorch_lightning.callbacks.TQDMProgressBar` uses the `tqdm `_ library internally and is the default progress bar used by Lightning. +The :class:`~lightning.pytorch.callbacks.TQDMProgressBar` uses the `tqdm `_ library internally and is the default progress bar used by Lightning. It prints to ``stdout`` and shows up to four different bars: - **sanity check progress:** the progress during the sanity check run -- **train progress:** shows the training progress. It will pause if validation starts and will resume when it ends, and also accounts for multiple validation runs during training when :paramref:`~pytorch_lightning.trainer.trainer.Trainer.val_check_interval` is used. +- **train progress:** shows the training progress. It will pause if validation starts and will resume when it ends, and also accounts for multiple validation runs during training when :paramref:`~lightning.pytorch.trainer.trainer.Trainer.val_check_interval` is used. - **validation progress:** only visible during validation; shows total progress over all validation datasets. - **test progress:** only active when testing; shows total progress over all test datasets. For infinite datasets, the progress bar never ends. -You can update ``refresh_rate`` (rate (number of batches) at which the progress bar get updated) for :class:`~pytorch_lightning.callbacks.TQDMProgressBar` by: +You can update ``refresh_rate`` (rate (number of batches) at which the progress bar get updated) for :class:`~lightning.pytorch.callbacks.TQDMProgressBar` by: .. code-block:: python - from pytorch_lightning.callbacks import TQDMProgressBar + from lightning.pytorch.callbacks import TQDMProgressBar trainer = Trainer(callbacks=[TQDMProgressBar(refresh_rate=10)]) -If you want to customize the default :class:`~pytorch_lightning.callbacks.TQDMProgressBar` used by Lightning, you can override -specific methods of the callback class and pass your custom implementation to the :class:`~pytorch_lightning.trainer.trainer.Trainer`. +If you want to customize the default :class:`~lightning.pytorch.callbacks.TQDMProgressBar` used by Lightning, you can override +specific methods of the callback class and pass your custom implementation to the :class:`~lightning.pytorch.trainer.trainer.Trainer`. .. code-block:: python @@ -51,7 +51,7 @@ specific methods of the callback class and pass your custom implementation to th trainer = Trainer(callbacks=[LitProgressBar()]) .. seealso:: - - :class:`~pytorch_lightning.callbacks.TQDMProgressBar` docs. + - :class:`~lightning.pytorch.callbacks.TQDMProgressBar` docs. - `tqdm library `__ ---------------- @@ -60,26 +60,26 @@ RichProgressBar --------------- `Rich `_ is a Python library for rich text and beautiful formatting in the terminal. -To use the :class:`~pytorch_lightning.callbacks.RichProgressBar` as your progress bar, first install the package: +To use the :class:`~lightning.pytorch.callbacks.RichProgressBar` as your progress bar, first install the package: .. code-block:: bash pip install rich -Then configure the callback and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`: +Then configure the callback and pass it to the :class:`~lightning.pytorch.trainer.trainer.Trainer`: .. 
code-block:: python - from pytorch_lightning.callbacks import RichProgressBar + from lightning.pytorch.callbacks import RichProgressBar trainer = Trainer(callbacks=[RichProgressBar()]) -Customize the theme for your :class:`~pytorch_lightning.callbacks.RichProgressBar` like this: +Customize the theme for your :class:`~lightning.pytorch.callbacks.RichProgressBar` like this: .. code-block:: python - from pytorch_lightning.callbacks import RichProgressBar - from pytorch_lightning.callbacks.progress.rich_progress import RichProgressBarTheme + from lightning.pytorch.callbacks import RichProgressBar + from lightning.pytorch.callbacks.progress.rich_progress import RichProgressBarTheme # create your own theme! progress_bar = RichProgressBar( @@ -97,8 +97,8 @@ Customize the theme for your :class:`~pytorch_lightning.callbacks.RichProgressBa trainer = Trainer(callbacks=progress_bar) -You can customize the components used within :class:`~pytorch_lightning.callbacks.RichProgressBar` with ease by overriding the -:func:`~pytorch_lightning.callbacks.RichProgressBar.configure_columns` method. +You can customize the components used within :class:`~lightning.pytorch.callbacks.RichProgressBar` with ease by overriding the +:func:`~lightning.pytorch.callbacks.RichProgressBar.configure_columns` method. .. code-block:: python @@ -115,17 +115,17 @@ You can customize the components used within :class:`~pytorch_lightning.callback progress_bar = CustomRichProgressBar() If you wish for a new progress bar to be displayed at the end of every epoch, you should enable -:paramref:`RichProgressBar.leave ` by passing ``True`` +:paramref:`RichProgressBar.leave ` by passing ``True`` .. code-block:: python - from pytorch_lightning.callbacks import RichProgressBar + from lightning.pytorch.callbacks import RichProgressBar trainer = Trainer(callbacks=[RichProgressBar(leave=True)]) .. seealso:: - - :class:`~pytorch_lightning.callbacks.RichProgressBar` docs. - - :class:`~pytorch_lightning.callbacks.RichModelSummary` docs to customize the model summary table. + - :class:`~lightning.pytorch.callbacks.RichProgressBar` docs. + - :class:`~lightning.pytorch.callbacks.RichModelSummary` docs to customize the model summary table. - `Rich library `__. diff --git a/docs/source-pytorch/common/remote_fs.rst b/docs/source-pytorch/common/remote_fs.rst index 29a4fe78a9ef4..199cba4ea2bf3 100644 --- a/docs/source-pytorch/common/remote_fs.rst +++ b/docs/source-pytorch/common/remote_fs.rst @@ -21,7 +21,7 @@ You could pass custom paths to loggers for logging data. .. code-block:: python - from pytorch_lightning.loggers import TensorBoardLogger + from lightning.pytorch.loggers import TensorBoardLogger logger = TensorBoardLogger(save_dir="s3://my_bucket/logs/") diff --git a/docs/source-pytorch/common/trainer.rst b/docs/source-pytorch/common/trainer.rst index 86e652ad008dd..dd036094a8567 100644 --- a/docs/source-pytorch/common/trainer.rst +++ b/docs/source-pytorch/common/trainer.rst @@ -4,7 +4,7 @@ .. testsetup:: * import os - from pytorch_lightning import Trainer, LightningModule, seed_everything + from lightning.pytorch import Trainer, LightningModule, seed_everything .. _trainer: @@ -132,7 +132,7 @@ resources, for example, you can conditionally run the shutdown logic for only un Validation ---------- You can perform an evaluation epoch over the validation set, outside of the training loop, -using :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`. This might be +using :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate`. 
This might be useful if you want to collect new metrics from a model right at its initialization or after it has already been trained. @@ -161,7 +161,7 @@ and set ``deterministic`` flag in ``Trainer``. Example:: - from pytorch_lightning import Trainer, seed_everything + from lightning.pytorch import Trainer, seed_everything seed_everything(42, workers=True) # sets seeds for numpy, torch and python.random. @@ -169,7 +169,7 @@ Example:: trainer = Trainer(deterministic=True) -By setting ``workers=True`` in :func:`~pytorch_lightning.seed_everything`, Lightning derives +By setting ``workers=True`` in :func:`~lightning.pytorch.seed_everything`, Lightning derives unique seeds across all dataloader workers and processes for :mod:`torch`, :mod:`numpy` and stdlib :mod:`random` number generators. When turned on, it ensures that e.g. data augmentations are not repeated across workers. @@ -275,7 +275,7 @@ benchmark The value (``True`` or ``False``) to set ``torch.backends.cudnn.benchmark`` to. The value for ``torch.backends.cudnn.benchmark`` set in the current session will be used (``False`` if not manually set). -If :paramref:`~pytorch_lightning.trainer.Trainer.deterministic` is set to ``True``, this will default to ``False``. +If :paramref:`~lightning.pytorch.trainer.Trainer.deterministic` is set to ``True``, this will default to ``False``. You can read more about the interaction of ``torch.backends.cudnn.benchmark`` and ``torch.backends.cudnn.deterministic`` `here `__ @@ -324,8 +324,8 @@ callbacks | -Add a list of :class:`~pytorch_lightning.callbacks.callback.Callback`. Callbacks run sequentially in the order defined here -with the exception of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks which run +Add a list of :class:`~lightning.pytorch.callbacks.callback.Callback`. Callbacks run sequentially in the order defined here +with the exception of :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callbacks which run after all others to ensure all states are saved to the checkpoints. .. code-block:: python @@ -336,7 +336,7 @@ after all others to ensure all states are saved to the checkpoints. Example:: - from pytorch_lightning.callbacks import Callback + from lightning.pytorch.callbacks import Callback class PrintCallback(Callback): def on_train_start(self, trainer, pl_module): @@ -346,10 +346,10 @@ Example:: Model-specific callbacks can also be added inside the ``LightningModule`` through -:meth:`~pytorch_lightning.core.module.LightningModule.configure_callbacks`. +:meth:`~lightning.pytorch.core.module.LightningModule.configure_callbacks`. Callbacks returned in this hook will extend the list initially given to the ``Trainer`` argument, and replace the trainer callbacks should there be two or more of the same type. -:class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks always run last. +:class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callbacks always run last. check_val_every_n_epoch @@ -386,7 +386,7 @@ default_root_dir | Default path for logs and weights when no logger or -:class:`pytorch_lightning.callbacks.ModelCheckpoint` callback passed. On +:class:`lightning.pytorch.callbacks.ModelCheckpoint` callback passed. On certain clusters you might want to separate where logs and checkpoints are stored. If you don't then use this argument for convenience. Paths can be local paths or remote paths such as `s3://bucket/path` or 'hdfs://path/'. 
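For example (the bucket path below is a placeholder, not a real location):

.. code-block:: python

    from lightning.pytorch import Trainer

    # "s3://my_bucket/lightning_runs/" is a hypothetical remote path for logs and checkpoints
    trainer = Trainer(default_root_dir="s3://my_bucket/lightning_runs/")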
Credentials @@ -473,13 +473,13 @@ To disable automatic checkpointing, set this to `False`. trainer = Trainer(enable_checkpointing=False) -You can override the default behavior by initializing the :class:`~pytorch_lightning.callbacks.ModelCheckpoint` -callback, and adding it to the :paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks` list. +You can override the default behavior by initializing the :class:`~lightning.pytorch.callbacks.ModelCheckpoint` +callback, and adding it to the :paramref:`~lightning.pytorch.trainer.trainer.Trainer.callbacks` list. See :doc:`Saving and Loading Checkpoints <../common/checkpointing>` for how to customize checkpointing. .. testcode:: - from pytorch_lightning.callbacks import ModelCheckpoint + from lightning.pytorch.callbacks import ModelCheckpoint # Init ModelCheckpoint callback, monitoring 'val_loss' checkpoint_callback = ModelCheckpoint(monitor="val_loss") @@ -525,10 +525,10 @@ impact to subsequent runs. These are the changes enabled: - Sets ``Trainer(check_every_n_epoch=1)``. - Disables all loggers. - Disables passing logged metrics to loggers. -- The :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callbacks will not trigger. -- The :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callbacks will not trigger. +- The :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callbacks will not trigger. +- The :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callbacks will not trigger. - Sets ``limit_{train,val,test,predict}_batches`` to 1 or the number passed. -- Disables the tuning callbacks (:class:`~pytorch_lightning.callbacks.batch_size_finder.BatchSizeFinder`, :class:`~pytorch_lightning.callbacks.lr_finder.LearningRateFinder`). +- Disables the tuning callbacks (:class:`~lightning.pytorch.callbacks.batch_size_finder.BatchSizeFinder`, :class:`~lightning.pytorch.callbacks.lr_finder.LearningRateFinder`). - If using the CLI, the configuration file is not saved. @@ -676,7 +676,7 @@ logger .. testcode:: :skipif: not _TENSORBOARD_AVAILABLE and not _TENSORBOARDX_AVAILABLE - from pytorch_lightning.loggers import TensorBoardLogger + from lightning.pytorch.loggers import TensorBoardLogger # default logger used by trainer (if tensorboard is installed) logger = TensorBoardLogger(save_dir=os.getcwd(), version=1, name="lightning_logs") @@ -772,7 +772,7 @@ max_time ^^^^^^^^ Set the maximum amount of time for training. Training will get interrupted mid-epoch. -For customizable options use the :class:`~pytorch_lightning.callbacks.timer.Timer` callback. +For customizable options use the :class:`~lightning.pytorch.callbacks.timer.Timer` callback. .. testcode:: @@ -884,11 +884,11 @@ plugins - :ref:`Precision Plugins ` To define your own behavior, subclass the relevant class and pass it in. Here's an example linking up your own -:class:`~pytorch_lightning.plugins.environments.ClusterEnvironment`. +:class:`~lightning.pytorch.plugins.environments.ClusterEnvironment`. .. code-block:: python - from pytorch_lightning.plugins.environments import ClusterEnvironment + from lightning.pytorch.plugins.environments import ClusterEnvironment class MyCluster(ClusterEnvironment): @@ -954,7 +954,7 @@ See the :doc:`profiler documentation <../tuning/profiler>`. for more details. .. 
testcode:: - from pytorch_lightning.profilers import SimpleProfiler, AdvancedProfiler + from lightning.pytorch.profilers import SimpleProfiler, AdvancedProfiler # default used by the Trainer trainer = Trainer(profiler=None) @@ -1017,7 +1017,7 @@ The pseudocode applies also to the ``val_dataloader``. use_distributed_sampler ^^^^^^^^^^^^^^^^^^^^^^^ -See :paramref:`pytorch_lightning.trainer.Trainer.params.use_distributed_sampler`. +See :paramref:`lightning.pytorch.trainer.Trainer.params.use_distributed_sampler`. .. testcode:: @@ -1054,7 +1054,7 @@ Additionally, you can pass a strategy object. .. code-block:: python - from pytorch_lightning.strategies import DDPStrategy + from lightning.pytorch.strategies import DDPStrategy trainer = Trainer(strategy=DDPStrategy(static_graph=True), accelerator="gpu", devices=2) @@ -1149,7 +1149,7 @@ Whether to enable or disable the model summarization. Defaults to True. trainer = Trainer(enable_model_summary=False) # enable custom summarization - from pytorch_lightning.callbacks import ModelSummary + from lightning.pytorch.callbacks import ModelSummary trainer = Trainer(enable_model_summary=True, callbacks=[ModelSummary(max_depth=-1)]) @@ -1197,31 +1197,31 @@ Methods init **** -.. automethod:: pytorch_lightning.trainer.Trainer.__init__ +.. automethod:: lightning.pytorch.trainer.Trainer.__init__ :noindex: fit **** -.. automethod:: pytorch_lightning.trainer.Trainer.fit +.. automethod:: lightning.pytorch.trainer.Trainer.fit :noindex: validate ******** -.. automethod:: pytorch_lightning.trainer.Trainer.validate +.. automethod:: lightning.pytorch.trainer.Trainer.validate :noindex: test **** -.. automethod:: pytorch_lightning.trainer.Trainer.test +.. automethod:: lightning.pytorch.trainer.Trainer.test :noindex: predict ******* -.. automethod:: pytorch_lightning.trainer.Trainer.predict +.. automethod:: lightning.pytorch.trainer.Trainer.predict :noindex: @@ -1373,7 +1373,7 @@ Note that property returns a list of predict dataloaders. estimated_stepping_batches ************************** -Check out :meth:`~pytorch_lightning.trainer.trainer.Trainer.estimated_stepping_batches`. +Check out :meth:`~lightning.pytorch.trainer.trainer.Trainer.estimated_stepping_batches`. state ***** diff --git a/docs/source-pytorch/conf.py b/docs/source-pytorch/conf.py index bfb9e80e0c44f..f29d5e9a03f06 100644 --- a/docs/source-pytorch/conf.py +++ b/docs/source-pytorch/conf.py @@ -20,7 +20,7 @@ import pt_lightning_sphinx_theme -import pytorch_lightning +import lightning # ----------------------- # VARIABLES WHEN WORKING ON DOCS... MAKE THIS TRUE TO BUILD FASTER @@ -81,13 +81,13 @@ def _transform_changelog(path_in: str, path_out: str) -> None: # -- Project information ----------------------------------------------------- project = "PyTorch Lightning" -copyright = pytorch_lightning.__copyright__ -author = pytorch_lightning.__author__ +copyright = lightning.__copyright__ +author = lightning.__author__ # The short X.Y version -version = pytorch_lightning.__version__ +version = lightning.__version__ # The full version, including alpha/beta/rc tags -release = pytorch_lightning.__version__ +release = lightning.__version__ # -- General configuration --------------------------------------------------- @@ -195,8 +195,8 @@ def _transform_changelog(path_in: str, path_out: str) -> None: # documentation. 
html_theme_options = { - "pytorch_project": "https://pytorchlightning.ai", - "canonical_url": pytorch_lightning.__about__.__docs_url__, + "pytorch_project": "https://lightning.ai", + "canonical_url": lightning.__docs_url__, "collapse_navigation": False, "display_version": True, "logo_only": False, @@ -390,20 +390,20 @@ def package_list_from_file(file): from typing import Optional import torch -import pytorch_lightning as pl +import lightning.pytorch as pl from torch import nn from torch.utils.data import IterableDataset, DataLoader, Dataset -from pytorch_lightning import LightningDataModule, LightningModule, Trainer, seed_everything -from pytorch_lightning.callbacks import Callback -from pytorch_lightning.cli import _JSONARGPARSE_SIGNATURES_AVAILABLE as _JSONARGPARSE_AVAILABLE -from pytorch_lightning.utilities import ( +from lightning.pytorch import LightningDataModule, LightningModule, Trainer, seed_everything +from lightning.pytorch.callbacks import Callback +from lightning.pytorch.cli import _JSONARGPARSE_SIGNATURES_AVAILABLE as _JSONARGPARSE_AVAILABLE +from lightning.pytorch.utilities import ( _TORCHVISION_AVAILABLE, ) from lightning_fabric.loggers.tensorboard import _TENSORBOARD_AVAILABLE, _TENSORBOARDX_AVAILABLE -from pytorch_lightning.loggers.neptune import _NEPTUNE_AVAILABLE -from pytorch_lightning.loggers.comet import _COMET_AVAILABLE -from pytorch_lightning.loggers.mlflow import _MLFLOW_AVAILABLE -from pytorch_lightning.loggers.wandb import _WANDB_AVAILABLE +from lightning.pytorch.loggers.neptune import _NEPTUNE_AVAILABLE +from lightning.pytorch.loggers.comet import _COMET_AVAILABLE +from lightning.pytorch.loggers.mlflow import _MLFLOW_AVAILABLE +from lightning.pytorch.loggers.wandb import _WANDB_AVAILABLE """ coverage_skip_undoc_in_source = True diff --git a/docs/source-pytorch/data/datamodule.rst b/docs/source-pytorch/data/datamodule.rst index fd931f13f04cd..606e83d760afc 100644 --- a/docs/source-pytorch/data/datamodule.rst +++ b/docs/source-pytorch/data/datamodule.rst @@ -122,7 +122,7 @@ Here's a more realistic, complex DataModule that shows how much more reusable th .. code-block:: python - import pytorch_lightning as pl + import lightning.pytorch as pl from torch.utils.data import random_split, DataLoader # Note - you must have torchvision installed for this example @@ -184,9 +184,9 @@ To define a DataModule the following methods are used to create train/val/test/p prepare_data ============ Downloading and saving data with multiple processes (distributed settings) will result in corrupted data. Lightning -ensures the :meth:`~pytorch_lightning.core.hooks.DataHooks.prepare_data` is called only within a single process on CPU, +ensures the :meth:`~lightning.pytorch.core.hooks.DataHooks.prepare_data` is called only within a single process on CPU, so you can safely add your downloading logic within. In case of multi-node training, the execution of this hook -depends upon :ref:`prepare_data_per_node`. :meth:`~pytorch_lightning.core.hooks.DataHooks.setup` is called after +depends upon :ref:`prepare_data_per_node`. :meth:`~lightning.pytorch.core.hooks.DataHooks.setup` is called after ``prepare_data`` and there is a barrier in between which ensures that all the processes proceed to ``setup`` once the data is prepared and available for use. - download, i.e. download data only once on the disk from a single process @@ -210,7 +210,7 @@ depends upon :ref:`prepare_data_per_node` setup ===== -There are also data operations you might want to perform on every GPU. 
Use :meth:`~pytorch_lightning.core.hooks.DataHooks.setup` to do things like: +There are also data operations you might want to perform on every GPU. Use :meth:`~lightning.pytorch.core.hooks.DataHooks.setup` to do things like: - count number of classes - build vocabulary @@ -221,7 +221,7 @@ There are also data operations you might want to perform on every GPU. Use :meth .. code-block:: python - import pytorch_lightning as pl + import lightning.pytorch as pl class MNISTDataModule(pl.LightningDataModule): @@ -262,13 +262,13 @@ It is used to separate setup logic for ``trainer.{fit,validate,test,predict}``. train_dataloader ================ -Use the :meth:`~pytorch_lightning.core.hooks.DataHooks.train_dataloader` method to generate the training dataloader(s). +Use the :meth:`~lightning.pytorch.core.hooks.DataHooks.train_dataloader` method to generate the training dataloader(s). Usually you just wrap the dataset you defined in :ref:`setup`. This is the dataloader that the Trainer -:meth:`~pytorch_lightning.trainer.trainer.Trainer.fit` method uses. +:meth:`~lightning.pytorch.trainer.trainer.Trainer.fit` method uses. .. code-block:: python - import pytorch_lightning as pl + import lightning.pytorch as pl class MNISTDataModule(pl.LightningDataModule): @@ -279,13 +279,13 @@ Usually you just wrap the dataset you defined in :ref:`setup`. This is the dataloader that the Trainer -:meth:`~pytorch_lightning.trainer.trainer.Trainer.fit` and :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate` methods uses. +:meth:`~lightning.pytorch.trainer.trainer.Trainer.fit` and :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate` methods uses. .. code-block:: python - import pytorch_lightning as pl + import lightning.pytorch as pl class MNISTDataModule(pl.LightningDataModule): @@ -297,13 +297,13 @@ Usually you just wrap the dataset you defined in :ref:`setup`. This is the dataloader that the Trainer -:meth:`~pytorch_lightning.trainer.trainer.Trainer.test` method uses. +:meth:`~lightning.pytorch.trainer.trainer.Trainer.test` method uses. .. code-block:: python - import pytorch_lightning as pl + import lightning.pytorch as pl class MNISTDataModule(pl.LightningDataModule): @@ -313,13 +313,13 @@ Usually you just wrap the dataset you defined in :ref:`setup`. This is the dataloader that the Trainer -:meth:`~pytorch_lightning.trainer.trainer.Trainer.predict` method uses. +:meth:`~lightning.pytorch.trainer.trainer.Trainer.predict` method uses. .. code-block:: python - import pytorch_lightning as pl + import lightning.pytorch as pl class MNISTDataModule(pl.LightningDataModule): @@ -330,37 +330,37 @@ Usually you just wrap the dataset you defined in :ref:`setup`_ is a package developed by Microsoft to optimize inference. ONNX allows the model to be independent of PyTorch and run on any ONNX Runtime. -To export your model to ONNX format call the :meth:`~pytorch_lightning.core.module.LightningModule.to_onnx` function on your :class:`~pytorch_lightning.core.module.LightningModule` with the ``filepath`` and ``input_sample``. +To export your model to ONNX format call the :meth:`~lightning.pytorch.core.module.LightningModule.to_onnx` function on your :class:`~lightning.pytorch.core.module.LightningModule` with the ``filepath`` and ``input_sample``. .. 
code-block:: python @@ -29,7 +29,7 @@ To export your model to ONNX format call the :meth:`~pytorch_lightning.core.modu input_sample = torch.randn((1, 64)) model.to_onnx(filepath, input_sample, export_params=True) -You can also skip passing the input sample if the ``example_input_array`` property is specified in your :class:`~pytorch_lightning.core.module.LightningModule`. +You can also skip passing the input sample if the ``example_input_array`` property is specified in your :class:`~lightning.pytorch.core.module.LightningModule`. .. code-block:: python @@ -69,7 +69,7 @@ Production ML Engineers would argue that a model shouldn't be trained if it can' In order to ease transition from training to production, PyTorch Lightning provides a way for you to validate a model can be served even before starting training. -In order to do so, your LightningModule needs to subclass the :class:`~pytorch_lightning.serve.servable_module.ServableModule`, implements its hooks and pass a :class:`~pytorch_lightning.serve.servable_module_validator.ServableModuleValidator` callback to the Trainer. +In order to do so, your LightningModule needs to subclass the :class:`~lightning.pytorch.serve.servable_module.ServableModule`, implements its hooks and pass a :class:`~lightning.pytorch.serve.servable_module_validator.ServableModuleValidator` callback to the Trainer. Below you can find an example of how the serving of a resnet18 can be validated. diff --git a/docs/source-pytorch/deploy/production_advanced_2.rst b/docs/source-pytorch/deploy/production_advanced_2.rst index ea5ca9fd24a8b..09014d2c8fcb3 100644 --- a/docs/source-pytorch/deploy/production_advanced_2.rst +++ b/docs/source-pytorch/deploy/production_advanced_2.rst @@ -11,7 +11,7 @@ Deploy models into production (advanced) Compile your model to TorchScript ********************************* `TorchScript `_ allows you to serialize your models in a way that it can be loaded in non-Python environments. -The ``LightningModule`` has a handy method :meth:`~pytorch_lightning.core.module.LightningModule.to_torchscript` that returns a scripted module which you +The ``LightningModule`` has a handy method :meth:`~lightning.pytorch.core.module.LightningModule.to_torchscript` that returns a scripted module which you can save or directly use. .. testcode:: python diff --git a/docs/source-pytorch/deploy/production_basic.rst b/docs/source-pytorch/deploy/production_basic.rst index 78b275dd9f482..e03a2b5a805c6 100644 --- a/docs/source-pytorch/deploy/production_basic.rst +++ b/docs/source-pytorch/deploy/production_basic.rst @@ -71,12 +71,12 @@ When you need to add complicated pre-processing or post-processing logic to your **************************** Enable distributed inference **************************** -By using the predict step in Lightning you get free distributed inference using :class:`~pytorch_lightning.callbacks.prediction_writer.BasePredictionWriter`. +By using the predict step in Lightning you get free distributed inference using :class:`~lightning.pytorch.callbacks.prediction_writer.BasePredictionWriter`. .. 
code-block:: python import torch - from pytorch_lightning.callbacks import BasePredictionWriter + from lightning.pytorch.callbacks import BasePredictionWriter class CustomWriter(BasePredictionWriter): diff --git a/docs/source-pytorch/ecosystem/bolts.rst b/docs/source-pytorch/ecosystem/bolts.rst index 56c77681351a8..a82184dfc8534 100644 --- a/docs/source-pytorch/ecosystem/bolts.rst +++ b/docs/source-pytorch/ecosystem/bolts.rst @@ -83,7 +83,7 @@ We also have a collection of callbacks. .. code-block:: python from pl_bolts.callbacks import PrintTableMetricsCallback - import pytorch_lightning as pl + import lightning.pytorch as pl trainer = pl.Trainer(callbacks=[PrintTableMetricsCallback()]) diff --git a/docs/source-pytorch/ecosystem/community_examples.rst b/docs/source-pytorch/ecosystem/community_examples.rst index a5e5062efa173..4e500c7a4dede 100644 --- a/docs/source-pytorch/ecosystem/community_examples.rst +++ b/docs/source-pytorch/ecosystem/community_examples.rst @@ -15,7 +15,7 @@ Community Examples - `NeuralTexture (CVPR) `_ - `Recurrent Attentive Neural Process `_ - `Siamese Nets for One-shot Image Recognition `_ -- `Speech Transformers `_ +- `Speech Transformers `_ - `Transformers transfer learning (Huggingface) `_ - `Transformers text classification `_ - `VAE Library of over 18+ VAE flavors `_ @@ -32,5 +32,5 @@ Community Examples PyTorch Ecosystem Examples ========================== -- `PyTorch Geometric: Deep learning on graphs and other irregular structures `_. -- `TorchIO, MONAI and Lightning for 3D medical image segmentation `_. +- `PyTorch Geometric: Deep learning on graphs and other irregular structures `_. +- `TorchIO, MONAI and Lightning for 3D medical image segmentation `_. diff --git a/docs/source-pytorch/extensions/accelerator.rst b/docs/source-pytorch/extensions/accelerator.rst index 5fedd441fdd2c..f74bd16d75087 100644 --- a/docs/source-pytorch/extensions/accelerator.rst +++ b/docs/source-pytorch/extensions/accelerator.rst @@ -70,7 +70,7 @@ Finally, add the XPUAccelerator to the Trainer: .. code-block:: python - from pytorch_lightning import Trainer + from lightning.pytorch import Trainer accelerator = XPUAccelerator() trainer = Trainer(accelerator=accelerator, devices=2) @@ -84,7 +84,7 @@ Finally, add the XPUAccelerator to the Trainer: Registering Accelerators ------------------------ -If you wish to switch to a custom accelerator from the CLI without code changes, you can implement the :meth:`~pytorch_lightning.accelerators.accelerator.Accelerator.register_accelerators` class method to register your new accelerator under a shorthand name like so: +If you wish to switch to a custom accelerator from the CLI without code changes, you can implement the :meth:`~lightning.pytorch.accelerators.accelerator.Accelerator.register_accelerators` class method to register your new accelerator under a shorthand name like so: .. code-block:: python @@ -117,7 +117,7 @@ Or if you are using the Lightning CLI, for example: Accelerator API --------------- -.. currentmodule:: pytorch_lightning.accelerators +.. currentmodule:: lightning.pytorch.accelerators .. autosummary:: :nosignatures: diff --git a/docs/source-pytorch/extensions/callbacks.rst b/docs/source-pytorch/extensions/callbacks.rst index 042623c2ea009..6c4b09e00430c 100644 --- a/docs/source-pytorch/extensions/callbacks.rst +++ b/docs/source-pytorch/extensions/callbacks.rst @@ -38,7 +38,7 @@ Example: .. 
testcode:: - from pytorch_lightning.callbacks import Callback + from lightning.pytorch.callbacks import Callback class MyPrintingCallback(Callback): @@ -78,7 +78,7 @@ Lightning has a few built-in callbacks. For a richer collection of callbacks, check out our `bolts library `_. -.. currentmodule:: pytorch_lightning.callbacks +.. currentmodule:: lightning.pytorch.callbacks .. autosummary:: :nosignatures: @@ -137,7 +137,7 @@ Callback API ************ Here is the full API of methods available in the Callback base class. -The :class:`~pytorch_lightning.callbacks.Callback` class is the base for all the callbacks in Lightning just like the :class:`~pytorch_lightning.core.module.LightningModule` is the base for all models. +The :class:`~lightning.pytorch.callbacks.Callback` class is the base for all the callbacks in Lightning just like the :class:`~lightning.pytorch.core.module.LightningModule` is the base for all models. It defines a public interface that each callback implementation must follow, the key ones are: Properties @@ -146,7 +146,7 @@ Properties state_key ^^^^^^^^^ -.. autoattribute:: pytorch_lightning.callbacks.Callback.state_key +.. autoattribute:: lightning.pytorch.callbacks.Callback.state_key :noindex: @@ -156,234 +156,234 @@ Hooks setup ^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.setup +.. automethod:: lightning.pytorch.callbacks.Callback.setup :noindex: teardown ^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.teardown +.. automethod:: lightning.pytorch.callbacks.Callback.teardown :noindex: on_fit_start ^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_fit_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_fit_start :noindex: on_fit_end ^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_fit_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_fit_end :noindex: on_sanity_check_start ^^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_sanity_check_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_sanity_check_start :noindex: on_sanity_check_end ^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_sanity_check_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_sanity_check_end :noindex: on_train_batch_start ^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_train_batch_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_train_batch_start :noindex: on_train_batch_end ^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_train_batch_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_train_batch_end :noindex: on_train_epoch_start ^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_train_epoch_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_train_epoch_start :noindex: on_train_epoch_end ^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_train_epoch_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_train_epoch_end :noindex: on_validation_epoch_start ^^^^^^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_validation_epoch_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_validation_epoch_start :noindex: on_validation_epoch_end ^^^^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_validation_epoch_end +.. 
automethod:: lightning.pytorch.callbacks.Callback.on_validation_epoch_end :noindex: on_test_epoch_start ^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_test_epoch_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_test_epoch_start :noindex: on_test_epoch_end ^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_test_epoch_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_test_epoch_end :noindex: on_predict_epoch_start ^^^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_predict_epoch_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_predict_epoch_start :noindex: on_predict_epoch_end ^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_predict_epoch_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_predict_epoch_end :noindex: on_validation_batch_start ^^^^^^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_validation_batch_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_validation_batch_start :noindex: on_validation_batch_end ^^^^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_validation_batch_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_validation_batch_end :noindex: on_test_batch_start ^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_test_batch_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_test_batch_start :noindex: on_test_batch_end ^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_test_batch_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_test_batch_end :noindex: on_predict_batch_start ^^^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_predict_batch_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_predict_batch_start :noindex: on_predict_batch_end ^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_predict_batch_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_predict_batch_end :noindex: on_train_start ^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_train_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_train_start :noindex: on_train_end ^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_train_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_train_end :noindex: on_validation_start ^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_validation_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_validation_start :noindex: on_validation_end ^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_validation_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_validation_end :noindex: on_test_start ^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_test_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_test_start :noindex: on_test_end ^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_test_end +.. automethod:: lightning.pytorch.callbacks.Callback.on_test_end :noindex: on_predict_start ^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_predict_start +.. automethod:: lightning.pytorch.callbacks.Callback.on_predict_start :noindex: on_predict_end ^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_predict_end +.. 
automethod:: lightning.pytorch.callbacks.Callback.on_predict_end :noindex: on_exception ^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_exception +.. automethod:: lightning.pytorch.callbacks.Callback.on_exception :noindex: state_dict ^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.state_dict +.. automethod:: lightning.pytorch.callbacks.Callback.state_dict :noindex: on_save_checkpoint ^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_save_checkpoint +.. automethod:: lightning.pytorch.callbacks.Callback.on_save_checkpoint :noindex: load_state_dict ^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.load_state_dict +.. automethod:: lightning.pytorch.callbacks.Callback.load_state_dict :noindex: on_load_checkpoint ^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_load_checkpoint +.. automethod:: lightning.pytorch.callbacks.Callback.on_load_checkpoint :noindex: on_before_backward ^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_before_backward +.. automethod:: lightning.pytorch.callbacks.Callback.on_before_backward :noindex: on_after_backward ^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_after_backward +.. automethod:: lightning.pytorch.callbacks.Callback.on_after_backward :noindex: on_before_optimizer_step ^^^^^^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_before_optimizer_step +.. automethod:: lightning.pytorch.callbacks.Callback.on_before_optimizer_step :noindex: on_before_zero_grad ^^^^^^^^^^^^^^^^^^^ -.. automethod:: pytorch_lightning.callbacks.Callback.on_before_zero_grad +.. automethod:: lightning.pytorch.callbacks.Callback.on_before_zero_grad :noindex: diff --git a/docs/source-pytorch/extensions/callbacks_state.rst b/docs/source-pytorch/extensions/callbacks_state.rst index 0a104caf2bc3d..25f6ae4fb53ed 100644 --- a/docs/source-pytorch/extensions/callbacks_state.rst +++ b/docs/source-pytorch/extensions/callbacks_state.rst @@ -4,12 +4,12 @@ Save Callback state Some callbacks require internal state in order to function properly. You can optionally choose to persist your callback's state as part of model checkpoint files using -:meth:`~pytorch_lightning.callbacks.Callback.state_dict` and :meth:`~pytorch_lightning.callbacks.Callback.load_state_dict`. +:meth:`~lightning.pytorch.callbacks.Callback.state_dict` and :meth:`~lightning.pytorch.callbacks.Callback.load_state_dict`. Note that the returned state must be able to be pickled. When your callback is meant to be used only as a singleton callback then implementing the above two hooks is enough to persist state effectively. However, if passing multiple instances of the callback to the Trainer is supported, then -the callback must define a :attr:`~pytorch_lightning.callbacks.Callback.state_key` property in order for Lightning +the callback must define a :attr:`~lightning.pytorch.callbacks.Callback.state_key` property in order for Lightning to be able to distinguish the different states when loading the callback state. This concept is best illustrated by the following example. @@ -57,6 +57,6 @@ A Lightning checkpoint from this Trainer with the two stateful callbacks will in } } -The implementation of a :attr:`~pytorch_lightning.callbacks.Callback.state_key` is essential here. 
If it were missing, -Lightning would not be able to disambiguate the state for these two callbacks, and :attr:`~pytorch_lightning.callbacks.Callback.state_key` +The implementation of a :attr:`~lightning.pytorch.callbacks.Callback.state_key` is essential here. If it were missing, +Lightning would not be able to disambiguate the state for these two callbacks, and :attr:`~lightning.pytorch.callbacks.Callback.state_key` by default only defines the class name as the key, e.g., here ``Counter``. diff --git a/docs/source-pytorch/extensions/entry_points.rst b/docs/source-pytorch/extensions/entry_points.rst index 24bf28a05f55c..1d6234eba0d67 100644 --- a/docs/source-pytorch/extensions/entry_points.rst +++ b/docs/source-pytorch/extensions/entry_points.rst @@ -29,14 +29,14 @@ Here is a minimal example of the `setup.py` file for the package `my-package`: version="0.0.1", install_requires=["pytorch-lightning"], entry_points={ - "pytorch_lightning.callbacks_factory": [ + "lightning.pytorch.callbacks_factory": [ # The format here must be [any name]=[module path]:[function name] "monitor_callbacks=factories:my_custom_callbacks_factory" ] }, ) -The group name for the entry points is ``pytorch_lightning.callbacks_factory`` and it contains a list of strings that +The group name for the entry points is ``lightning.pytorch.callbacks_factory`` and it contains a list of strings that specify where to find the function within the package. Now, if you `pip install -e .` this package, it will register the ``my_custom_callbacks_factory`` function and Lightning diff --git a/docs/source-pytorch/extensions/logging.rst b/docs/source-pytorch/extensions/logging.rst index 2fc8ac79a893b..7167daab05caa 100644 --- a/docs/source-pytorch/extensions/logging.rst +++ b/docs/source-pytorch/extensions/logging.rst @@ -2,7 +2,7 @@ .. testsetup:: * - from pytorch_lightning import loggers as pl_loggers + from lightning.pytorch import loggers as pl_loggers .. role:: hidden :class: hidden-section @@ -20,7 +20,7 @@ Supported Loggers The following are loggers we support: -.. currentmodule:: pytorch_lightning.loggers +.. currentmodule:: lightning.pytorch.loggers .. autosummary:: :toctree: generated @@ -41,7 +41,7 @@ By default, Lightning uses ``TensorBoard`` logger under the hood, and stores the .. testcode:: - from pytorch_lightning import Trainer + from lightning.pytorch import Trainer # Automatically logs to a directory (by default ``lightning_logs/``) trainer = Trainer() @@ -59,12 +59,12 @@ To visualize tensorboard in a jupyter notebook environment, run the following co %reload_ext tensorboard %tensorboard --logdir=lightning_logs/ -You can also pass a custom Logger to the :class:`~pytorch_lightning.trainer.trainer.Trainer`. +You can also pass a custom Logger to the :class:`~lightning.pytorch.trainer.trainer.Trainer`. .. 
testcode:: :skipif: not _TENSORBOARD_AVAILABLE and not _TENSORBOARDX_AVAILABLE - from pytorch_lightning import loggers as pl_loggers + from lightning.pytorch import loggers as pl_loggers tb_logger = pl_loggers.TensorBoardLogger(save_dir="logs/") trainer = Trainer(logger=tb_logger) @@ -106,7 +106,7 @@ Lightning offers automatic log functionalities for logging scalars, or manual lo Automatic Logging ================= -Use the :meth:`~pytorch_lightning.core.module.LightningModule.log` or :meth:`~pytorch_lightning.core.module.LightningModule.log_dict` +Use the :meth:`~lightning.pytorch.core.module.LightningModule.log` or :meth:`~lightning.pytorch.core.module.LightningModule.log_dict` methods to log from anywhere in a :doc:`LightningModule <../common/lightning_module>` and :doc:`callbacks <../extensions/callbacks>`. .. code-block:: python @@ -120,23 +120,23 @@ methods to log from anywhere in a :doc:`LightningModule <../common/lightning_mod self.log_dict({"acc": acc, "recall": recall}) .. note:: - Everything explained below applies to both :meth:`~pytorch_lightning.core.module.LightningModule.log` or :meth:`~pytorch_lightning.core.module.LightningModule.log_dict` methods. + Everything explained below applies to both :meth:`~lightning.pytorch.core.module.LightningModule.log` or :meth:`~lightning.pytorch.core.module.LightningModule.log_dict` methods. -Depending on where the :meth:`~pytorch_lightning.core.module.LightningModule.log` method is called, Lightning auto-determines +Depending on where the :meth:`~lightning.pytorch.core.module.LightningModule.log` method is called, Lightning auto-determines the correct logging mode for you. Of course you can override the default behavior by manually setting the -:meth:`~pytorch_lightning.core.module.LightningModule.log` parameters. +:meth:`~lightning.pytorch.core.module.LightningModule.log` parameters. .. code-block:: python def training_step(self, batch, batch_idx): self.log("my_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True) -The :meth:`~pytorch_lightning.core.module.LightningModule.log` method has a few options: +The :meth:`~lightning.pytorch.core.module.LightningModule.log` method has a few options: * ``on_step``: Logs the metric at the current step. * ``on_epoch``: Automatically accumulates and logs at the end of the epoch. * ``prog_bar``: Logs to the progress bar (Default: ``False``). -* ``logger``: Logs to the logger like ``Tensorboard``, or any other custom logger passed to the :class:`~pytorch_lightning.trainer.trainer.Trainer` (Default: ``True``). +* ``logger``: Logs to the logger like ``Tensorboard``, or any other custom logger passed to the :class:`~lightning.pytorch.trainer.trainer.Trainer` (Default: ``True``). * ``reduce_fx``: Reduction function over step values for end of epoch. Uses :meth:`torch.mean` by default and is not applied when a :class:`torchmetrics.Metric` is logged. * ``enable_graph``: If True, will not auto detach the graph. * ``sync_dist``: If True, reduces the metric across devices. Use with care as this may lead to a significant communication overhead. @@ -195,7 +195,7 @@ The :meth:`~pytorch_lightning.core.module.LightningModule.log` method has a few - Setting both ``on_step=True`` and ``on_epoch=True`` will create two keys per metric you log with suffix ``_step`` and ``_epoch`` respectively. You can refer to these keys e.g. in the `monitor` - argument of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` or in the graphs plotted to the logger of your choice. 
+ argument of :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` or in the graphs plotted to the logger of your choice. If your work requires to log in an unsupported method, please open an issue with a clear description of why it is blocking you. @@ -223,13 +223,13 @@ If you want to log anything that is not a scalar, like histograms, text, images, Make a Custom Logger ******************** -You can implement your own logger by writing a class that inherits from :class:`~pytorch_lightning.loggers.logger.Logger`. -Use the :func:`~pytorch_lightning.loggers.logger.rank_zero_experiment` and :func:`~pytorch_lightning.utilities.rank_zero.rank_zero_only` decorators to make sure that only the first process in DDP training creates the experiment and logs the data respectively. +You can implement your own logger by writing a class that inherits from :class:`~lightning.pytorch.loggers.logger.Logger`. +Use the :func:`~lightning.pytorch.loggers.logger.rank_zero_experiment` and :func:`~lightning.pytorch.utilities.rank_zero.rank_zero_only` decorators to make sure that only the first process in DDP training creates the experiment and logs the data respectively. .. testcode:: - from pytorch_lightning.loggers.logger import Logger, rank_zero_experiment - from pytorch_lightning.utilities import rank_zero_only + from lightning.pytorch.loggers.logger import Logger, rank_zero_experiment + from lightning.pytorch.utilities import rank_zero_only class MyLogger(Logger): @@ -281,7 +281,7 @@ Logging frequency ================= It may slow down training to log on every single batch. By default, Lightning logs every 50 rows, or 50 training steps. -To change this behaviour, set the ``log_every_n_steps`` :class:`~pytorch_lightning.trainer.trainer.Trainer` flag. +To change this behaviour, set the ``log_every_n_steps`` :class:`~lightning.pytorch.trainer.trainer.Trainer` flag. .. testcode:: @@ -293,7 +293,7 @@ Log Writing Frequency ===================== Individual logger implementations determine their flushing frequency. For example, on the -:class:`~pytorch_lightning.loggers.csv_logs.CSVLogger` you can set the flag ``flush_logs_every_n_steps``. +:class:`~lightning.pytorch.loggers.csv_logs.CSVLogger` you can set the flag ``flush_logs_every_n_steps``. ---------- @@ -301,7 +301,7 @@ Individual logger implementations determine their flushing frequency. For exampl Progress Bar ************ -You can add any metric to the progress bar using :meth:`~pytorch_lightning.core.module.LightningModule.log` +You can add any metric to the progress bar using :meth:`~lightning.pytorch.core.module.LightningModule.log` method, setting ``prog_bar=True``. @@ -318,11 +318,11 @@ Modifying the Progress Bar The progress bar by default already includes the training loss and version number of the experiment if you are using a logger. These defaults can be customized by overriding the -:meth:`~pytorch_lightning.callbacks.progress.base.ProgressBarBase.get_metrics` hook in your logger. +:meth:`~lightning.pytorch.callbacks.progress.base.ProgressBarBase.get_metrics` hook in your logger. .. 
code-block:: python - from pytorch_lightning.callbacks.progress import TQDMProgressBar + from lightning.pytorch.callbacks.progress import TQDMProgressBar class CustomProgressBar(TQDMProgressBar): @@ -349,10 +349,10 @@ or redirect output for certain modules to log files: import logging # configure logging at the root level of Lightning - logging.getLogger("pytorch_lightning").setLevel(logging.ERROR) + logging.getLogger("lightning.pytorch").setLevel(logging.ERROR) # configure logging on module level, redirect to file - logger = logging.getLogger("pytorch_lightning.core") + logger = logging.getLogger("lightning.pytorch.core") logger.addHandler(logging.FileHandler("core.log")) Read more about custom Python logging `here `_. diff --git a/docs/source-pytorch/extensions/plugins.rst b/docs/source-pytorch/extensions/plugins.rst index 84eb7e9f301b0..20b4ec9667e4d 100644 --- a/docs/source-pytorch/extensions/plugins.rst +++ b/docs/source-pytorch/extensions/plugins.rst @@ -46,7 +46,7 @@ We provide precision plugins for you to benefit from numerical representations w The full list of built-in precision plugins is listed below. -.. currentmodule:: pytorch_lightning.plugins.precision +.. currentmodule:: lightning.pytorch.plugins.precision .. autosummary:: :nosignatures: @@ -73,12 +73,12 @@ More information regarding precision with Lightning can be found :ref:`here
`
    * - ddp_spawn
-     - :class:`~pytorch_lightning.strategies.DDPSpawnStrategy`
+     - :class:`~lightning.pytorch.strategies.DDPSpawnStrategy`
      - Spawns processes using the :func:`torch.multiprocessing.spawn` method and joins processes after training finishes. Useful for debugging. :ref:`Learn more. `
    * - ddp
-     - :class:`~pytorch_lightning.strategies.DDPStrategy`
+     - :class:`~lightning.pytorch.strategies.DDPStrategy`
      - Strategy for multi-process single-device training on one or multiple nodes. :ref:`Learn more. `
    * - deepspeed
-     - :class:`~pytorch_lightning.strategies.DeepSpeedStrategy`
+     - :class:`~lightning.pytorch.strategies.DeepSpeedStrategy`
      - Provides capabilities to run training using the DeepSpeed library, with training optimizations for large billion parameter models. :ref:`Learn more. `
    * - hpu_parallel
-     - :class:`~pytorch_lightning.strategies.HPUParallelStrategy`
+     - :class:`~lightning.pytorch.strategies.HPUParallelStrategy`
      - Strategy for distributed training on multiple HPU devices. :doc:`Learn more. <../accelerators/hpu>`
    * - hpu_single
-     - :class:`~pytorch_lightning.strategies.SingleHPUStrategy`
+     - :class:`~lightning.pytorch.strategies.SingleHPUStrategy`
      - Strategy for training on a single HPU device. :doc:`Learn more. <../accelerators/hpu>`
    * - ipu_strategy
-     - :class:`~pytorch_lightning.strategies.IPUStrategy`
+     - :class:`~lightning.pytorch.strategies.IPUStrategy`
      - Plugin for training on IPU devices. :doc:`Learn more. <../accelerators/ipu>`
    * - xla
-     - :class:`~pytorch_lightning.strategies.XLAStrategy`
+     - :class:`~lightning.pytorch.strategies.XLAStrategy`
      - Strategy for training on multiple TPU devices using the :func:`torch_xla.distributed.xla_multiprocessing.spawn` method. :doc:`Learn more. <../accelerators/tpu>`
    * - single_tpu
-     - :class:`~pytorch_lightning.strategies.SingleTPUStrategy`
+     - :class:`~lightning.pytorch.strategies.SingleTPUStrategy`
      - Strategy for training on a single TPU device. :doc:`Learn more. <../accelerators/tpu>`
 
 ----
@@ -125,7 +125,7 @@ There are powerful third-party strategies that integrate well with Lightning but
 Create a Custom Strategy
 ************************
 
-Every strategy in Lightning is a subclass of one of the main base classes: :class:`~pytorch_lightning.strategies.Strategy`, :class:`~pytorch_lightning.strategies.SingleDeviceStrategy` or :class:`~pytorch_lightning.strategies.ParallelStrategy`.
+Every strategy in Lightning is a subclass of one of the main base classes: :class:`~lightning.pytorch.strategies.Strategy`, :class:`~lightning.pytorch.strategies.SingleDeviceStrategy` or :class:`~lightning.pytorch.strategies.ParallelStrategy`.
 
 .. image:: https://pl-public-data.s3.amazonaws.com/docs/static/images/strategies/hierarchy.jpeg
     :alt: Strategy base classes
@@ -135,7 +135,7 @@ subclassing the base classes.
 
 .. code-block:: python
 
-    from pytorch_lightning.strategies import DDPStrategy
+    from lightning.pytorch.strategies import DDPStrategy
 
 
     class CustomDDPStrategy(DDPStrategy):
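        # An illustrative sketch (an assumption, not the upstream example): a custom
        # strategy typically just overrides one of the documented hooks; here
        # ``setup`` is extended only to announce itself before delegating.
        def setup(self, trainer):
            print("CustomDDPStrategy: setting up distributed training")
            super().setup(trainer)


    # The instance can then be passed straight to the Trainer, for example:
    # trainer = Trainer(strategy=CustomDDPStrategy(), accelerator="gpu", devices=2)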
diff --git a/docs/source-pytorch/guides/data.rst b/docs/source-pytorch/guides/data.rst
index bfcb11dafc4b6..96b0072368f6a 100644
--- a/docs/source-pytorch/guides/data.rst
+++ b/docs/source-pytorch/guides/data.rst
@@ -24,14 +24,14 @@ There are a few different data containers used in Lightning:
      - The PyTorch :class:`~torch.utils.data.IterableDataset` represents a stream of data.
    * - :class:`~torch.utils.data.DataLoader`
      - The PyTorch :class:`~torch.utils.data.DataLoader` represents a Python iterable over a Dataset.
-   * - :class:`~pytorch_lightning.core.datamodule.LightningDataModule`
-     -  A :class:`~pytorch_lightning.core.datamodule.LightningDataModule` is simply a collection of: training DataLoader(s), validation DataLoader(s), test DataLoader(s) and predict DataLoader(s), along with the matching transforms and data processing/downloads steps required.
+   * - :class:`~lightning.pytorch.core.datamodule.LightningDataModule`
+     -  A :class:`~lightning.pytorch.core.datamodule.LightningDataModule` is simply a collection of: training DataLoader(s), validation DataLoader(s), test DataLoader(s) and predict DataLoader(s), along with the matching transforms and data processing/downloads steps required.
 
 
 Why Use LightningDataModule?
 ============================
 
-The :class:`~pytorch_lightning.core.datamodule.LightningDataModule` was designed as a way of decoupling data-related hooks from the :class:`~pytorch_lightning.core.module.LightningModule` so you can develop dataset agnostic models. The :class:`~pytorch_lightning.core.datamodule.LightningDataModule` makes it easy to hot swap different Datasets with your model, so you can test it and benchmark it across domains. It also makes sharing and reusing the exact data splits and transforms across projects possible.
+The :class:`~lightning.pytorch.core.datamodule.LightningDataModule` was designed as a way of decoupling data-related hooks from the :class:`~lightning.pytorch.core.module.LightningModule` so you can develop dataset agnostic models. The :class:`~lightning.pytorch.core.datamodule.LightningDataModule` makes it easy to hot swap different Datasets with your model, so you can test it and benchmark it across domains. It also makes sharing and reusing the exact data splits and transforms across projects possible.
 
 Read :ref:`this ` for more details on LightningDataModule.
 
@@ -49,14 +49,14 @@ There are a few ways to pass multiple Datasets to Lightning:
 2. In the training loop, you can pass multiple DataLoaders as a dict or list/tuple, and Lightning will
    automatically combine the batches from different DataLoaders.
 3. In the validation, test, or prediction, you have the option to return multiple DataLoaders as list/tuple, which Lightning will call sequentially
-   or combine the DataLoaders using :class:`~pytorch_lightning.utilities.CombinedLoader`, which is what Lightning uses
+   or combine the DataLoaders using :class:`~lightning.pytorch.utilities.CombinedLoader`, which is what Lightning uses
    under the hood.
 
 
 Using LightningDataModule
 =========================
 
-You can set more than one :class:`~torch.utils.data.DataLoader` in your :class:`~pytorch_lightning.core.datamodule.LightningDataModule` using its DataLoader hooks
+You can set more than one :class:`~torch.utils.data.DataLoader` in your :class:`~lightning.pytorch.core.datamodule.LightningDataModule` using its DataLoader hooks
 and Lightning will use the correct one.
 
 .. testcode::
@@ -113,7 +113,7 @@ also works for testing, validation, and prediction Datasets.
 Return Multiple DataLoaders
 ---------------------------
 
-You can set multiple DataLoaders in your :class:`~pytorch_lightning.core.module.LightningModule`, and Lightning will take care of batch combination.
+You can set multiple DataLoaders in your :class:`~lightning.pytorch.core.module.LightningModule`, and Lightning will take care of batch combination.
 
 .. testcode::
 
@@ -174,11 +174,11 @@ Furthermore, Lightning also supports nested lists and dicts (or a combination).
             batch_c = batch_c_d["c"]
             batch_d = batch_c_d["d"]
 
-Alternatively, you can also pass in a :class:`~pytorch_lightning.utilities.CombinedLoader` containing multiple DataLoaders.
+Alternatively, you can also pass in a :class:`~lightning.pytorch.utilities.CombinedLoader` containing multiple DataLoaders.
 
 .. testcode::
 
-    from pytorch_lightning.utilities import CombinedLoader
+    from lightning.pytorch.utilities import CombinedLoader
 
 
     def train_dataloader(self):
@@ -206,9 +206,9 @@ the DataLoaders in sequential order; that is, the first DataLoader will be proce
 
 Refer to the following for more details for the default sequential option:
 
-- :meth:`~pytorch_lightning.core.hooks.DataHooks.val_dataloader`
-- :meth:`~pytorch_lightning.core.hooks.DataHooks.test_dataloader`
-- :meth:`~pytorch_lightning.core.hooks.DataHooks.predict_dataloader`
+- :meth:`~lightning.pytorch.core.hooks.DataHooks.val_dataloader`
+- :meth:`~lightning.pytorch.core.hooks.DataHooks.test_dataloader`
+- :meth:`~lightning.pytorch.core.hooks.DataHooks.predict_dataloader`
 
 .. testcode::
 
@@ -226,7 +226,7 @@ Evaluation DataLoaders are iterated over sequentially. The above is equivalent t
 
 .. testcode::
 
-    from pytorch_lightning.utilities import CombinedLoader
+    from lightning.pytorch.utilities import CombinedLoader
 
 
     def val_dataloader(self):
@@ -246,8 +246,8 @@ Evaluate with Additional DataLoaders
 ====================================
 
 You can evaluate your models using additional DataLoaders even if the DataLoader specific hooks haven't been defined within your
-:class:`~pytorch_lightning.core.module.LightningModule`. For example, this would be the case if your test data
-set is not available at the time your model was declared. Simply pass the test set to the :meth:`~pytorch_lightning.trainer.trainer.Trainer.test` method:
+:class:`~lightning.pytorch.core.module.LightningModule`. For example, this would be the case if your test data
+set is not available at the time your model was declared. Simply pass the test set to the :meth:`~lightning.pytorch.trainer.trainer.Trainer.test` method:
 
 .. code-block:: python
 
@@ -267,7 +267,7 @@ In the case that you require access to the DataLoader or Dataset objects, DataLo
 
 .. testcode::
 
-    from pytorch_lightning import LightningModule
+    from lightning.pytorch import LightningModule
 
 
     class Model(LightningModule):
@@ -279,12 +279,12 @@ In the case that you require access to the DataLoader or Dataset objects, DataLo
             # extract metadata, etc. from the dataset:
             ...
 
-If you are using a :class:`~pytorch_lightning.utilities.CombinedLoader` object which allows you to fetch batches from a collection of DataLoaders
+If you are using a :class:`~lightning.pytorch.utilities.CombinedLoader` object, which lets you fetch batches from a collection of DataLoaders
 simultaneously and supports collections of DataLoaders such as a list, tuple, or dictionary, the DataLoaders can be accessed using the same collection structure:
 
 .. code-block:: python
 
-    from pytorch_lightning.utilities import CombinedLoader
+    from lightning.pytorch.utilities import CombinedLoader
 
     test_dl1 = ...
     test_dl2 = ...
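
For illustration, a minimal self-contained sketch of the dictionary form described above (the dataset sizes, key names, and model are placeholders):

.. code-block:: python

    import torch
    from torch import nn
    from torch.utils.data import DataLoader, TensorDataset

    from lightning.pytorch import LightningModule
    from lightning.pytorch.utilities import CombinedLoader


    class MultiLoaderModel(LightningModule):
        def __init__(self):
            super().__init__()
            self.layer = nn.Linear(32, 1)

        def train_dataloader(self):
            # two toy datasets combined under named keys
            loader_a = DataLoader(TensorDataset(torch.randn(64, 32)), batch_size=8)
            loader_b = DataLoader(TensorDataset(torch.randn(48, 32)), batch_size=8)
            return CombinedLoader({"a": loader_a, "b": loader_b})

        def training_step(self, batch, batch_idx):
            # the batch mirrors the collection structure given to CombinedLoader
            (xa,), (xb,) = batch["a"], batch["b"]
            return self.layer(xa).mean() + self.layer(xb).mean()

        def configure_optimizers(self):
            return torch.optim.SGD(self.parameters(), lr=0.1)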
diff --git a/docs/source-pytorch/guides/speed.rst b/docs/source-pytorch/guides/speed.rst
index b5120f394eabf..70b24b8047c5f 100644
--- a/docs/source-pytorch/guides/speed.rst
+++ b/docs/source-pytorch/guides/speed.rst
@@ -2,7 +2,7 @@
 
 .. testsetup:: *
 
-    from pytorch_lightning.callbacks.early_stopping import EarlyStopping
+    from lightning.pytorch.callbacks.early_stopping import EarlyStopping
 
 .. _training-speedup:
 
@@ -27,9 +27,9 @@ GPU Training
 
 Lightning supports a variety of plugins to speed up distributed GPU training. Most notably:
 
-* :class:`~pytorch_lightning.strategies.DDPStrategy`
-* :class:`~pytorch_lightning.strategies.FSDPStrategy`
-* :class:`~pytorch_lightning.strategies.DeepSpeedStrategy`
+* :class:`~lightning.pytorch.strategies.DDPStrategy`
+* :class:`~lightning.pytorch.strategies.FSDPStrategy`
+* :class:`~lightning.pytorch.strategies.DeepSpeedStrategy`
 
 .. code-block:: python
 
@@ -51,7 +51,7 @@ Refer to :doc:`Advanced GPU Optimized Training for more details <../advanced/mod
 
 |
 
-:class:`~pytorch_lightning.strategies.ddp.DDPStrategy` only performs two transfer operations for each step, making it the simplest distributed training strategy:
+:class:`~lightning.pytorch.strategies.ddp.DDPStrategy` only performs two transfer operations for each step, making it the simplest distributed training strategy:
 
 1. Moving data to the device.
 2. Transfer and sync gradients.
@@ -97,7 +97,7 @@ For debugging purposes or for dataloaders that load very small datasets, it is d
     warnings.filterwarnings("ignore", ".*Consider increasing the value of the `num_workers` argument*")
 
     # or to ignore all warnings that could be false positives
-    from pytorch_lightning.utilities.warnings import PossibleUserWarning
+    from lightning.pytorch.utilities.warnings import PossibleUserWarning
 
     warnings.filterwarnings("ignore", category=PossibleUserWarning)
 
@@ -161,7 +161,7 @@ Early Stopping
 **************
 
 Usually, long training epochs can lead to either overfitting or no major improvements in your metrics once convergence has plateaued.
-Here :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback can help you stop the training entirely by monitoring a metric of your choice.
+Here :class:`~lightning.pytorch.callbacks.early_stopping.EarlyStopping` callback can help you stop the training entirely by monitoring a metric of your choice.
 
 You can read more about it :ref:`here `.
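
For reference, a minimal sketch of wiring it up (the monitored metric name is an assumption and must match something you log, e.g. via ``self.log("val_loss", ...)``):

.. code-block:: python

    from lightning.pytorch import Trainer
    from lightning.pytorch.callbacks.early_stopping import EarlyStopping

    # stop when "val_loss" has not improved by at least 0.001
    # for 3 consecutive validation runs
    early_stop = EarlyStopping(monitor="val_loss", mode="min", min_delta=1e-3, patience=3)
    trainer = Trainer(callbacks=[early_stop])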
 
@@ -339,8 +339,8 @@ Here is an explanation of what it does:
 When performing gradient accumulation, there is no need to perform grad synchronization during the accumulation phase.
 Setting ``sync_grad`` to ``False`` will block this synchronization and improve your training speed.
 
-:class:`~pytorch_lightning.core.optimizer.LightningOptimizer` provides a
-:meth:`~pytorch_lightning.core.optimizer.LightningOptimizer.toggle_model` function as a
+:class:`~lightning.pytorch.core.optimizer.LightningOptimizer` provides a
+:meth:`~lightning.pytorch.core.optimizer.LightningOptimizer.toggle_model` function as a
 :func:`contextlib.contextmanager` for advanced users.
 
 Here is an example of an advanced use case:
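
One possible sketch of such a use case (an illustration only, not necessarily the upstream snippet; ``generator_loss`` is a hypothetical helper and manual optimization is assumed):

.. code-block:: python

    from lightning.pytorch import LightningModule


    class ToggleExample(LightningModule):
        # configure_optimizers() is assumed to return two optimizers (e.g. a GAN)

        def training_step(self, batch, batch_idx):
            opt_gen, opt_dis = self.optimizers()
            is_update_step = (batch_idx + 1) % 2 == 0  # accumulate over 2 batches

            # skip gradient synchronization on pure accumulation steps
            with opt_gen.toggle_model(sync_grad=is_update_step):
                loss = self.generator_loss(batch)  # hypothetical helper
                self.manual_backward(loss)
                if is_update_step:
                    opt_gen.step()
                    opt_gen.zero_grad()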
@@ -409,7 +409,7 @@ Here is an example of an advanced use case:
 Set Grads to None
 *****************
 
-In order to improve performance, you can override :meth:`~pytorch_lightning.core.module.LightningModule.optimizer_zero_grad`.
+In order to improve performance, you can override :meth:`~lightning.pytorch.core.module.LightningModule.optimizer_zero_grad`.
 
 For a more detailed explanation of the pros / cons of this technique,
 read the documentation for :meth:`~torch.optim.Optimizer.zero_grad` by the PyTorch team.
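
A minimal sketch of the override (assuming the hook signature used by this version of Lightning):

.. code-block:: python

    from lightning.pytorch import LightningModule


    class Model(LightningModule):
        def optimizer_zero_grad(self, epoch, batch_idx, optimizer):
            # set gradients to None instead of zero-filling them
            optimizer.zero_grad(set_to_none=True)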
diff --git a/docs/source-pytorch/model/manual_optimization.rst b/docs/source-pytorch/model/manual_optimization.rst
index d2fdae016c888..2c12f4cf76880 100644
--- a/docs/source-pytorch/model/manual_optimization.rst
+++ b/docs/source-pytorch/model/manual_optimization.rst
@@ -23,7 +23,7 @@ Here is a minimal example of manual optimization.
 
 .. testcode:: python
 
-    from pytorch_lightning import LightningModule
+    from lightning.pytorch import LightningModule
 
 
     class MyModel(LightningModule):
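        # Sketch of the typical pattern (an illustration of the surrounding text,
        # not necessarily the exact upstream example; ``compute_loss`` is a
        # hypothetical helper):
        def __init__(self):
            super().__init__()
            # this property activates manual optimization
            self.automatic_optimization = False

        def training_step(self, batch, batch_idx):
            opt = self.optimizers()
            opt.zero_grad()
            loss = self.compute_loss(batch)
            self.manual_backward(loss)
            opt.step()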
@@ -47,8 +47,8 @@ Here is a minimal example of manual optimization.
 Access your Own Optimizer
 =========================
 
-The provided ``optimizer`` is a :class:`~pytorch_lightning.core.optimizer.LightningOptimizer` object wrapping your own optimizer
-configured in your :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers`. You can access your own optimizer
+The provided ``optimizer`` is a :class:`~lightning.pytorch.core.optimizer.LightningOptimizer` object wrapping your own optimizer
+configured in your :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers`. You can access your own optimizer
 with ``optimizer.optimizer``. However, if you use your own optimizer to perform a step, Lightning won't be able to
 support accelerators, precision and profiling for you.
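
For instance, a short sketch of unwrapping it inside ``training_step`` (manual optimization assumed):

.. code-block:: python

    def training_step(self, batch, batch_idx):
        lightning_opt = self.optimizers()  # LightningOptimizer wrapper
        raw_opt = lightning_opt.optimizer  # the underlying torch.optim optimizer

        # prefer stepping through the wrapper so Lightning can handle precision
        # and accelerator details; use ``raw_opt`` only for inspection
        print(raw_opt.param_groups[0]["lr"])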
 
@@ -103,7 +103,7 @@ To perform gradient clipping with one optimizer with manual optimization, you ca
 
 .. testcode:: python
 
-    from pytorch_lightning import LightningModule
+    from lightning.pytorch import LightningModule
 
 
     class SimpleModel(LightningModule):
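        # Sketch (an illustration only; ``compute_loss`` is a hypothetical helper):
        # clip gradients manually between backward() and step()
        def training_step(self, batch, batch_idx):
            opt = self.optimizers()
            opt.zero_grad()
            loss = self.compute_loss(batch)
            self.manual_backward(loss)
            # clip by norm to a maximum of 0.5 before stepping
            self.clip_gradients(opt, gradient_clip_val=0.5, gradient_clip_algorithm="norm")
            opt.step()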
@@ -138,7 +138,7 @@ Here is an example training a simple GAN with multiple optimizers using manual o
 
     import torch
     from torch import Tensor
-    from pytorch_lightning import LightningModule
+    from lightning.pytorch import LightningModule
 
 
     class SimpleGAN(LightningModule):
@@ -209,16 +209,16 @@ Learning Rate Scheduling
 
 Every optimizer you use can be paired with any
 `Learning Rate Scheduler `_. Please see the
-documentation of :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers` for all the available options
+documentation of :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers` for all the available options.
 
 You can call ``lr_scheduler.step()`` at arbitrary intervals.
-Use ``self.lr_schedulers()`` in  your :class:`~pytorch_lightning.core.module.LightningModule` to access any learning rate schedulers
-defined in your :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers`.
+Use ``self.lr_schedulers()`` in  your :class:`~lightning.pytorch.core.module.LightningModule` to access any learning rate schedulers
+defined in your :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers`.
 
 .. warning::
-   * ``lr_scheduler.step()`` can be called at arbitrary intervals by the user in case of manual optimization, or by Lightning if ``"interval"`` is defined in :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers` in case of automatic optimization.
+   * ``lr_scheduler.step()`` can be called at arbitrary intervals by the user in case of manual optimization, or by Lightning if ``"interval"`` is defined in :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers` in case of automatic optimization.
    * Note that the ``lr_scheduler_config`` keys, such as ``"frequency"`` and ``"interval"``, will be ignored even if they are provided in
-     your :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers` during manual optimization.
+     your :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers` during manual optimization.
 
 Here is an example calling ``lr_scheduler.step()`` every step.
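
A minimal sketch of that pattern (assuming a single optimizer and scheduler returned from ``configure_optimizers``, with ``compute_loss`` as a hypothetical helper):

.. code-block:: python

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        sch = self.lr_schedulers()

        opt.zero_grad()
        loss = self.compute_loss(batch)
        self.manual_backward(loss)
        opt.step()

        # step the scheduler as often as you like -- here, once per batch
        sch.step()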
 
diff --git a/docs/source-pytorch/model/train_model_basic.rst b/docs/source-pytorch/model/train_model_basic.rst
index e5bce7dfdf1c1..028734e15364e 100644
--- a/docs/source-pytorch/model/train_model_basic.rst
+++ b/docs/source-pytorch/model/train_model_basic.rst
@@ -21,7 +21,7 @@ Add the relevant imports at the top of the file
     from torchvision import transforms
     from torchvision.datasets import MNIST
     from torch.utils.data import DataLoader
-    import pytorch_lightning as pl
+    import lightning.pytorch as pl
 
 ----
 
diff --git a/docs/source-pytorch/starter/converting.rst b/docs/source-pytorch/starter/converting.rst
index 925922e5b5eb2..1d7844c0fb67c 100644
--- a/docs/source-pytorch/starter/converting.rst
+++ b/docs/source-pytorch/starter/converting.rst
@@ -16,7 +16,7 @@ Keep your regular nn.Module architecture
 
 .. testcode::
 
-    import pytorch_lightning as pl
+    import lightning.pytorch as pl
     import torch
     import torch.nn as nn
     import torch.nn.functional as F
@@ -62,7 +62,7 @@ In the training_step of the LightningModule configure how your training routine
 ****************************************
 3. Move Optimizer(s) and LR Scheduler(s)
 ****************************************
-Move your optimizers to the :meth:`~pytorch_lightning.core.module.LightningModule.configure_optimizers` hook.
+Move your optimizers to the :meth:`~lightning.pytorch.core.module.LightningModule.configure_optimizers` hook.
 
 .. testcode::
 
@@ -163,7 +163,7 @@ Regular PyTorch DataLoaders work with Lightning. For more modular and scalable d
 Good to know
 ************
 
-Additionally, you can run only the validation loop using :meth:`~pytorch_lightning.trainer.trainer.Trainer.validate` method.
+Additionally, you can run only the validation loop using the :meth:`~lightning.pytorch.trainer.trainer.Trainer.validate` method.
 
 .. code-block:: python
 
@@ -173,7 +173,7 @@ Additionally, you can run only the validation loop using :meth:`~pytorch_lightni
 .. note:: ``model.eval()`` and ``torch.no_grad()`` are called automatically for validation.
 
 
-The test loop isn't used within :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`, therefore, you would need to explicitly call :meth:`~pytorch_lightning.trainer.trainer.Trainer.test`.
+The test loop isn't used within :meth:`~lightning.pytorch.trainer.trainer.Trainer.fit`, therefore, you would need to explicitly call :meth:`~lightning.pytorch.trainer.trainer.Trainer.test`.
 
 .. code-block:: python
 
@@ -185,7 +185,7 @@ The test loop isn't used within :meth:`~pytorch_lightning.trainer.trainer.Traine
 .. tip:: ``trainer.test()`` loads the best checkpoint automatically by default if checkpointing is enabled.
 
 
-The predict loop will not be used until you call :meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.
+The predict loop will not be used until you call :meth:`~lightning.pytorch.trainer.trainer.Trainer.predict`.
 
 .. code-block:: python
 
diff --git a/docs/source-pytorch/starter/introduction.rst b/docs/source-pytorch/starter/introduction.rst
index a1edea0a3b5ee..aad3b907a3a6a 100644
--- a/docs/source-pytorch/starter/introduction.rst
+++ b/docs/source-pytorch/starter/introduction.rst
@@ -118,7 +118,7 @@ A LightningModule enables your PyTorch nn.Module to play together in complex way
     from torch import optim, nn, utils, Tensor
     from torchvision.datasets import MNIST
     from torchvision.transforms import ToTensor
-    import pytorch_lightning as pl
+    import lightning.pytorch as pl
 
     # define any number of nn.Modules (or use your current ones)
     encoder = nn.Sequential(nn.Linear(28 * 28, 64), nn.ReLU(), nn.Linear(64, 3))
diff --git a/docs/source-pytorch/starter/style_guide.rst b/docs/source-pytorch/starter/style_guide.rst
index 54e3294bd727a..ebc936e907b40 100644
--- a/docs/source-pytorch/starter/style_guide.rst
+++ b/docs/source-pytorch/starter/style_guide.rst
@@ -2,7 +2,7 @@
 Style Guide
 ###########
 The main goal of PyTorch Lightning is to improve readability and reproducibility. Imagine looking into any GitHub repo or a research project,
-finding a :class:`~pytorch_lightning.core.module.LightningModule`, and knowing exactly where to look to find the things you care about.
+finding a :class:`~lightning.pytorch.core.module.LightningModule`, and knowing exactly where to look to find the things you care about.
 
 The goal of this style guide is to encourage Lightning code to be structured similarly.
 
@@ -12,7 +12,7 @@ The goal of this style guide is to encourage Lightning code to be structured sim
 LightningModule
 ***************
 
-These are best practices for structuring your :class:`~pytorch_lightning.core.module.LightningModule` class:
+These are best practices for structuring your :class:`~lightning.pytorch.core.module.LightningModule` class:
 
 Systems vs Models
 =================
@@ -165,8 +165,8 @@ In practice, the code looks like this:
 Forward vs training_step
 ========================
 
-We recommend using :meth:`~pytorch_lightning.core.module.LightningModule.forward` for inference/predictions and keeping
-:meth:`~pytorch_lightning.core.module.LightningModule.training_step` independent.
+We recommend using :meth:`~lightning.pytorch.core.module.LightningModule.forward` for inference/predictions and keeping
+:meth:`~lightning.pytorch.core.module.LightningModule.training_step` independent.
 
 .. code-block:: python
 
@@ -200,8 +200,8 @@ make sure to tune the number of workers for maximum efficiency.
 DataModules
 ===========
 
-The :class:`~pytorch_lightning.core.datamodule.LightningDataModule` is designed as a way of decoupling data-related
-hooks from the :class:`~pytorch_lightning.core.module.LightningModule` so you can develop dataset agnostic models. It makes it easy to hot swap different
+The :class:`~lightning.pytorch.core.datamodule.LightningDataModule` is designed as a way of decoupling data-related
+hooks from the :class:`~lightning.pytorch.core.module.LightningModule` so you can develop dataset agnostic models. It makes it easy to hot swap different
 datasets with your model, so you can test it and benchmark it across domains. It also makes sharing and reusing the exact data splits and transforms across projects possible.
 
 Check out :ref:`data` document to understand data management within Lightning and its best practices.
diff --git a/docs/source-pytorch/tuning/profiler_advanced.rst b/docs/source-pytorch/tuning/profiler_advanced.rst
index 1a1794f35f0a0..63a0013d1d2b6 100644
--- a/docs/source-pytorch/tuning/profiler_advanced.rst
+++ b/docs/source-pytorch/tuning/profiler_advanced.rst
@@ -12,11 +12,11 @@ Find bottlenecks in your code (advanced)
 ************************
 Profile cloud TPU models
 ************************
-To profile TPU models use the :class:`~pytorch_lightning.profilers.xla.XLAProfiler`
+To profile TPU models use the :class:`~lightning.pytorch.profilers.xla.XLAProfiler`
 
 .. code-block:: python
 
-    from pytorch_lightning.profilers import XLAProfiler
+    from lightning.pytorch.profilers import XLAProfiler
 
     profiler = XLAProfiler(port=9001)
     trainer = Trainer(profiler=profiler)
diff --git a/docs/source-pytorch/tuning/profiler_basic.rst b/docs/source-pytorch/tuning/profiler_basic.rst
index 16dc798402cb0..d248cc649004a 100644
--- a/docs/source-pytorch/tuning/profiler_basic.rst
+++ b/docs/source-pytorch/tuning/profiler_basic.rst
@@ -67,7 +67,7 @@ The simple profiler measures all the standard methods used in the training loop
 **************************************
 Profile the time within every function
 **************************************
-To profile the time within every function, use the :class:`~pytorch_lightning.profilers.advanced.AdvancedProfiler` built on top of Python's `cProfiler `_.
+To profile the time within every function, use the :class:`~lightning.pytorch.profilers.advanced.AdvancedProfiler` built on top of Python's `cProfile `_.
 
 
 .. code-block:: python
@@ -100,7 +100,7 @@ If the profiler report becomes too long, you can stream the report to a file:
 
 .. code-block:: python
 
-    from pytorch_lightning.profilers import AdvancedProfiler
+    from lightning.pytorch.profilers import AdvancedProfiler
 
     profiler = AdvancedProfiler(dirpath=".", filename="perf_logs")
     trainer = Trainer(profiler=profiler)
@@ -111,11 +111,11 @@ If the profiler report becomes too long, you can stream the report to a file:
 Measure accelerator usage
 *************************
 Another helpful technique to detect bottlenecks is to ensure that you're using the full capacity of your accelerator (GPU/TPU/IPU/HPU).
-This can be measured with the :class:`~pytorch_lightning.callbacks.device_stats_monitor.DeviceStatsMonitor`:
+This can be measured with the :class:`~lightning.pytorch.callbacks.device_stats_monitor.DeviceStatsMonitor`:
 
 .. testcode::
 
-    from pytorch_lightning.callbacks import DeviceStatsMonitor
+    from lightning.pytorch.callbacks import DeviceStatsMonitor
 
     trainer = Trainer(callbacks=[DeviceStatsMonitor()])
 
diff --git a/docs/source-pytorch/tuning/profiler_expert.rst b/docs/source-pytorch/tuning/profiler_expert.rst
index fe864536e4b03..a64c0fafc9b18 100644
--- a/docs/source-pytorch/tuning/profiler_expert.rst
+++ b/docs/source-pytorch/tuning/profiler_expert.rst
@@ -12,12 +12,12 @@ Find bottlenecks in your code (expert)
 ***********************
 Build your own profiler
 ***********************
-To build your own profiler, subclass :class:`~pytorch_lightning.profilers.profiler.Profiler`
+To build your own profiler, subclass :class:`~lightning.pytorch.profilers.profiler.Profiler`
 and override some of its methods. Here is a simple example that profiles the first occurrence and total calls of each action:
 
 .. code-block:: python
 
-    from pytorch_lightning.profilers import Profiler
+    from lightning.pytorch.profilers import Profiler
     from collections import defaultdict
     import time
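    # Sketch of such a profiler (an illustration of the idea above; it assumes
    # the base class exposes ``start``/``stop``/``summary`` hooks):
    class ActionCountProfiler(Profiler):
        def __init__(self, dirpath=None, filename=None):
            super().__init__(dirpath=dirpath, filename=filename)
            self._start_time = {}
            self._first_seen = {}
            self._call_count = defaultdict(int)

        def start(self, action_name):
            now = time.monotonic()
            self._start_time[action_name] = now
            self._first_seen.setdefault(action_name, now)
            self._call_count[action_name] += 1

        def stop(self, action_name):
            self._start_time.pop(action_name, None)

        def summary(self):
            return "\n".join(
                f"{name}: first seen at {first:.3f}, called {self._call_count[name]} times"
                for name, first in self._first_seen.items()
            )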
 
@@ -69,7 +69,7 @@ To profile a specific action of interest, reference a profiler in the LightningM
 
 .. code-block:: python
 
-    from pytorch_lightning.profilers import SimpleProfiler, PassThroughProfiler
+    from lightning.pytorch.profilers import SimpleProfiler, PassThroughProfiler
 
 
     class MyModel(LightningModule):
@@ -90,7 +90,7 @@ Here's the full code:
 
 .. code-block:: python
 
-    from pytorch_lightning.profilers import SimpleProfiler, PassThroughProfiler
+    from lightning.pytorch.profilers import SimpleProfiler, PassThroughProfiler
 
 
     class MyModel(LightningModule):
diff --git a/docs/source-pytorch/tuning/profiler_intermediate.rst b/docs/source-pytorch/tuning/profiler_intermediate.rst
index beb0cc2699ad4..802bfc5e6db4e 100644
--- a/docs/source-pytorch/tuning/profiler_intermediate.rst
+++ b/docs/source-pytorch/tuning/profiler_intermediate.rst
@@ -12,11 +12,11 @@ Find bottlenecks in your code (intermediate)
 **************************
 Profile pytorch operations
 **************************
-To understand the cost of each PyTorch operation, use the :class:`~pytorch_lightning.profilers.pytorch.PyTorchProfiler` built on top of the `PyTorch profiler `__.
+To understand the cost of each PyTorch operation, use the :class:`~lightning.pytorch.profilers.pytorch.PyTorchProfiler` built on top of the `PyTorch profiler `__.
 
 .. code-block:: python
 
-    from pytorch_lightning.profilers import PyTorchProfiler
+    from lightning.pytorch.profilers import PyTorchProfiler
 
     profiler = PyTorchProfiler()
     trainer = Trainer(profiler=profiler)
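
With default settings, the string shortcut is equivalent:

.. code-block:: python

    # same defaults via the string shortcut
    trainer = Trainer(profiler="pytorch")
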
@@ -65,11 +65,11 @@ The profiler will generate an output like this:
 ***************************
 Profile a distributed model
 ***************************
-To profile a distributed model, use the :class:`~pytorch_lightning.profilers.pytorch.PyTorchProfiler` with the *filename* argument which will save a report per rank.
+To profile a distributed model, use the :class:`~lightning.pytorch.profilers.pytorch.PyTorchProfiler` with the *filename* argument which will save a report per rank.
 
 .. code-block:: python
 
-    from pytorch_lightning.profilers import PyTorchProfiler
+    from lightning.pytorch.profilers import PyTorchProfiler
 
     profiler = PyTorchProfiler(filename="perf-logs")
     trainer = Trainer(profiler=profiler)
@@ -152,11 +152,11 @@ The output above shows the profiling for the action ``training_step``.
 *****************************
 Visualize profiled operations
 *****************************
-To visualize the profiled operations, enable **emit_nvtx** in the :class:`~pytorch_lightning.profilers.pytorch.PyTorchProfiler`.
+To visualize the profiled operations, enable **emit_nvtx** in the :class:`~lightning.pytorch.profilers.pytorch.PyTorchProfiler`.
 
 .. code-block:: python
 
-    from pytorch_lightning.profilers import PyTorchProfiler
+    from lightning.pytorch.profilers import PyTorchProfiler
 
     profiler = PyTorchProfiler(emit_nvtx=True)
     trainer = Trainer(profiler=profiler)
diff --git a/docs/source-pytorch/visualize/experiment_managers.rst b/docs/source-pytorch/visualize/experiment_managers.rst
index 30fada9f2e0fb..e40b7416b7b05 100644
--- a/docs/source-pytorch/visualize/experiment_managers.rst
+++ b/docs/source-pytorch/visualize/experiment_managers.rst
@@ -5,7 +5,7 @@ To track other artifacts, such as histograms or model topology graphs first sele
 
 .. code-block:: python
 
-    from pytorch_lightning import loggers as pl_loggers
+    from lightning.pytorch import loggers as pl_loggers
 
     tensorboard = pl_loggers.TensorBoardLogger()
     trainer = Trainer(logger=tensorboard)
diff --git a/docs/source-pytorch/visualize/logging_advanced.rst b/docs/source-pytorch/visualize/logging_advanced.rst
index 6883d97991f34..35995588961c0 100644
--- a/docs/source-pytorch/visualize/logging_advanced.rst
+++ b/docs/source-pytorch/visualize/logging_advanced.rst
@@ -12,11 +12,11 @@ Track and Visualize Experiments (advanced)
 ****************************
 Change progress bar defaults
 ****************************
-To change the default values (ie: version number) shown in the progress bar, override the :meth:`~pytorch_lightning.callbacks.progress.base.ProgressBarBase.get_metrics` method in your logger.
+To change the default values (ie: version number) shown in the progress bar, override the :meth:`~lightning.pytorch.callbacks.progress.base.ProgressBarBase.get_metrics` method in your logger.
 
 .. code-block:: python
 
-    from pytorch_lightning.callbacks.progress import Tqdm
+    from lightning.pytorch.callbacks.progress import Tqdm
 
 
     class CustomProgressBar(Tqdm):
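
The override itself sits outside this hunk; a hedged sketch written against ``TQDMProgressBar`` (whose ``get_metrics`` receives the trainer and module) rather than the raw ``Tqdm`` widget:

.. code-block:: python

    from lightning.pytorch.callbacks import TQDMProgressBar


    class CustomProgressBar(TQDMProgressBar):
        def get_metrics(self, trainer, pl_module):
            items = super().get_metrics(trainer, pl_module)
            # hide the version number in the bar
            items.pop("v_num", None)
            return items
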
@@ -37,7 +37,7 @@ Modify logging frequency
 ========================
 
 Logging a metric on every single batch can slow down training. By default, Lightning logs every 50 rows, or 50 training steps.
-To change this behaviour, set the *log_every_n_steps* :class:`~pytorch_lightning.trainer.trainer.Trainer` flag.
+To change this behaviour, set the *log_every_n_steps* :class:`~lightning.pytorch.trainer.trainer.Trainer` flag.
 
 .. testcode::
 
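The testcode body is outside this hunk; setting the flag is a one-liner:

.. code-block:: python

    # log every 10 steps instead of the default 50
    trainer = Trainer(log_every_n_steps=10)
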
@@ -113,7 +113,7 @@ logger
 ======
 **Default:** True
 
-Send logs to the logger like ``Tensorboard``, or any other custom logger passed to the :class:`~pytorch_lightning.trainer.trainer.Trainer` (Default: ``True``).
+Send logs to the logger like ``Tensorboard``, or any other custom logger passed to the :class:`~lightning.pytorch.trainer.trainer.Trainer` (Default: ``True``).
 
 .. code-block:: python
 
@@ -314,7 +314,7 @@ To save logs to a remote filesystem, prepend a protocol like "s3:/" to the root_
 
 .. code-block:: python
 
-    from pytorch_lightning.loggers import TensorBoardLogger
+    from lightning.pytorch.loggers import TensorBoardLogger
 
     logger = TensorBoardLogger(save_dir="s3://my_bucket/logs/")
 
diff --git a/docs/source-pytorch/visualize/logging_expert.rst b/docs/source-pytorch/visualize/logging_expert.rst
index 3b44ee910ea9c..3067173e023dc 100644
--- a/docs/source-pytorch/visualize/logging_expert.rst
+++ b/docs/source-pytorch/visualize/logging_expert.rst
@@ -19,11 +19,11 @@ If you'd like to change the way the progress bar displays information you can us
 
 Use the TQDMProgressBar
 =======================
-To use the TQDMProgressBar pass it into the *callbacks* :class:`~pytorch_lightning.trainer.trainer.Trainer` argument.
+To use the TQDMProgressBar pass it into the *callbacks* :class:`~lightning.pytorch.trainer.trainer.Trainer` argument.
 
 .. code-block:: python
 
-    from pytorch_lightning.callbacks import TQDMProgressBar
+    from lightning.pytorch.callbacks import TQDMProgressBar
 
     trainer = Trainer(callbacks=[TQDMProgressBar()])
 
@@ -37,11 +37,11 @@ The RichProgressBar can add custom colors and beautiful formatting for your prog
 
     pip install rich
 
-Then pass the callback into the callbacks :class:`~pytorch_lightning.trainer.trainer.Trainer` argument:
+Then pass the callback into the callbacks :class:`~lightning.pytorch.trainer.trainer.Trainer` argument:
 
 .. code-block:: python
 
-    from pytorch_lightning.callbacks import RichProgressBar
+    from lightning.pytorch.callbacks import RichProgressBar
 
     trainer = Trainer(callbacks=[RichProgressBar()])
 
@@ -49,8 +49,8 @@ The rich progress bar can also have custom themes
 
 .. code-block:: python
 
-    from pytorch_lightning.callbacks import RichProgressBar
-    from pytorch_lightning.callbacks.progress.rich_progress import RichProgressBarTheme
+    from lightning.pytorch.callbacks import RichProgressBar
+    from lightning.pytorch.callbacks.progress.rich_progress import RichProgressBarTheme
 
     # create your own theme!
     theme = RichProgressBarTheme(description="green_yellow", progress_bar="green1")
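
The remainder of the snippet is outside this hunk; the theme is then handed to the callback (``RichProgressBar`` accepts a ``theme`` argument):

.. code-block:: python

    # register the themed bar with the Trainer
    trainer = Trainer(callbacks=[RichProgressBar(theme=theme)])
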
@@ -64,11 +64,11 @@ The rich progress bar can also have custom themes
 ************************
 Customize a progress bar
 ************************
-To customize either the  :class:`~pytorch_lightning.callbacks.TQDMProgressBar` or the  :class:`~pytorch_lightning.callbacks.RichProgressBar`, subclass it and override any of its methods.
+To customize either the  :class:`~lightning.pytorch.callbacks.TQDMProgressBar` or the  :class:`~lightning.pytorch.callbacks.RichProgressBar`, subclass it and override any of its methods.
 
 .. code-block:: python
 
-    from pytorch_lightning.callbacks import TQDMProgressBar
+    from lightning.pytorch.callbacks import TQDMProgressBar
 
 
     class LitProgressBar(TQDMProgressBar):
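
The customized method falls outside this hunk; one plausible override, assuming the ``init_validation_tqdm`` hook of ``TQDMProgressBar``:

.. code-block:: python

    class LitProgressBar(TQDMProgressBar):
        def init_validation_tqdm(self):
            bar = super().init_validation_tqdm()
            bar.set_description("running validation ...")
            return bar
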
@@ -82,11 +82,11 @@ To customize either the  :class:`~pytorch_lightning.callbacks.TQDMProgressBar` o
 ***************************
 Build your own progress bar
 ***************************
-To build your own progress bar, subclass :class:`~pytorch_lightning.callbacks.ProgressBarBase`
+To build your own progress bar, subclass :class:`~lightning.pytorch.callbacks.ProgressBarBase`
 
 .. code-block:: python
 
-    from pytorch_lightning.callbacks import ProgressBarBase
+    from lightning.pytorch.callbacks import ProgressBarBase
 
 
     class LitProgressBar(ProgressBarBase):
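
The class body sits outside this hunk; a minimal sketch that writes its own progress line, relying on ``on_train_batch_end`` and ``total_train_batches`` from the progress-bar base class:

.. code-block:: python

    import sys


    class LitProgressBar(ProgressBarBase):
        def __init__(self):
            super().__init__()
            self.enable = True

        def disable(self):
            self.enable = False

        def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
            super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx)
            percent = (batch_idx / self.total_train_batches) * 100
            sys.stdout.flush()
            sys.stdout.write(f"{percent:.01f}% complete \r")
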
@@ -112,11 +112,11 @@ To build your own progress bar, subclass :class:`~pytorch_lightning.callbacks.Pr
 *******************************
 Integrate an experiment manager
 *******************************
-To create an integration between a custom logger and Lightning, subclass :class:`~pytorch_lightning.loggers.base.LightningLoggerBase`
+To create an integration between a custom logger and Lightning, subclass :class:`~lightning.pytorch.loggers.base.LightningLoggerBase`
 
 .. code-block:: python
 
-    from pytorch_lightning.loggers import Logger
+    from lightning.pytorch.loggers import Logger
 
 
     class LitLogger(Logger):
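
The required members are outside this hunk; a minimal sketch implementing the abstract interface (``name``, ``version``, ``log_hyperparams``, ``log_metrics``) with placeholder values:

.. code-block:: python

    from lightning.pytorch.utilities import rank_zero_only


    class LitLogger(Logger):
        @property
        def name(self):
            return "lit-logger"

        @property
        def version(self):
            return "0.1"

        @rank_zero_only
        def log_hyperparams(self, params):
            # params: a dict or Namespace of hyperparameters
            pass

        @rank_zero_only
        def log_metrics(self, metrics, step=None):
            # metrics: a dict mapping metric names to float values
            pass
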
diff --git a/docs/source-pytorch/visualize/logging_intermediate.rst b/docs/source-pytorch/visualize/logging_intermediate.rst
index 706f25547020b..24c376064dfe9 100644
--- a/docs/source-pytorch/visualize/logging_intermediate.rst
+++ b/docs/source-pytorch/visualize/logging_intermediate.rst
@@ -14,7 +14,7 @@ To track other artifacts, such as histograms or model topology graphs first sele
 
 .. code-block:: python
 
-    from pytorch_lightning import loggers as pl_loggers
+    from lightning.pytorch import loggers as pl_loggers
 
     tensorboard = pl_loggers.TensorBoardLogger(save_dir="")
     trainer = Trainer(logger=tensorboard)
diff --git a/docs/source-pytorch/visualize/supported_exp_managers.rst b/docs/source-pytorch/visualize/supported_exp_managers.rst
index 3b7e49c5c610e..41160d17a0d08 100644
--- a/docs/source-pytorch/visualize/supported_exp_managers.rst
+++ b/docs/source-pytorch/visualize/supported_exp_managers.rst
@@ -6,12 +6,12 @@ To use `Comet.ml `_ first install the comet package:
 
     pip install comet-ml
 
-Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:
+Configure the logger and pass it to the :class:`~lightning.pytorch.trainer.trainer.Trainer`:
 
 .. testcode::
     :skipif: not _COMET_AVAILABLE
 
-    from pytorch_lightning.loggers import CometLogger
+    from lightning.pytorch.loggers import CometLogger
 
     comet_logger = CometLogger(api_key="YOUR_COMET_API_KEY")
     trainer = Trainer(logger=comet_logger)
@@ -26,7 +26,7 @@ Access the comet logger from any function (except the LightningModule *init*) to
             fake_images = torch.Tensor(32, 3, 28, 28)
             comet.add_image("generated_images", fake_images, 0)
 
-Here's the full documentation for the :class:`~pytorch_lightning.loggers.CometLogger`.
+Here's the full documentation for the :class:`~lightning.pytorch.loggers.CometLogger`.
 
 ----
 
@@ -38,12 +38,12 @@ To use `MLflow `_ first install the MLflow package:
 
     pip install mlflow
 
-Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:
+Configure the logger and pass it to the :class:`~lightning.pytorch.trainer.trainer.Trainer`:
 
 .. testcode::
     :skipif: not _MLFLOW_AVAILABLE
 
-    from pytorch_lightning.loggers import MLFlowLogger
+    from lightning.pytorch.loggers import MLFlowLogger
 
     mlf_logger = MLFlowLogger(experiment_name="lightning_logs", tracking_uri="file:./ml-runs")
     trainer = Trainer(logger=mlf_logger)
@@ -58,7 +58,7 @@ Access the mlflow logger from any function (except the LightningModule *init*) t
             fake_images = torch.Tensor(32, 3, 28, 28)
             mlf_logger.add_image("generated_images", fake_images, 0)
 
-Here's the full documentation for the :class:`~pytorch_lightning.loggers.MLFlowLogger`.
+Here's the full documentation for the :class:`~lightning.pytorch.loggers.MLFlowLogger`.
 
 ----
 
@@ -76,12 +76,12 @@ or with conda:
 
     conda install -c conda-forge neptune-client
 
-Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:
+Configure the logger and pass it to the :class:`~lightning.pytorch.trainer.trainer.Trainer`:
 
 .. testcode::
     :skipif: not _NEPTUNE_AVAILABLE
 
-    from pytorch_lightning.loggers import NeptuneLogger
+    from lightning.pytorch.loggers import NeptuneLogger
 
     neptune_logger = NeptuneLogger(
         api_key="ANONYMOUS",  # replace with your own
@@ -98,7 +98,7 @@ Access the neptune logger from any function (except the LightningModule *init*)
             neptune_logger = self.logger.experiment["your/metadata/structure"]
             neptune_logger.log(metadata)
 
-Here's the full documentation for the :class:`~pytorch_lightning.loggers.NeptuneLogger`.
+Here's the full documentation for the :class:`~lightning.pytorch.loggers.NeptuneLogger`.
 
 ----
 
@@ -110,11 +110,11 @@ Tensorboard
 
     pip install tensorboard
 
-Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:
+Configure the logger and pass it to the :class:`~lightning.pytorch.trainer.trainer.Trainer`:
 
 .. code-block:: python
 
-    from pytorch_lightning.loggers import TensorBoardLogger
+    from lightning.pytorch.loggers import TensorBoardLogger
 
     logger = TensorBoardLogger()
     trainer = Trainer(logger=logger)
@@ -129,7 +129,7 @@ Access the tensorboard logger from any function (except the LightningModule *ini
             fake_images = torch.Tensor(32, 3, 28, 28)
             tensorboard_logger.add_image("generated_images", fake_images, 0)
 
-Here's the full documentation for the :class:`~pytorch_lightning.loggers.TensorBoardLogger`.
+Here's the full documentation for the :class:`~lightning.pytorch.loggers.TensorBoardLogger`.
 
 ----
 
@@ -141,12 +141,12 @@ To use `Weights and Biases `_ (wa
 
     pip install wandb
 
-Configure the logger and pass it to the :class:`~pytorch_lightning.trainer.trainer.Trainer`:
+Configure the logger and pass it to the :class:`~lightning.pytorch.trainer.trainer.Trainer`:
 
 .. testcode::
     :skipif: not _WANDB_AVAILABLE
 
-    from pytorch_lightning.loggers import WandbLogger
+    from lightning.pytorch.loggers import WandbLogger
 
     wandb_logger = WandbLogger(project="MNIST", log_model="all")
     trainer = Trainer(logger=wandb_logger)
@@ -169,19 +169,19 @@ Access the wandb logger from any function (except the LightningModule *init*) to
             # Option 2 for specifically logging images
             wandb_logger.log_image(key="generated_images", images=[fake_images])
 
-Here's the full documentation for the :class:`~pytorch_lightning.loggers.WandbLogger`.
+Here's the full documentation for the :class:`~lightning.pytorch.loggers.WandbLogger`.
 `Demo in Google Colab `__ with hyperparameter search and model logging.
 
 ----
 
 Use multiple exp managers
 =========================
-To use multiple experiment managers at the same time, pass a list to the *logger* :class:`~pytorch_lightning.trainer.trainer.Trainer` argument.
+To use multiple experiment managers at the same time, pass a list to the *logger* :class:`~lightning.pytorch.trainer.trainer.Trainer` argument.
 
 .. testcode::
     :skipif: (not _TENSORBOARD_AVAILABLE and not _TENSORBOARDX_AVAILABLE) or not _WANDB_AVAILABLE
 
-    from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger
+    from lightning.pytorch.loggers import TensorBoardLogger, WandbLogger
 
     logger1 = TensorBoardLogger()
     logger2 = WandbLogger()

From 20e0f652bf4b8987e6cd33b71b6998fb3dc96250 Mon Sep 17 00:00:00 2001
From: Jirka 
Date: Fri, 24 Feb 2023 08:45:40 +0100
Subject: [PATCH 2/9] ci

---
 .github/workflows/docs-checks.yml | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/docs-checks.yml b/.github/workflows/docs-checks.yml
index 57423f6870bad..563118b1e052a 100644
--- a/.github/workflows/docs-checks.yml
+++ b/.github/workflows/docs-checks.yml
@@ -65,12 +65,6 @@ jobs:
           key: docs-test-${{ matrix.pkg-name }}-${{ hashFiles('requirements/${{ matrix.pkg-name }}/*.txt') }}
           restore-keys: docs-test-${{ matrix.pkg-name }}-
 
-      - name: Install LAI package
-        # This is needed as App docs is heavily using/referring to lightning package
-        if: ${{ matrix.pkg-name == 'lightning' }}
-        run: |
-          pip install -e . -U -v -f pypi -f ${TORCH_URL}
-
       - name: Adjust docs refs
         if: ${{ matrix.pkg-name == 'lightning' }}
         run: |
@@ -85,8 +79,6 @@ jobs:
           python -c "n = '${{ matrix.pkg-name }}' ; print('REQ_DIR=' + {'lightning': 'app'}.get(n, n))" >> $GITHUB_ENV
 
       - name: Install this package
-        env:
-          PACKAGE_NAME: ${{ matrix.pkg-name }}
         run: |
           pip install -e .[extra,cloud,ui] -U -r requirements/${{ env.REQ_DIR }}/docs.txt -f pypi -f ${TORCH_URL}
           pip list
@@ -138,8 +130,6 @@ jobs:
           python -c "n = '${{ matrix.pkg-name }}' ; print('REQ_DIR=' + {'lightning': 'app'}.get(n, n))" >> $GITHUB_ENV
 
       - name: Install package & dependencies
-        env:
-          PACKAGE_NAME: ${{ matrix.pkg-name }}
         run: |
           pip --version
           pip install -e . -U -r requirements/${{ env.REQ_DIR }}/docs.txt -f pypi -f ${TORCH_URL}
@@ -148,13 +138,11 @@ jobs:
 
       - name: Make Documentation
         working-directory: ./docs/${{ env.DOCS_DIR }}
-        run: |
-          make html --debug --jobs $(nproc) SPHINXOPTS="-W --keep-going"
+        run: make html --debug --jobs $(nproc) SPHINXOPTS="-W --keep-going"
 
       - name: Check External Links in Sphinx Documentation (Optional)
         working-directory: ./docs/${{ env.DOCS_DIR }}
-        run: |
-          make linkcheck
+        run: make linkcheck
         continue-on-error: true
 
       - name: Upload built docs
@@ -162,5 +150,3 @@ jobs:
         with:
           name: docs-${{ matrix.pkg-name }}-${{ github.sha }}
           path: docs/build/html/
-        # Use always() to always run this step to publish test results when there are test failuress
-        if: success()

From 98a7dae8f87983e0dc437b37d6f6e568fce2736f Mon Sep 17 00:00:00 2001
From: Jirka 
Date: Fri, 24 Feb 2023 08:47:26 +0100
Subject: [PATCH 3/9] fabric

---
 docs/source-pytorch/conf.py                      |  2 +-
 .../advanced/distributed_communication.rst       |  2 +-
 .../fabric/advanced/gradient_accumulation.rst    |  2 +-
 docs/source-pytorch/fabric/api/api_reference.rst | 16 ++++++++--------
 .../source-pytorch/fabric/api/fabric_methods.rst | 16 ++++++++--------
 docs/source-pytorch/fabric/api/utilities.rst     |  2 +-
 .../fabric/fundamentals/convert.rst              | 10 +++++-----
 .../fabric/fundamentals/launch.rst               |  2 +-
 .../fabric/fundamentals/notebooks.rst            |  2 +-
 .../fabric/fundamentals/precision.rst            |  2 +-
 docs/source-pytorch/fabric/guide/callbacks.rst   |  2 +-
 docs/source-pytorch/fabric/guide/checkpoint.rst  |  4 ++--
 docs/source-pytorch/fabric/guide/logging.rst     | 10 +++++-----
 .../fabric/guide/multi_node/slurm.rst            |  2 +-
 14 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/docs/source-pytorch/conf.py b/docs/source-pytorch/conf.py
index f29d5e9a03f06..f089fa46425e8 100644
--- a/docs/source-pytorch/conf.py
+++ b/docs/source-pytorch/conf.py
@@ -399,7 +399,7 @@ def package_list_from_file(file):
 from lightning.pytorch.utilities import (
     _TORCHVISION_AVAILABLE,
 )
-from lightning_fabric.loggers.tensorboard import _TENSORBOARD_AVAILABLE, _TENSORBOARDX_AVAILABLE
+from lightning.fabric.loggers.tensorboard import _TENSORBOARD_AVAILABLE, _TENSORBOARDX_AVAILABLE
 from lightning.pytorch.loggers.neptune import _NEPTUNE_AVAILABLE
 from lightning.pytorch.loggers.comet import _COMET_AVAILABLE
 from lightning.pytorch.loggers.mlflow import _MLFLOW_AVAILABLE
diff --git a/docs/source-pytorch/fabric/advanced/distributed_communication.rst b/docs/source-pytorch/fabric/advanced/distributed_communication.rst
index 759e85b5094a7..5bbff312ea341 100644
--- a/docs/source-pytorch/fabric/advanced/distributed_communication.rst
+++ b/docs/source-pytorch/fabric/advanced/distributed_communication.rst
@@ -236,7 +236,7 @@ Reduce
 
 The reduction is an operation that takes multiple values (tensors) as input and returns a single value.
 An example of a reduction is *summation*, e.g., ``torch.sum()``.
-The :meth:`~lightning_fabric.fabric.Fabric.all_reduce` operation allows you to apply a reduction across multiple processes:
+The :meth:`~lightning.fabric.fabric.Fabric.all_reduce` operation allows you to apply a reduction across multiple processes:
 
 .. code-block:: python
 
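The call shown in the full file is outside this hunk; in its simplest form (``fabric`` and ``loss`` as set up earlier on the page, ``reduce_op="mean"`` being the default):

.. code-block:: python

    # average a scalar tensor (e.g. the loss) across all processes
    avg_loss = fabric.all_reduce(loss, reduce_op="mean")
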
diff --git a/docs/source-pytorch/fabric/advanced/gradient_accumulation.rst b/docs/source-pytorch/fabric/advanced/gradient_accumulation.rst
index 2f56f825a09e8..d893e42fd2982 100644
--- a/docs/source-pytorch/fabric/advanced/gradient_accumulation.rst
+++ b/docs/source-pytorch/fabric/advanced/gradient_accumulation.rst
@@ -29,7 +29,7 @@ You are in control of which model accumulates and at what frequency:
 However, in a distributed setting, for example, when training across multiple GPUs or machines, doing it this way can significantly slow down your training loop.
 To optimize this code, we should skip the synchronization in ``.backward()`` during the accumulation phase.
 We only need to synchronize the gradients when the accumulation phase is over!
-This can be achieved by adding the :meth:`~lightning_fabric.fabric.Fabric.no_backward_sync` context manager over the :meth:`~lightning_fabric.fabric.Fabric.backward` call:
+This can be achieved by adding the :meth:`~lightning.fabric.fabric.Fabric.no_backward_sync` context manager over the :meth:`~lightning.fabric.fabric.Fabric.backward` call:
 
 .. code-block:: diff
 
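The diff body is outside this hunk; the resulting pattern looks roughly like this (``accumulation_steps``, ``loss_fn`` and the loop variables are illustrative):

.. code-block:: python

    for batch_idx, (batch, target) in enumerate(dataloader):
        is_accumulating = (batch_idx + 1) % accumulation_steps != 0

        # skip gradient synchronization while still accumulating
        with fabric.no_backward_sync(model, enabled=is_accumulating):
            loss = loss_fn(model(batch), target)
            fabric.backward(loss)

        if not is_accumulating:
            optimizer.step()
            optimizer.zero_grad()
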
diff --git a/docs/source-pytorch/fabric/api/api_reference.rst b/docs/source-pytorch/fabric/api/api_reference.rst
index 649d3182f1c9b..59cd6441c6047 100644
--- a/docs/source-pytorch/fabric/api/api_reference.rst
+++ b/docs/source-pytorch/fabric/api/api_reference.rst
@@ -10,7 +10,7 @@ API Reference
 Fabric
 ^^^^^^
 
-.. currentmodule:: lightning_fabric.fabric
+.. currentmodule:: lightning.fabric.fabric
 
 .. autosummary::
     :toctree: ../../api
@@ -23,7 +23,7 @@ Fabric
 Accelerators
 ^^^^^^^^^^^^
 
-.. currentmodule:: lightning_fabric.accelerators
+.. currentmodule:: lightning.fabric.accelerators
 
 .. autosummary::
     :toctree: ../../api
@@ -40,7 +40,7 @@ Accelerators
 Loggers
 ^^^^^^^
 
-.. currentmodule:: lightning_fabric.loggers
+.. currentmodule:: lightning.fabric.loggers
 
 .. autosummary::
     :toctree: ../../api
@@ -60,7 +60,7 @@ Precision
 
 .. TODO(fabric): include DeepSpeedPrecision
 
-.. currentmodule:: lightning_fabric.plugins.precision
+.. currentmodule:: lightning.fabric.plugins.precision
 
 .. autosummary::
     :toctree: ../../api
@@ -78,7 +78,7 @@ Precision
 Environments
 """"""""""""
 
-.. currentmodule:: lightning_fabric.plugins.environments
+.. currentmodule:: lightning.fabric.plugins.environments
 
 .. autosummary::
     :toctree: ../../api
@@ -98,7 +98,7 @@ Environments
 IO
 ""
 
-.. currentmodule:: lightning_fabric.plugins.io
+.. currentmodule:: lightning.fabric.plugins.io
 
 .. autosummary::
     :toctree: ../../api
@@ -113,7 +113,7 @@ IO
 Collectives
 """""""""""
 
-.. currentmodule:: lightning_fabric.plugins.collectives
+.. currentmodule:: lightning.fabric.plugins.collectives
 
 .. autosummary::
     :toctree: ../../api
@@ -130,7 +130,7 @@ Strategies
 
 .. TODO(fabric): include DeepSpeedStrategy, XLAStrategy
 
-.. currentmodule:: lightning_fabric.strategies
+.. currentmodule:: lightning.fabric.strategies
 
 .. autosummary::
     :toctree: ../../api
diff --git a/docs/source-pytorch/fabric/api/fabric_methods.rst b/docs/source-pytorch/fabric/api/fabric_methods.rst
index d1ec33e7d22af..650e525be43d8 100644
--- a/docs/source-pytorch/fabric/api/fabric_methods.rst
+++ b/docs/source-pytorch/fabric/api/fabric_methods.rst
@@ -64,9 +64,9 @@ This replaces any occurrences of ``loss.backward()`` and makes your code acceler
 to_device
 =========
 
-Use :meth:`~lightning_fabric.fabric.Fabric.to_device` to move models, tensors, or collections of tensors to
-the current device. By default :meth:`~lightning_fabric.fabric.Fabric.setup` and
-:meth:`~lightning_fabric.fabric.Fabric.setup_dataloaders` already move the model and data to the correct
+Use :meth:`~lightning.fabric.fabric.Fabric.to_device` to move models, tensors, or collections of tensors to
+the current device. By default :meth:`~lightning.fabric.fabric.Fabric.setup` and
+:meth:`~lightning.fabric.fabric.Fabric.setup_dataloaders` already move the model and data to the correct
 device, so calling this method is only necessary for manual operation when needed.
 
 .. code-block:: python
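
The block body is outside this hunk; the call itself is a one-liner (``batch`` is illustrative):

.. code-block:: python

    # move a tensor, module, or collection of tensors to the device Fabric selected
    batch = fabric.to_device(batch)
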
@@ -94,7 +94,7 @@ autocast
 ========
 
 Let the precision backend autocast the block of code under this context manager. This is optional and already done by
-Fabric for the model's forward method (once the model was :meth:`~lightning_fabric.fabric.Fabric.setup`).
+Fabric for the model's forward method (once the model was :meth:`~lightning.fabric.fabric.Fabric.setup`).
 You need this only if you wish to autocast more operations outside the ones in model forward:
 
 .. code-block:: python
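
The block body is outside this hunk; a short sketch (``loss_fn``, ``batch`` and ``target`` are illustrative):

.. code-block:: python

    model, optimizer = fabric.setup(model, optimizer)

    # the model's forward is autocast automatically; extra ops need the context manager
    with fabric.autocast():
        loss = loss_fn(model(batch), target)
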
@@ -206,11 +206,11 @@ all_gather, all_reduce, broadcast
 =================================
 
 You can send tensors and other data between processes using collective operations.
-The three most common ones, :meth:`~lightning_fabric.fabric.Fabric.broadcast`, :meth:`~lightning_fabric.fabric.Fabric.all_gather` and :meth:`~lightning_fabric.fabric.Fabric.all_reduce` are available directly on the Fabric object for convenience:
+The three most common ones, :meth:`~lightning.fabric.fabric.Fabric.broadcast`, :meth:`~lightning.fabric.fabric.Fabric.all_gather` and :meth:`~lightning.fabric.fabric.Fabric.all_reduce` are available directly on the Fabric object for convenience:
 
-- :meth:`~lightning_fabric.fabric.Fabric.broadcast`: Send a tensor from one process to all others.
-- :meth:`~lightning_fabric.fabric.Fabric.all_gather`: Gather tensors from every process and stack them.
-- :meth:`~lightning_fabric.fabric.Fabric.all_reduce`: Apply a reduction function on tensors across processes (sum, mean, etc.).
+- :meth:`~lightning.fabric.fabric.Fabric.broadcast`: Send a tensor from one process to all others.
+- :meth:`~lightning.fabric.fabric.Fabric.all_gather`: Gather tensors from every process and stack them.
+- :meth:`~lightning.fabric.fabric.Fabric.all_reduce`: Apply a reduction function on tensors across processes (sum, mean, etc.).
 
 .. code-block:: python
 
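The example body is outside this hunk; a compact sketch of the three calls (variable names are illustrative):

.. code-block:: python

    all_losses = fabric.all_gather(loss)      # stacked across processes
    mean_loss = fabric.all_reduce(loss)       # reduced, "mean" by default
    config = fabric.broadcast(config, src=0)  # rank 0's value on every process
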
diff --git a/docs/source-pytorch/fabric/api/utilities.rst b/docs/source-pytorch/fabric/api/utilities.rst
index 4a114a6389e61..978f7599096c1 100644
--- a/docs/source-pytorch/fabric/api/utilities.rst
+++ b/docs/source-pytorch/fabric/api/utilities.rst
@@ -61,7 +61,7 @@ By default, ``seed_everything`` also handles the initialization of the seed in :
 print
 =====
 
-Avoid duplicated print statements in the logs in distributed training by using Fabric's :meth:`~lightning_fabric.fabric.Fabric.print` method:
+Avoid duplicated print statements in the logs in distributed training by using Fabric's :meth:`~lightning.fabric.fabric.Fabric.print` method:
 
 .. code-block:: python
 
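The block body is outside this hunk; the method mirrors the built-in ``print`` but emits only from a single process (rank 0):

.. code-block:: python

    # printed once instead of once per process
    fabric.print(f"epoch {epoch}: loss={loss.item():.4f}")
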
diff --git a/docs/source-pytorch/fabric/fundamentals/convert.rst b/docs/source-pytorch/fabric/fundamentals/convert.rst
index a754337ff09e5..406326a5c1a0b 100644
--- a/docs/source-pytorch/fabric/fundamentals/convert.rst
+++ b/docs/source-pytorch/fabric/fundamentals/convert.rst
@@ -4,9 +4,9 @@
 Convert PyTorch code to Fabric
 ##############################
 
-Here are five easy steps to let :class:`~lightning_fabric.fabric.Fabric` scale your PyTorch models.
+Here are five easy steps to let :class:`~lightning.fabric.fabric.Fabric` scale your PyTorch models.
 
-**Step 1:** Create the :class:`~lightning_fabric.fabric.Fabric` object at the beginning of your training code.
+**Step 1:** Create the :class:`~lightning.fabric.fabric.Fabric` object at the beginning of your training code.
 
 .. code-block:: python
 
@@ -14,14 +14,14 @@ Here are five easy steps to let :class:`~lightning_fabric.fabric.Fabric` scale y
 
     fabric = Fabric()
 
-**Step 2:** Call :meth:`~lightning_fabric.fabric.Fabric.setup` on each model and optimizer pair and :meth:`~lightning_fabric.fabric.Fabric.setup_dataloaders` on all your data loaders.
+**Step 2:** Call :meth:`~lightning.fabric.fabric.Fabric.setup` on each model and optimizer pair and :meth:`~lightning.fabric.fabric.Fabric.setup_dataloaders` on all your data loaders.
 
 .. code-block:: python
 
     model, optimizer = fabric.setup(model, optimizer)
     dataloader = fabric.setup_dataloaders(dataloader)
 
-**Step 3:** Remove all ``.to`` and ``.cuda`` calls since :class:`~lightning_fabric.fabric.Fabric` will take care of it.
+**Step 3:** Remove all ``.to`` and ``.cuda`` calls since :class:`~lightning.fabric.fabric.Fabric` will take care of it.
 
 .. code-block:: diff
 
@@ -41,7 +41,7 @@ Here are five easy steps to let :class:`~lightning_fabric.fabric.Fabric` scale y
 
     lightning run model path/to/train.py
 
-or use the :meth:`~lightning_fabric.fabric.Fabric.launch` method in a notebook.
+or use the :meth:`~lightning.fabric.fabric.Fabric.launch` method in a notebook.
 Learn more about :doc:`launching distributed training `.
 
 |
diff --git a/docs/source-pytorch/fabric/fundamentals/launch.rst b/docs/source-pytorch/fabric/fundamentals/launch.rst
index af766c56e4a0c..4afc67424481b 100644
--- a/docs/source-pytorch/fabric/fundamentals/launch.rst
+++ b/docs/source-pytorch/fabric/fundamentals/launch.rst
@@ -98,7 +98,7 @@ Or `DeepSpeed Zero3 `_ w
         --accelerator=cuda \
         --precision=16
 
-:class:`~lightning_fabric.fabric.Fabric` can also figure it out automatically for you!
+:class:`~lightning.fabric.fabric.Fabric` can also figure it out automatically for you!
 
 .. code-block:: bash
 
diff --git a/docs/source-pytorch/fabric/fundamentals/notebooks.rst b/docs/source-pytorch/fabric/fundamentals/notebooks.rst
index e5c314eab4b06..0cbb0537274ba 100644
--- a/docs/source-pytorch/fabric/fundamentals/notebooks.rst
+++ b/docs/source-pytorch/fabric/fundamentals/notebooks.rst
@@ -8,7 +8,7 @@ Fabric in Notebooks
 
 Fabric works the same way in notebooks (Jupyter, Google Colab, Kaggle, etc.) if you only run in a single process or GPU.
 If you want to use multiprocessing, for example, multi-GPU, you can put your code in a function and pass that function to the
-:meth:`~lightning_fabric.fabric.Fabric.launch` method:
+:meth:`~lightning.fabric.fabric.Fabric.launch` method:
 
 
 .. code-block:: python
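
The function body is outside this hunk; the pattern is roughly as follows (the ``train`` name is illustrative; ``launch`` passes the Fabric object as the first argument):

.. code-block:: python

    from lightning.fabric import Fabric


    def train(fabric):
        # regular Fabric training code goes here; it runs once per process
        print("running on rank", fabric.global_rank)


    fabric = Fabric(accelerator="gpu", devices=2)
    fabric.launch(train)
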
diff --git a/docs/source-pytorch/fabric/fundamentals/precision.rst b/docs/source-pytorch/fabric/fundamentals/precision.rst
index 5d24b41ba4e54..24a1b33a2156b 100644
--- a/docs/source-pytorch/fabric/fundamentals/precision.rst
+++ b/docs/source-pytorch/fabric/fundamentals/precision.rst
@@ -137,7 +137,7 @@ Fabric automatically casts the data type and operations in the ``forward`` of yo
     # Precision does NOT get applied here (only in forward)
     loss = loss_function(output, target)
 
-If you want to enable operations in lower bit-precision **outside** your model's ``forward()``, you can use the :meth:`~lightning_fabric.fabric.Fabric.autocast` context manager:
+If you want to enable operations in lower bit-precision **outside** your model's ``forward()``, you can use the :meth:`~lightning.fabric.fabric.Fabric.autocast` context manager:
 
 .. code-block:: python
 
diff --git a/docs/source-pytorch/fabric/guide/callbacks.rst b/docs/source-pytorch/fabric/guide/callbacks.rst
index 6c5eebedce0c2..5985ebbd59c62 100644
--- a/docs/source-pytorch/fabric/guide/callbacks.rst
+++ b/docs/source-pytorch/fabric/guide/callbacks.rst
@@ -81,7 +81,7 @@ You can pass a list to Fabric:
     callback2.any_callback_method(arg1=..., arg2=...)
 
 
-The :meth:`~lightning_fabric.fabric.Fabric.call` calls the callback objects in the order they were given to Fabric.
+The :meth:`~lightning.fabric.fabric.Fabric.call` calls the callback objects in the order they were given to Fabric.
 Not all objects registered via ``Fabric(callbacks=...)`` must implement a method with the given name.
 The ones that have a matching method name will get called.
 
diff --git a/docs/source-pytorch/fabric/guide/checkpoint.rst b/docs/source-pytorch/fabric/guide/checkpoint.rst
index b439c318d0a34..40c4eca0bebbc 100644
--- a/docs/source-pytorch/fabric/guide/checkpoint.rst
+++ b/docs/source-pytorch/fabric/guide/checkpoint.rst
@@ -29,7 +29,7 @@ Put everything into a dictionary, including models and optimizers and whatever m
 Save a checkpoint
 *****************
 
-To save the state to the filesystem, pass it to the :meth:`~lightning_fabric.fabric.Fabric.save` method:
+To save the state to the filesystem, pass it to the :meth:`~lightning.fabric.fabric.Fabric.save` method:
 
 .. code-block:: python
 
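The call is outside this hunk; in short (``state`` being the dictionary assembled above):

.. code-block:: python

    fabric.save("checkpoint.ckpt", state)
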
@@ -47,7 +47,7 @@ For example, ``strategy="ddp"`` saves a single file on rank 0, while ``strategy=
 Restore from a checkpoint
 *************************
 
-You can restore the state by loading a saved checkpoint back with :meth:`~lightning_fabric.fabric.Fabric.load`:
+You can restore the state by loading a saved checkpoint back with :meth:`~lightning.fabric.fabric.Fabric.load`:
 
 .. code-block:: python
 
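The example body is outside this hunk; passing the same ``state`` dictionary restores the registered objects in place:

.. code-block:: python

    fabric.load("checkpoint.ckpt", state)
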
diff --git a/docs/source-pytorch/fabric/guide/logging.rst b/docs/source-pytorch/fabric/guide/logging.rst
index b63ec97be63b5..6bb9902f8a8ed 100644
--- a/docs/source-pytorch/fabric/guide/logging.rst
+++ b/docs/source-pytorch/fabric/guide/logging.rst
@@ -36,12 +36,12 @@ To track a metric, add the following:
 
 Built-in loggers you can choose from:
 
-- :class:`~lightning_fabric.loggers.TensorBoardLogger`
-- :class:`~lightning_fabric.loggers.CSVLogger`
+- :class:`~lightning.fabric.loggers.TensorBoardLogger`
+- :class:`~lightning.fabric.loggers.CSVLogger`
 
 |
 
-**Step 2:** Add :meth:`~lightning_fabric.fabric.Fabric.log` calls in your code.
+**Step 2:** Add :meth:`~lightning.fabric.fabric.Fabric.log` calls in your code.
 
 .. code-block:: python
 
@@ -49,7 +49,7 @@ Built-in loggers you can choose from:
     fabric.log("some_value", value)
 
 
-To log multiple metrics at once, use :meth:`~lightning_fabric.fabric.Fabric.log_dict`:
+To log multiple metrics at once, use :meth:`~lightning.fabric.fabric.Fabric.log_dict`:
 
 .. code-block:: python
 
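The block body is outside this hunk; the call takes a plain dictionary (``loss``, ``acc`` and ``global_step`` are illustrative):

.. code-block:: python

    fabric.log_dict({"train/loss": loss, "train/acc": acc}, step=global_step)
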
@@ -67,7 +67,7 @@ View logs dashboard
 How you can view the metrics depends on the individual logger you choose.
 Most have a dashboard that lets you browse everything you log in real time.
 
-For the :class:`~lightning_fabric.loggers.tensorboard.TensorBoardLogger` shown above, you can open it by running
+For the :class:`~lightning.fabric.loggers.tensorboard.TensorBoardLogger` shown above, you can open it by running
 
 .. code-block:: bash
 
diff --git a/docs/source-pytorch/fabric/guide/multi_node/slurm.rst b/docs/source-pytorch/fabric/guide/multi_node/slurm.rst
index e972d143461f9..11ce2e3eabbbb 100644
--- a/docs/source-pytorch/fabric/guide/multi_node/slurm.rst
+++ b/docs/source-pytorch/fabric/guide/multi_node/slurm.rst
@@ -41,7 +41,7 @@ Optionally, explore other strategies too:
     fabric = Fabric(accelerator="gpu", devices=8, num_nodes=4, strategy="fsdp")
 
 
-**Step 2:** Call :meth:`~lightning_fabric.fabric.Fabric.launch` to initialize the communication between devices and nodes.
+**Step 2:** Call :meth:`~lightning.fabric.fabric.Fabric.launch` to initialize the communication between devices and nodes.
 
 .. code-block:: python
 

From efc3528bccb54c267e62fab829be3080b2ee1e04 Mon Sep 17 00:00:00 2001
From: Jirka 
Date: Fri, 24 Feb 2023 09:14:03 +0100
Subject: [PATCH 4/9] trigger

---
 .github/workflows/docs-checks.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/docs-checks.yml b/.github/workflows/docs-checks.yml
index 563118b1e052a..9b74285045fdb 100644
--- a/.github/workflows/docs-checks.yml
+++ b/.github/workflows/docs-checks.yml
@@ -6,7 +6,7 @@ on:
     branches: ["master", "release/*"]
   # use this event type to share secrets with forks.
   # it's important that the PR head SHA is checked out to run the changes
-  pull_request_target:
+  pull_request:  # fixme: pull_request_target
     branches: ["master", "release/*"]
     paths:
       - ".actions/**"

From 086ce9832ba4a0cb74ab1ee87f2a56ef623a1902 Mon Sep 17 00:00:00 2001
From: Jirka 
Date: Fri, 24 Feb 2023 13:31:53 +0100
Subject: [PATCH 5/9] links

---
 docs/source-pytorch/accelerators/tpu_faq.rst         | 2 +-
 docs/source-pytorch/advanced/training_tricks.rst     | 2 +-
 docs/source-pytorch/ecosystem/community_examples.rst | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/source-pytorch/accelerators/tpu_faq.rst b/docs/source-pytorch/accelerators/tpu_faq.rst
index 334167fa4462f..5297f5925e301 100644
--- a/docs/source-pytorch/accelerators/tpu_faq.rst
+++ b/docs/source-pytorch/accelerators/tpu_faq.rst
@@ -16,7 +16,7 @@ XLA configuration is missing?
     RuntimeError: tensorflow/compiler/xla/xla_client/computation_client.cc:273 : Missing XLA configuration
     Traceback (most recent call last):
     ...
-    File "/home/kaushikbokka/pytorch-lightning/lightning.pytorch/utilities/device_parser.py", line 125, in parse_tpu_cores
+    File "/home/kaushikbokka/pytorch-lightning/pytorch_lightning/utilities/device_parser.py", line 125, in parse_tpu_cores
         raise MisconfigurationException('No TPU devices were found.')
     lightning.pytorch.utilities.exceptions.MisconfigurationException: No TPU devices were found.
 
diff --git a/docs/source-pytorch/advanced/training_tricks.rst b/docs/source-pytorch/advanced/training_tricks.rst
index fdba0da62e860..16ec8bebc17df 100644
--- a/docs/source-pytorch/advanced/training_tricks.rst
+++ b/docs/source-pytorch/advanced/training_tricks.rst
@@ -429,4 +429,4 @@ For this, all data pre-loading should be done on the main process inside :meth:`
     trainer = Trainer(accelerator="gpu", devices=2, strategy="ddp_spawn")
     trainer.fit(model, datamodule)
 
-See the `graph-level `_ and `node-level `_ prediction examples in PyTorch Geometric for practical use-cases.
+See the `graph-level `_ and `node-level `_ prediction examples in PyTorch Geometric for practical use-cases.
diff --git a/docs/source-pytorch/ecosystem/community_examples.rst b/docs/source-pytorch/ecosystem/community_examples.rst
index 4e500c7a4dede..4e0c30b71bb16 100644
--- a/docs/source-pytorch/ecosystem/community_examples.rst
+++ b/docs/source-pytorch/ecosystem/community_examples.rst
@@ -32,5 +32,5 @@ Community Examples
 PyTorch Ecosystem Examples
 ==========================
 
-- `PyTorch Geometric: Deep learning on graphs and other irregular structures `_.
+- `PyTorch Geometric: Deep learning on graphs and other irregular structures `_.
 - `TorchIO, MONAI and Lightning for 3D medical image segmentation `_.

From 5988045d9705db476a7ce1836aecd7fcb9addfc4 Mon Sep 17 00:00:00 2001
From: Jirka 
Date: Fri, 24 Feb 2023 13:40:50 +0100
Subject: [PATCH 6/9] .

---
 docs/source-pytorch/ecosystem/community_examples.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source-pytorch/ecosystem/community_examples.rst b/docs/source-pytorch/ecosystem/community_examples.rst
index 4e0c30b71bb16..2ff66b5ce0cfd 100644
--- a/docs/source-pytorch/ecosystem/community_examples.rst
+++ b/docs/source-pytorch/ecosystem/community_examples.rst
@@ -15,7 +15,7 @@ Community Examples
 - `NeuralTexture (CVPR) `_
 - `Recurrent Attentive Neural Process `_
 - `Siamese Nets for One-shot Image Recognition `_
-- `Speech Transformers `_
+- `Speech Transformers `_
 - `Transformers transfer learning (Huggingface) `_
 - `Transformers text classification `_
 - `VAE Library of over 18+ VAE flavors `_

From 2c97cdadebad739d53b00a956e501afaf91f9156 Mon Sep 17 00:00:00 2001
From: Jirka 
Date: Fri, 24 Feb 2023 13:42:59 +0100
Subject: [PATCH 7/9] docstring

---
 src/lightning/pytorch/core/hooks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lightning/pytorch/core/hooks.py b/src/lightning/pytorch/core/hooks.py
index d81283ee3c5c7..c01653bdb74d3 100644
--- a/src/lightning/pytorch/core/hooks.py
+++ b/src/lightning/pytorch/core/hooks.py
@@ -170,7 +170,7 @@ def on_train_epoch_end(self) -> None:
         """Called in the training loop at the very end of the epoch.
 
         To access all batch outputs at the end of the epoch, you can cache step outputs as an attribute of the
-        :class:`pytorch_lightning.LightningModule` and access them in this hook:
+        :class:`~lightning.pytorch.LightningModule` and access them in this hook:
 
         .. code-block:: python
 

From 2ef3c0bf9ce0ffad18085048df7b33b0bb68eaab Mon Sep 17 00:00:00 2001
From: Jirka 
Date: Mon, 27 Feb 2023 18:17:29 +0100
Subject: [PATCH 8/9] chlog

---
 docs/source-pytorch/conf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source-pytorch/conf.py b/docs/source-pytorch/conf.py
index f089fa46425e8..d1909c005f7aa 100644
--- a/docs/source-pytorch/conf.py
+++ b/docs/source-pytorch/conf.py
@@ -74,7 +74,7 @@ def _transform_changelog(path_in: str, path_out: str) -> None:
     shutil.copy(md, os.path.join(PATH_HERE, FOLDER_GENERATED, os.path.basename(md)))
 # copy also the changelog
 _transform_changelog(
-    os.path.join(PATH_ROOT, "src", "pytorch_lightning", "CHANGELOG.md"),
+    os.path.join(PATH_ROOT, "src", "lightning", "pytorch", "CHANGELOG.md"),
     os.path.join(PATH_HERE, FOLDER_GENERATED, "CHANGELOG.md"),
 )
 

From 409313e5fae1f360ce4c43d66cbb3fcd3c95608a Mon Sep 17 00:00:00 2001
From: Jirka 
Date: Mon, 27 Feb 2023 21:11:07 +0100
Subject: [PATCH 9/9] cleaning

---
 .github/workflows/ci-dockers-pytorch.yml | 2 +-
 .github/workflows/docs-checks.yml        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci-dockers-pytorch.yml b/.github/workflows/ci-dockers-pytorch.yml
index a4992589bdbd3..5d04f23c685fc 100644
--- a/.github/workflows/ci-dockers-pytorch.yml
+++ b/.github/workflows/ci-dockers-pytorch.yml
@@ -230,7 +230,7 @@ jobs:
         timeout-minutes: 55
 
   build-docs:
-    # if: github.event.pull_request.draft == false  # fixme
+    if: github.event.pull_request.draft == false
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v3
diff --git a/.github/workflows/docs-checks.yml b/.github/workflows/docs-checks.yml
index 9b74285045fdb..563118b1e052a 100644
--- a/.github/workflows/docs-checks.yml
+++ b/.github/workflows/docs-checks.yml
@@ -6,7 +6,7 @@ on:
     branches: ["master", "release/*"]
   # use this event type to share secrets with forks.
   # it's important that the PR head SHA is checked out to run the changes
-  pull_request:  # fixme: pull_request_target
+  pull_request_target:
     branches: ["master", "release/*"]
     paths:
       - ".actions/**"