From e15e49cf3100c89a171db42a55d81547ec7814dc Mon Sep 17 00:00:00 2001
From: Karthik Prasad
Date: Mon, 5 Apr 2021 00:58:03 -0700
Subject: [PATCH 1/3] sanitize none params during pruning

---
 pytorch_lightning/callbacks/pruning.py |  5 ++++-
 tests/callbacks/test_pruning.py        | 19 +++++++++++--------
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/pytorch_lightning/callbacks/pruning.py b/pytorch_lightning/callbacks/pruning.py
index 3f82ab3565403..b9f1214291e72 100644
--- a/pytorch_lightning/callbacks/pruning.py
+++ b/pytorch_lightning/callbacks/pruning.py
@@ -422,7 +422,10 @@ def sanitize_parameters_to_prune(
         current_modules = [m for m in pl_module.modules() if not isinstance(m, _MODULE_CONTAINERS)]
 
         if parameters_to_prune is None:
-            parameters_to_prune = [(m, p) for p in parameters for m in current_modules if hasattr(m, p)]
+            parameters_to_prune = [
+                (m, p) for p in parameters for m in current_modules
+                if hasattr(m, p) and getattr(m, p) is not None
+            ]
         elif (
             isinstance(parameters_to_prune, (list, tuple)) and len(parameters_to_prune) > 0
             and all(len(p) == 2 for p in parameters_to_prune)
diff --git a/tests/callbacks/test_pruning.py b/tests/callbacks/test_pruning.py
index e42689a25d8aa..1e39cc9c330b0 100644
--- a/tests/callbacks/test_pruning.py
+++ b/tests/callbacks/test_pruning.py
@@ -36,7 +36,7 @@ def __init__(self):
         self.layer = Sequential(
             OrderedDict([
                 ("mlp_1", nn.Linear(32, 32)),
-                ("mlp_2", nn.Linear(32, 32)),
+                ("mlp_2", nn.Linear(32, 32, bias=False)),
                 ("mlp_3", nn.Linear(32, 2)),
             ])
         )
@@ -85,7 +85,10 @@ def train_with_pruning_callback(
     if parameters_to_prune:
         pruning_kwargs["parameters_to_prune"] = [(model.layer.mlp_1, "weight"), (model.layer.mlp_2, "weight")]
     else:
-        pruning_kwargs["parameter_names"] = ["weight"]
+        if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured"):
+            pruning_kwargs["parameter_names"] = ["weight"]
+        else:
+            pruning_kwargs["parameter_names"] = ["weight", "bias"]
     if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured"):
         pruning_kwargs["pruning_dim"] = 0
     if pruning_fn == "ln_structured":
@@ -249,14 +252,14 @@ def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent: bool
     actual = [m for m in actual if m.startswith("Applied")]
     assert actual == [
         "Applied `L1Unstructured`. Pruned: 0/1122 (0.00%) -> 544/1122 (48.48%)",
-        "Applied `L1Unstructured` to `Linear(in_features=32, out_features=32, bias=True).weight` with amount=0.5. Pruned: 0 (0.00%) -> 506 (49.41%)",  # noqa: E501
-        "Applied `L1Unstructured` to `Linear(in_features=32, out_features=2, bias=True).weight` with amount=0.5. Pruned: 0 (0.00%) -> 38 (59.38%)",  # noqa: E501
+        "Applied `L1Unstructured` to `Linear(in_features=32, out_features=32, bias=True).weight` with amount=0.5. Pruned: 0 (0.00%) -> 500 (48.83%)",  # noqa: E501
+        "Applied `L1Unstructured` to `Linear(in_features=32, out_features=2, bias=True).weight` with amount=0.5. Pruned: 0 (0.00%) -> 44 (68.75%)",  # noqa: E501
         "Applied `RandomUnstructured`. Pruned: 544/1122 (48.48%) -> 680/1122 (60.61%)",
-        "Applied `RandomUnstructured` to `Linear(in_features=32, out_features=32, bias=True).weight` with amount=0.25. Pruned: 506 (49.41%) -> 633 (61.82%)",  # noqa: E501
-        "Applied `RandomUnstructured` to `Linear(in_features=32, out_features=2, bias=True).weight` with amount=0.25. Pruned: 38 (59.38%) -> 47 (73.44%)",  # noqa: E501
+        "Applied `RandomUnstructured` to `Linear(in_features=32, out_features=32, bias=True).weight` with amount=0.25. Pruned: 500 (48.83%) -> 635 (62.01%)",  # noqa: E501
+        "Applied `RandomUnstructured` to `Linear(in_features=32, out_features=2, bias=True).weight` with amount=0.25. Pruned: 44 (68.75%) -> 45 (70.31%)",  # noqa: E501
         "Applied `L1Unstructured`. Pruned: 680/1122 (60.61%) -> 884/1122 (78.79%)",
-        "Applied `L1Unstructured` to `Linear(in_features=32, out_features=32, bias=True).weight` with amount=0.5. Pruned: 633 (61.82%) -> 828 (80.86%)",  # noqa: E501
-        "Applied `L1Unstructured` to `Linear(in_features=32, out_features=2, bias=True).weight` with amount=0.5. Pruned: 47 (73.44%) -> 56 (87.50%)",  # noqa: E501
+        "Applied `L1Unstructured` to `Linear(in_features=32, out_features=32, bias=True).weight` with amount=0.5. Pruned: 635 (62.01%) -> 830 (81.05%)",  # noqa: E501
+        "Applied `L1Unstructured` to `Linear(in_features=32, out_features=2, bias=True).weight` with amount=0.5. Pruned: 45 (70.31%) -> 54 (84.38%)",  # noqa: E501
     ]
 
     filepath = str(tmpdir / "foo.ckpt")

From 8fcfe8a87f2ac7c6c87db448f85f85efa0851a0c Mon Sep 17 00:00:00 2001
From: Karthik Prasad
Date: Mon, 5 Apr 2021 09:27:25 -0700
Subject: [PATCH 2/3] amend

---
 CHANGELOG.md                           | 3 +++
 pytorch_lightning/callbacks/pruning.py | 3 +--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 81846809fbf85..39883adab05d8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -170,6 +170,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Fixed
 
+- Sanitze `None` params during pruning ([#6836])(https://github.com/PyTorchLightning/pytorch-lightning/pull/6836)
+
+
 - Made the `Plugin.reduce` method more consistent across all Plugins to reflect a mean-reduction by default ([#6011](https://github.com/PyTorchLightning/pytorch-lightning/pull/6011))
 
 
diff --git a/pytorch_lightning/callbacks/pruning.py b/pytorch_lightning/callbacks/pruning.py
index b9f1214291e72..36622af0edaff 100644
--- a/pytorch_lightning/callbacks/pruning.py
+++ b/pytorch_lightning/callbacks/pruning.py
@@ -423,8 +423,7 @@ def sanitize_parameters_to_prune(
 
         if parameters_to_prune is None:
             parameters_to_prune = [
-                (m, p) for p in parameters for m in current_modules
-                if hasattr(m, p) and getattr(m, p) is not None
+                (m, p) for p in parameters for m in current_modules if getattr(m, p, None) is not None
             ]
         elif (
             isinstance(parameters_to_prune, (list, tuple)) and len(parameters_to_prune) > 0

From 07b442dcde3c43719f6d1890335c41fe853087e0 Mon Sep 17 00:00:00 2001
From: Rohit Gupta
Date: Tue, 6 Apr 2021 00:14:49 +0530
Subject: [PATCH 3/3] Apply suggestions from code review

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 39883adab05d8..bf47285ba7923 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -170,7 +170,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Fixed
 
-- Sanitze `None` params during pruning ([#6836])(https://github.com/PyTorchLightning/pytorch-lightning/pull/6836)
+- Sanitize `None` params during pruning ([#6836](https://github.com/PyTorchLightning/pytorch-lightning/pull/6836))
 
 
 - Made the `Plugin.reduce` method more consistent across all Plugins to reflect a mean-reduction by default ([#6011](https://github.com/PyTorchLightning/pytorch-lightning/pull/6011))
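
Note (illustrative, not part of the patch series): the short sketch below shows the failure mode these commits address, assuming only `torch` is installed; the variable names are made up for the example. `nn.Linear(..., bias=False)` still registers a `bias` attribute whose value is `None`, so the old `hasattr(m, p)` filter kept `(module, "bias")` pairs that later break when pruning is applied to them, while the new `getattr(m, p, None) is not None` check drops such pairs.

import torch.nn as nn

# A model where one layer has no bias: nn.Linear registers `bias` as None when bias=False.
model = nn.Sequential(nn.Linear(32, 32), nn.Linear(32, 32, bias=False))
parameters = ["weight", "bias"]
modules = [m for m in model.modules() if not isinstance(m, nn.Sequential)]

# Old filter: hasattr() is True even when the attribute's value is None,
# so the (Linear(bias=False), "bias") pair slips through.
old = [(m, p) for p in parameters for m in modules if hasattr(m, p)]

# Filter after this patch series: pairs whose parameter is None are dropped.
new = [(m, p) for p in parameters for m in modules if getattr(m, p, None) is not None]

print(len(old), len(new))  # prints "4 3": the None-bias pair is gone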