Commit 413602f

Merge branch 'master' into bugfix/top_k_ignore_index

Borda authored Mar 6, 2023
2 parents f69e449 + a5df1c6
Showing 90 changed files with 576 additions and 37 deletions.
8 changes: 0 additions & 8 deletions pyproject.toml
@@ -104,14 +104,6 @@ exclude = [
ignore-init-module-imports = true
unfixable = ["F401"]

[tool.ruff.per-file-ignores]
"setup.py" = ["D100", "SIM115"]
"__about__.py" = ["D100"]
"__init__.py" = ["D100"]
"tests/**" = [
"D102", # Missing docstring in public method
]

[tool.ruff.pydocstyle]
# Use Google-style docstrings.
convention = "google"
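
With `convention = "google"` retained here, the docstrings added throughout this commit are linted against the Google layout. For reference, a minimal Google-style docstring on a hypothetical helper (not from the repository) looks like:

    def scale(values: list, factor: float = 2.0) -> list:
        """Scale every value in a list by a constant factor.

        Args:
            values: Numbers to scale.
            factor: Multiplier applied to each value.

        Returns:
            A new list with each value multiplied by ``factor``.
        """
        return [v * factor for v in values]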
15 changes: 14 additions & 1 deletion tests/integrations/lightning/boring_model.py
@@ -71,61 +71,74 @@ def __init__(self):
self.layer = torch.nn.Linear(32, 2)

def forward(self, x):
"""Forward pass of x through model."""
return self.layer(x)

@staticmethod
def loss(_, prediction):
# An arbitrary loss that updates the model weights during `Trainer.fit` calls
"""Arbitrary loss."""
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))

def step(self, x):
"""Single step in model."""
x = self(x)
return torch.nn.functional.mse_loss(x, torch.ones_like(x))

def training_step(self, batch, batch_idx):
"""Single training step in model."""
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}

@staticmethod
def training_step_end(training_step_outputs):
"""Run at the end of a training step. Needed when using multiple devices."""
return training_step_outputs

@staticmethod
def training_epoch_end(outputs) -> None:
"""Run at the end of a training epoch."""
torch.stack([x["loss"] for x in outputs]).mean()

def validation_step(self, batch, batch_idx):
"""Single validation step in the model."""
output = self.layer(batch)
loss = self.loss(batch, output)
return {"x": loss}

@staticmethod
def validation_epoch_end(outputs) -> None:
"""Run at the end of each validation epoch."""
torch.stack([x["x"] for x in outputs]).mean()

def test_step(self, batch, batch_idx):
"""Single test step in the model."""
output = self.layer(batch)
loss = self.loss(batch, output)
return {"y": loss}

@staticmethod
def test_epoch_end(outputs) -> None:
"""Run at the end of each test epoch."""
torch.stack([x["y"] for x in outputs]).mean()

def configure_optimizers(self):
"""Configure which optimizer to use when training the model."""
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]

@staticmethod
def train_dataloader():
"""Define train dataloader used for training the model."""
return torch.utils.data.DataLoader(RandomDataset(32, 64))

@staticmethod
def val_dataloader():
"""Define validation dataloader used for validating the model."""
return torch.utils.data.DataLoader(RandomDataset(32, 64))

@staticmethod
def test_dataloader():
"""Define test dataloader used for testing the mdoel."""
return torch.utils.data.DataLoader(RandomDataset(32, 64))
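
Since `BoringModel` supplies its own dataloaders and optimizer, a bare `Trainer` can drive it end to end. A minimal smoke-test sketch, assuming a PyTorch Lightning version that still supports the `*_epoch_end` hooks and that the module is importable from this path:

    import pytorch_lightning as pl

    from tests.integrations.lightning.boring_model import BoringModel  # assumed import path

    model = BoringModel()
    trainer = pl.Trainer(max_epochs=1, logger=False, enable_checkpointing=False)
    trainer.fit(model)   # uses the model's own train/val dataloaders
    trainer.test(model)  # likewise uses the model's test dataloader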
1 change: 1 addition & 0 deletions tests/integrations/test_lightning.py
@@ -28,6 +28,7 @@ class DiffMetric(SumMetric):
"""DiffMetric inheritted from `SumMetric` by overidding its `update` method."""

def update(self, value):
"""Update state."""
super().update(-value)
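
Because `update` negates each value before delegating to `SumMetric`, the computed result is the negative of the running sum. A standalone sketch of the expected behaviour (assuming only that `SumMetric` is importable from `torchmetrics`):

    import torch
    from torchmetrics import SumMetric

    class DiffMetric(SumMetric):
        """DiffMetric inherits from `SumMetric` and overrides its `update` method."""

        def update(self, value):
            """Update state with the negated value."""
            super().update(-value)

    metric = DiffMetric()
    metric.update(torch.tensor(3.0))
    metric.update(torch.tensor(1.0))
    print(metric.compute())  # tensor(-4.)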


5 changes: 5 additions & 0 deletions tests/unittests/audio/test_pesq.py
@@ -82,6 +82,7 @@ class TestPESQ(MetricTester):
@pytest.mark.parametrize("num_processes", [1, 2])
@pytest.mark.parametrize("ddp", [True, False])
def test_pesq(self, preds, target, ref_metric, fs, mode, num_processes, ddp):
"""Test class implementation of metric."""
if num_processes != 1 and ddp:
pytest.skip("Multiprocessing and ddp does not work together")
self.run_class_metric_test(
@@ -95,6 +96,7 @@ def test_pesq(self, preds, target, ref_metric, fs, mode, num_processes, ddp):

@pytest.mark.parametrize("num_processes", [1, 2])
def test_pesq_functional(self, preds, target, ref_metric, fs, mode, num_processes):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
@@ -104,6 +106,7 @@ def test_pesq_functional(self, preds, target, ref_metric, fs, mode, num_processes):
)

def test_pesq_differentiability(self, preds, target, ref_metric, fs, mode):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
@@ -113,10 +116,12 @@ def test_pesq_differentiability(self, preds, target, ref_metric, fs, mode):
)

def test_pesq_half_cpu(self, preds, target, ref_metric, fs, mode):
"""Test dtype support of the metric on CPU."""
pytest.xfail("PESQ metric does not support cpu + half precision")

@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_pesq_half_gpu(self, preds, target, ref_metric, fs, mode):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
6 changes: 6 additions & 0 deletions tests/unittests/audio/test_pit.py
@@ -122,6 +122,7 @@ class TestPIT(MetricTester):

@pytest.mark.parametrize("ddp", [True, False])
def test_pit(self, preds, target, ref_metric, metric_func, eval_func, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
@@ -132,6 +133,7 @@ def test_pit(self, preds, target, ref_metric, metric_func, eval_func, ddp):
)

def test_pit_functional(self, preds, target, ref_metric, metric_func, eval_func):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds=preds,
target=target,
@@ -141,6 +143,8 @@ def test_pit_functional(self, preds, target, ref_metric, metric_func, eval_func):
)

def test_pit_differentiability(self, preds, target, ref_metric, metric_func, eval_func):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""

def pit_diff(preds, target, metric_func, eval_func):
return permutation_invariant_training(preds, target, metric_func, eval_func)[0]

@@ -153,10 +157,12 @@ def pit_diff(preds, target, metric_func, eval_func):
)

def test_pit_half_cpu(self, preds, target, ref_metric, metric_func, eval_func):
"""Test dtype support of the metric on CPU."""
pytest.xfail("PIT metric does not support cpu + half precision")

@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_pit_half_gpu(self, preds, target, ref_metric, metric_func, eval_func):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
5 changes: 5 additions & 0 deletions tests/unittests/audio/test_sdr.py
@@ -83,6 +83,7 @@ class TestSDR(MetricTester):

@pytest.mark.parametrize("ddp", [True, False])
def test_sdr(self, preds, target, ref_metric, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
@@ -93,6 +94,7 @@ def test_sdr(self, preds, target, ref_metric, ddp):
)

def test_sdr_functional(self, preds, target, ref_metric):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
@@ -102,6 +104,7 @@ def test_sdr_functional(self, preds, target, ref_metric):
)

def test_sdr_differentiability(self, preds, target, ref_metric):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
@@ -110,6 +113,7 @@ def test_sdr_differentiability(self, preds, target, ref_metric):
)

def test_sdr_half_cpu(self, preds, target, ref_metric):
"""Test dtype support of the metric on CPU."""
self.run_precision_test_cpu(
preds=preds,
target=target,
@@ -120,6 +124,7 @@ def test_sdr_half_cpu(self, preds, target, ref_metric):

@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_sdr_half_gpu(self, preds, target, ref_metric):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
5 changes: 5 additions & 0 deletions tests/unittests/audio/test_si_sdr.py
@@ -81,6 +81,7 @@ class TestSISDR(MetricTester):

@pytest.mark.parametrize("ddp", [True, False])
def test_si_sdr(self, preds, target, ref_metric, zero_mean, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
@@ -91,6 +92,7 @@ def test_si_sdr(self, preds, target, ref_metric, zero_mean, ddp):
)

def test_si_sdr_functional(self, preds, target, ref_metric, zero_mean):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
@@ -100,6 +102,7 @@ def test_si_sdr_functional(self, preds, target, ref_metric, zero_mean):
)

def test_si_sdr_differentiability(self, preds, target, ref_metric, zero_mean):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
@@ -109,10 +112,12 @@ def test_si_sdr_differentiability(self, preds, target, ref_metric, zero_mean):
)

def test_si_sdr_half_cpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SI-SDR metric does not support cpu + half precision")

@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_si_sdr_half_gpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
5 changes: 5 additions & 0 deletions tests/unittests/audio/test_si_snr.py
@@ -76,6 +76,7 @@ class TestSISNR(MetricTester):

@pytest.mark.parametrize("ddp", [True, False])
def test_si_snr(self, preds, target, ref_metric, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
@@ -85,6 +86,7 @@ def test_si_snr(self, preds, target, ref_metric, ddp):
)

def test_si_snr_functional(self, preds, target, ref_metric):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
@@ -93,6 +95,7 @@ def test_si_snr_functional(self, preds, target, ref_metric):
)

def test_si_snr_differentiability(self, preds, target, ref_metric):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
@@ -101,10 +104,12 @@ def test_si_snr_differentiability(self, preds, target, ref_metric):
)

def test_si_snr_half_cpu(self, preds, target, ref_metric):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SI-SNR metric does not support cpu + half precision")

@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_si_snr_half_gpu(self, preds, target, ref_metric):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
5 changes: 5 additions & 0 deletions tests/unittests/audio/test_snr.py
@@ -81,6 +81,7 @@ class TestSNR(MetricTester):

@pytest.mark.parametrize("ddp", [True, False])
def test_snr(self, preds, target, ref_metric, zero_mean, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
@@ -91,6 +92,7 @@ def test_snr(self, preds, target, ref_metric, zero_mean, ddp):
)

def test_snr_functional(self, preds, target, ref_metric, zero_mean):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
@@ -100,6 +102,7 @@ def test_snr_functional(self, preds, target, ref_metric, zero_mean):
)

def test_snr_differentiability(self, preds, target, ref_metric, zero_mean):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
@@ -109,10 +112,12 @@ def test_snr_differentiability(self, preds, target, ref_metric, zero_mean):
)

def test_snr_half_cpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on CPU."""
pytest.xfail("SNR metric does not support cpu + half precision")

@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_snr_half_gpu(self, preds, target, ref_metric, zero_mean):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
5 changes: 5 additions & 0 deletions tests/unittests/audio/test_stoi.py
@@ -80,6 +80,7 @@ class TestSTOI(MetricTester):

@pytest.mark.parametrize("ddp", [True, False])
def test_stoi(self, preds, target, ref_metric, fs, extended, ddp):
"""Test class implementation of metric."""
self.run_class_metric_test(
ddp,
preds,
@@ -90,6 +91,7 @@ def test_stoi(self, preds, target, ref_metric, fs, extended, ddp):
)

def test_stoi_functional(self, preds, target, ref_metric, fs, extended):
"""Test functional implementation of metric."""
self.run_functional_metric_test(
preds,
target,
@@ -99,6 +101,7 @@ def test_stoi_functional(self, preds, target, ref_metric, fs, extended):
)

def test_stoi_differentiability(self, preds, target, ref_metric, fs, extended):
"""Test the differentiability of the metric, according to its `is_differentiable` attribute."""
self.run_differentiability_test(
preds=preds,
target=target,
@@ -108,10 +111,12 @@ def test_stoi_differentiability(self, preds, target, ref_metric, fs, extended):
)

def test_stoi_half_cpu(self, preds, target, ref_metric, fs, extended):
"""Test dtype support of the metric on CPU."""
pytest.xfail("STOI metric does not support cpu + half precision")

@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
def test_stoi_half_gpu(self, preds, target, ref_metric, fs, extended):
"""Test dtype support of the metric on GPU."""
self.run_precision_test_gpu(
preds=preds,
target=target,
2 changes: 2 additions & 0 deletions tests/unittests/bases/test_composition.py
@@ -31,9 +31,11 @@ def __init__(self, val_to_return):
self._val_to_return = val_to_return

def update(self, *args, **kwargs) -> None:
"""Compute state."""
self._num_updates += 1

def compute(self):
"""Compute result."""
return tensor(self._val_to_return)
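
This dummy metric gives the composition tests a metric with a fixed, predictable result. The feature under test is torchmetrics' metric arithmetic, where combining two metrics with a Python operator yields a compositional metric; a rough sketch using the stock `SumMetric` aggregator instead of the test-local dummy (assuming the standard operator overloading on `Metric`):

    import torch
    from torchmetrics import SumMetric

    a, b = SumMetric(), SumMetric()
    composed = a + b  # metric arithmetic builds a compositional metric

    a.update(torch.tensor(2.0))
    b.update(torch.tensor(5.0))
    print(composed.compute())  # tensor(7.)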


15 changes: 0 additions & 15 deletions tests/unittests/classification/__init__.py
@@ -1,15 +0,0 @@
from torchmetrics import Metric


class MetricWrapper(Metric):
"""Metric wrapper."""

def __init__(self, metric):
super().__init__()
self.metric = metric

def update(self, *args, **kwargs):
self.metric.update(*args, **kwargs)

def compute(self, *args, **kwargs):
return self.metric.compute(*args, **kwargs)