From 4dc79bd71418e715d2ac2f61095a1994c00acf41 Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Tue, 1 Feb 2022 13:20:04 +0100
Subject: [PATCH 01/17] speed

---
 tests/audio/test_snr.py  | 3 ++-
 tests/helpers/testers.py | 6 +++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/tests/audio/test_snr.py b/tests/audio/test_snr.py
index 1ab29a63049..e9c1c945fda 100644
--- a/tests/audio/test_snr.py
+++ b/tests/audio/test_snr.py
@@ -21,7 +21,7 @@
 from torch import Tensor
 
 from tests.helpers import seed_all
-from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester
+from tests.helpers.testers import NUM_BATCHES, MetricTester
 from torchmetrics.audio import SignalNoiseRatio
 from torchmetrics.functional import signal_noise_ratio
 from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6
@@ -32,6 +32,7 @@
 
 Input = namedtuple("Input", ["preds", "target"])
 
+BATCH_SIZE = 2
 inputs = Input(
     preds=torch.rand(NUM_BATCHES, BATCH_SIZE, 1, TIME),
     target=torch.rand(NUM_BATCHES, BATCH_SIZE, 1, TIME),
diff --git a/tests/helpers/testers.py b/tests/helpers/testers.py
index 66877b9cd7a..abf6d6dc4fd 100644
--- a/tests/helpers/testers.py
+++ b/tests/helpers/testers.py
@@ -33,7 +33,7 @@
     pass
 
 NUM_PROCESSES = 2
-NUM_BATCHES = 10
+NUM_BATCHES = 8
 BATCH_SIZE = 32
 NUM_CLASSES = 5
 EXTRA_DIM = 3
@@ -545,7 +545,7 @@ def run_differentiability_test(
         metric = metric_module(**metric_args)
         if preds.is_floating_point():
             preds.requires_grad = True
-            out = metric(preds[0], target[0])
+            out = metric(preds[0, 0], target[0, 0])
 
             # Check if requires_grad matches is_differentiable attribute
             _assert_requires_grad(metric, out)
@@ -553,7 +553,7 @@
         if metric.is_differentiable and metric_functional is not None:
             # check for numerical correctness
             assert torch.autograd.gradcheck(
-                partial(metric_functional, **metric_args), (preds[0].double(), target[0])
+                partial(metric_functional, **metric_args), (preds[0, 0].double(), target[0, 0])
             )
 
             # reset as else it will carry over to other tests

From 2269b44582f4865b266fbe0d34f8050036ec3d8e Mon Sep 17 00:00:00 2001
From: Nicki Skafte Detlefsen
Date: Thu, 3 Feb 2022 14:48:34 +0100
Subject: [PATCH 02/17] Update tests/helpers/testers.py

---
 tests/helpers/testers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/helpers/testers.py b/tests/helpers/testers.py
index abf6d6dc4fd..4962e2414c7 100644
--- a/tests/helpers/testers.py
+++ b/tests/helpers/testers.py
@@ -33,7 +33,7 @@
     pass
 
 NUM_PROCESSES = 2
-NUM_BATCHES = 8
+NUM_BATCHES = 8 # Need to be divisible with the number of processes
 BATCH_SIZE = 32
 NUM_CLASSES = 5
 EXTRA_DIM = 3

From 1b2f1e7a29e1dd372791bf8e8e95f291edaf1f34 Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Thu, 3 Feb 2022 15:23:50 +0100
Subject: [PATCH 03/17] fix tests

---
 tests/audio/test_pit.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/audio/test_pit.py b/tests/audio/test_pit.py
index c668f3a77b7..3b683befb43 100644
--- a/tests/audio/test_pit.py
+++ b/tests/audio/test_pit.py
@@ -145,8 +145,8 @@ def pit_diff(preds, target, metric_func, eval_func):
             return permutation_invariant_training(preds, target, metric_func, eval_func)[0]
 
         self.run_differentiability_test(
-            preds=preds,
-            target=target,
+            preds=preds.unsqueeze(0),
+            target=target.unsqueeze(0),
             metric_module=PermutationInvariantTraining,
             metric_functional=pit_diff,
             metric_args={"metric_func": metric_func, "eval_func": eval_func},

From cad8f1488ef502db7ca1976524a6bd2e8f25ae8b Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Thu, 3 Feb 2022 15:25:01 +0100
Subject: [PATCH 04/17] changelog

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3eb3f080be4..93d2388bc2e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -58,6 +58,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Improve mAP performance ([#742](https://github.com/PyTorchLightning/metrics/pull/742))
 
 
+- Improved testing speed ([#820](https://github.com/PyTorchLightning/metrics/pull/820))
+
+
 ## [0.7.0] - 2022-01-17
 
 ### Added

From 5831ab386e75b1b59ff6f6fe950e84bf1be611f0 Mon Sep 17 00:00:00 2001
From: Nicki Skafte Detlefsen
Date: Fri, 4 Feb 2022 09:52:23 +0100
Subject: [PATCH 05/17] Update tests/helpers/testers.py

Co-authored-by: Jirka Borovec
---
 tests/helpers/testers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/helpers/testers.py b/tests/helpers/testers.py
index 4962e2414c7..4299b8eea05 100644
--- a/tests/helpers/testers.py
+++ b/tests/helpers/testers.py
@@ -33,7 +33,7 @@
     pass
 
 NUM_PROCESSES = 2
-NUM_BATCHES = 8 # Need to be divisible with the number of processes
+NUM_BATCHES = 4 # Need to be divisible with the number of processes
 BATCH_SIZE = 32
 NUM_CLASSES = 5
 EXTRA_DIM = 3

From 4b1a34cda8f58acd9f412b9ae7374aeeed9636f3 Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Fri, 4 Feb 2022 10:06:31 +0100
Subject: [PATCH 06/17] requirements

---
 requirements/image.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements/image.txt b/requirements/image.txt
index f2604f26706..453dcdd02d2 100644
--- a/requirements/image.txt
+++ b/requirements/image.txt
@@ -2,3 +2,4 @@ scipy
 torchvision # this is needed to internally set TV version according installed PT
 torch-fidelity
 lpips
+PIL==8.4.0

From c2c439a2831527098b0de458d9bff37e9ce18beb Mon Sep 17 00:00:00 2001
From: Nicki Skafte Detlefsen
Date: Fri, 4 Feb 2022 10:11:27 +0100
Subject: [PATCH 07/17] Update requirements/image.txt

---
 requirements/image.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements/image.txt b/requirements/image.txt
index 453dcdd02d2..a4a09d3ac56 100644
--- a/requirements/image.txt
+++ b/requirements/image.txt
@@ -2,4 +2,4 @@ scipy
 torchvision # this is needed to internally set TV version according installed PT
 torch-fidelity
 lpips
-PIL==8.4.0
+Pillow==8.4.0

From 050e2f6323ab979663d5e2c38bcf3ca20195e09e Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Fri, 4 Feb 2022 10:31:54 +0100
Subject: [PATCH 08/17] fix tests

---
 tests/classification/test_accuracy.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/classification/test_accuracy.py b/tests/classification/test_accuracy.py
index d8e1cef230a..bb6d7be509b 100644
--- a/tests/classification/test_accuracy.py
+++ b/tests/classification/test_accuracy.py
@@ -105,8 +105,8 @@ def test_accuracy_fn(self, preds, target, subset_accuracy):
 
     def test_accuracy_differentiability(self, preds, target, subset_accuracy):
         self.run_differentiability_test(
-            preds=preds,
-            target=target,
+            preds=preds.unsqueeze(0),
+            target=target.unsqueeze(0),
             metric_module=Accuracy,
             metric_functional=accuracy,
             metric_args={"threshold": THRESHOLD, "subset_accuracy": subset_accuracy},

From 85627e5f495992290bea106bfc6e79f8d811baab Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Fri, 4 Feb 2022 10:36:40 +0100
Subject: [PATCH 09/17] better solution

---
 tests/audio/test_pit.py               | 4 ++--
 tests/classification/test_accuracy.py | 4 ++--
 tests/helpers/testers.py              | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/audio/test_pit.py b/tests/audio/test_pit.py
index 3b683befb43..c668f3a77b7 100644
--- a/tests/audio/test_pit.py
+++ b/tests/audio/test_pit.py
@@ -145,8 +145,8 @@ def pit_diff(preds, target, metric_func, eval_func):
             return permutation_invariant_training(preds, target, metric_func, eval_func)[0]
 
         self.run_differentiability_test(
-            preds=preds.unsqueeze(0),
-            target=target.unsqueeze(0),
+            preds=preds,
+            target=target,
             metric_module=PermutationInvariantTraining,
             metric_functional=pit_diff,
             metric_args={"metric_func": metric_func, "eval_func": eval_func},
diff --git a/tests/classification/test_accuracy.py b/tests/classification/test_accuracy.py
index bb6d7be509b..d8e1cef230a 100644
--- a/tests/classification/test_accuracy.py
+++ b/tests/classification/test_accuracy.py
@@ -105,8 +105,8 @@ def test_accuracy_fn(self, preds, target, subset_accuracy):
 
     def test_accuracy_differentiability(self, preds, target, subset_accuracy):
         self.run_differentiability_test(
-            preds=preds.unsqueeze(0),
-            target=target.unsqueeze(0),
+            preds=preds,
+            target=target,
             metric_module=Accuracy,
             metric_functional=accuracy,
             metric_args={"threshold": THRESHOLD, "subset_accuracy": subset_accuracy},
diff --git a/tests/helpers/testers.py b/tests/helpers/testers.py
index 4299b8eea05..177c7823032 100644
--- a/tests/helpers/testers.py
+++ b/tests/helpers/testers.py
@@ -545,7 +545,7 @@ def run_differentiability_test(
         metric = metric_module(**metric_args)
         if preds.is_floating_point():
             preds.requires_grad = True
-            out = metric(preds[0, 0], target[0, 0])
+            out = metric(preds[0, :1], target[0, :1])
 
             # Check if requires_grad matches is_differentiable attribute
             _assert_requires_grad(metric, out)
@@ -553,7 +553,7 @@
         if metric.is_differentiable and metric_functional is not None:
             # check for numerical correctness
             assert torch.autograd.gradcheck(
-                partial(metric_functional, **metric_args), (preds[0, 0].double(), target[0, 0])
+                partial(metric_functional, **metric_args), (preds[0, :1].double(), target[0, :1])
             )
 
             # reset as else it will carry over to other tests

From fc166239f86dd69de1cead32b8d1bfad294da32e Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Fri, 4 Feb 2022 11:03:09 +0100
Subject: [PATCH 10/17] fix tests

---
 tests/classification/test_auroc.py             | 11 ++++-------
 tests/classification/test_average_precision.py |  5 +++--
 tests/helpers/testers.py                       |  4 ++--
 3 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/tests/classification/test_auroc.py b/tests/classification/test_auroc.py
index f616c8c341d..8fc0f6fcd6d 100644
--- a/tests/classification/test_auroc.py
+++ b/tests/classification/test_auroc.py
@@ -86,7 +86,6 @@ def _sk_auroc_multilabel_multidim_prob(preds, target, num_classes, average="macr
     )
 
-@pytest.mark.parametrize("average", ["macro", "weighted", "micro"])
 @pytest.mark.parametrize("max_fpr", [None, 0.8, 0.5])
 @pytest.mark.parametrize(
     "preds, target, sk_metric, num_classes",
     [
@@ -99,6 +98,7 @@ def _sk_auroc_multilabel_multidim_prob(preds, target, num_classes, average="macr
     ],
 )
 class TestAUROC(MetricTester):
+    @pytest.mark.parametrize("average", ["macro", "weighted", "micro"])
     @pytest.mark.parametrize("ddp", [True, False])
     @pytest.mark.parametrize("dist_sync_on_step", [True, False])
     def test_auroc(self, preds, target, sk_metric, num_classes, average, max_fpr, ddp, dist_sync_on_step):
@@ -124,6 +124,7 @@ def test_auroc(self, preds, target, sk_metric, num_classes, average, max_fpr, dd
             metric_args={"num_classes": num_classes, "average": average, "max_fpr": max_fpr},
         )
 
+    @pytest.mark.parametrize("average", ["macro", "weighted", "micro"])
     def test_auroc_functional(self, preds, target, sk_metric, num_classes, average, max_fpr):
         # max_fpr different from None is not support in multi class
         if max_fpr is not None and num_classes != 1:
             pytest.skip("max_fpr parameter not support for multi class or multi label")
@@ -145,7 +146,7 @@ def test_auroc_functional(self, preds, target, sk_metric, num_classes, average,
             metric_args={"num_classes": num_classes, "average": average, "max_fpr": max_fpr},
         )
 
-    def test_auroc_differentiability(self, preds, target, sk_metric, num_classes, average, max_fpr):
+    def test_auroc_differentiability(self, preds, target, sk_metric, num_classes, max_fpr):
         # max_fpr different from None is not support in multi class
         if max_fpr is not None and num_classes != 1:
             pytest.skip("max_fpr parameter not support for multi class or multi label")
@@ -154,16 +155,12 @@ def test_auroc_differentiability(self, preds, target, sk_metric, num_classes, av
         if max_fpr is not None and _TORCH_LOWER_1_6:
             pytest.skip("requires torch v1.6 or higher to test max_fpr argument")
 
-        # average='micro' only supported for multilabel
-        if average == "micro" and preds.ndim > 2 and preds.ndim == target.ndim + 1:
-            pytest.skip("micro argument only support for multilabel input")
-
         self.run_differentiability_test(
             preds=preds,
             target=target,
             metric_module=AUROC,
             metric_functional=auroc,
-            metric_args={"num_classes": num_classes, "average": average, "max_fpr": max_fpr},
+            metric_args={"num_classes": num_classes, "max_fpr": max_fpr},
         )
diff --git a/tests/classification/test_average_precision.py b/tests/classification/test_average_precision.py
index cb44624c99f..32713c9986e 100644
--- a/tests/classification/test_average_precision.py
+++ b/tests/classification/test_average_precision.py
@@ -85,8 +85,8 @@ def _sk_avg_prec_multidim_multiclass_prob(preds, target, num_classes=1, average=
         (_input_multilabel.preds, _input_multilabel.target, _sk_avg_prec_multilabel_prob, NUM_CLASSES),
     ],
 )
-@pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
 class TestAveragePrecision(MetricTester):
+    @pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
     @pytest.mark.parametrize("ddp", [True, False])
     @pytest.mark.parametrize("dist_sync_on_step", [True, False])
     def test_average_precision(self, preds, target, sk_metric, num_classes, average, ddp, dist_sync_on_step):
@@ -103,6 +103,7 @@ def test_average_precision(self, preds, target, sk_metric, num_classes, average,
             metric_args={"num_classes": num_classes, "average": average},
         )
 
+    @pytest.mark.parametrize("average", ["micro", "macro", "weighted", None])
     def test_average_precision_functional(self, preds, target, sk_metric, num_classes, average):
         if target.max() > 1 and average == "micro":
             pytest.skip("average=micro and multiclass input cannot be used together")
@@ -115,7 +116,7 @@ def test_average_precision_functional(self, preds, target, sk_metric, num_classe
             metric_args={"num_classes": num_classes, "average": average},
         )
 
-    def test_average_precision_differentiability(self, preds, sk_metric, target, num_classes, average):
+    def test_average_precision_differentiability(self, preds, sk_metric, target, num_classes):
         self.run_differentiability_test(
             preds=preds,
             target=target,
diff --git a/tests/helpers/testers.py b/tests/helpers/testers.py
index 177c7823032..9f0f7a28166 100644
--- a/tests/helpers/testers.py
+++ b/tests/helpers/testers.py
@@ -545,7 +545,7 @@ def run_differentiability_test(
         metric = metric_module(**metric_args)
         if preds.is_floating_point():
             preds.requires_grad = True
-            out = metric(preds[0, :1], target[0, :1])
+            out = metric(preds[0, :2], target[0, :2])
 
             # Check if requires_grad matches is_differentiable attribute
             _assert_requires_grad(metric, out)
@@ -553,7 +553,7 @@
         if metric.is_differentiable and metric_functional is not None:
             # check for numerical correctness
             assert torch.autograd.gradcheck(
-                partial(metric_functional, **metric_args), (preds[0, :1].double(), target[0, :1])
+                partial(metric_functional, **metric_args), (preds[0, :2].double(), target[0, :2])
             )
 
             # reset as else it will carry over to other tests

From 0c0d005b17ffe76e62573d24487eaa809302c5cc Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Fri, 4 Feb 2022 11:30:51 +0100
Subject: [PATCH 11/17] seeding

---
 tests/classification/inputs.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/classification/inputs.py b/tests/classification/inputs.py
index a0683dd4235..df7cfe4b496 100644
--- a/tests/classification/inputs.py
+++ b/tests/classification/inputs.py
@@ -15,8 +15,11 @@
 
 import torch
 
+from tests.helpers import seed_all
 from tests.helpers.testers import BATCH_SIZE, EXTRA_DIM, NUM_BATCHES, NUM_CLASSES
 
+seed_all(1)
+
 Input = namedtuple("Input", ["preds", "target"])
 
 _input_binary_prob = Input(

From e3b117441a0c550bf3e7a7857c67b47e4b1e13ae Mon Sep 17 00:00:00 2001
From: Jirka
Date: Fri, 4 Feb 2022 11:56:43 +0100
Subject: [PATCH 12/17] helpers

---
 .github/assistant.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/assistant.py b/.github/assistant.py
index d5bab2c7d79..3f49ccb0458 100644
--- a/.github/assistant.py
+++ b/.github/assistant.py
@@ -25,7 +25,7 @@
 _REQUEST_TIMEOUT = 10
 _PATH_ROOT = os.path.dirname(os.path.dirname(__file__))
-_PKG_WIDE_SUBPACKAGES = ("utilities",)
+_PKG_WIDE_SUBPACKAGES = ("utilities", "helpers")
 LUT_PYTHON_TORCH = {
     "3.8": "1.4",
     "3.9": "1.7.1",

From 0220d9c21f53286898e11938e04fcfb8d4840c9c Mon Sep 17 00:00:00 2001
From: SkafteNicki
Date: Fri, 4 Feb 2022 15:18:24 +0100
Subject: [PATCH 13/17] trying

---
 .github/workflows/ci_test-full.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/ci_test-full.yml b/.github/workflows/ci_test-full.yml
index cdd26c9831e..104981ab7e5 100644
--- a/.github/workflows/ci_test-full.yml
+++ b/.github/workflows/ci_test-full.yml
@@ -56,6 +56,7 @@ jobs:
     env:
       PYTEST_ARTEFACT: test-results-${{ matrix.os }}-py${{ matrix.python-version }}-${{ matrix.requires }}.xml
       PYTORCH_URL: https://download.pytorch.org/whl/cpu/torch_stable.html
+      TRANSFORMERS_CACHE: .cache/huggingface/
 
     # Timeout: https://stackoverflow.com/a/59076067/4521646
     # seems that MacOS jobs take much more than orger OS

From 7fc3f377dd978f7afb910b82ac6bf5ef695e45bc Mon Sep 17 00:00:00 2001
From: Jirka
Date: Fri, 4 Feb 2022 21:56:28 +0100
Subject: [PATCH 14/17] ci

---
 .github/workflows/ci_integrate.yml  | 5 ++++-
 .github/workflows/ci_test-conda.yml | 2 ++
 .github/workflows/ci_test-full.yml  | 5 ++++-
 requirements/image.txt              | 1 -
 4 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/ci_integrate.yml b/.github/workflows/ci_integrate.yml
index 14003a685dc..39daacbca76 100644
--- a/.github/workflows/ci_integrate.yml
+++ b/.github/workflows/ci_integrate.yml
@@ -59,7 +59,10 @@ jobs:
     - name: Freeze Numpy (hotfix)
       if: matrix.requires == 'latest'
-      run: pip install -q "numpy==1.20.0" # temp fix for cypesq
+      run: |
+        pip install -q "numpy==1.20.0" # temp fix for cypesq
+        # import of PILLOW_VERSION which they recently removed in v9.0 in favor of __version__
+        pip install -q "Pillow<9.0" # It messes with torchvision
 
     - name: Install all dependencies
       run: |
diff --git a/.github/workflows/ci_test-conda.yml b/.github/workflows/ci_test-conda.yml
index f7ac2ef6917..8910d544c21 100644
--- a/.github/workflows/ci_test-conda.yml
+++ b/.github/workflows/ci_test-conda.yml
@@ -97,6 +97,8 @@ jobs:
         python ./.github/assistant.py prune-packages requirements/image.txt torchvision
         python ./.github/assistant.py prune-packages requirements/detection.txt torchvision
         pip install -q "numpy==1.20.0" # try to fix cocotools for PT 1.4 & 1.9
+        # import of PILLOW_VERSION which they recently removed in v9.0 in favor of __version__
+        pip install -q "Pillow<9.0" # It messes with torchvision
         pip install --requirement requirements.txt --quiet
         pip install --requirement requirements/devel.txt --quiet
         pip list
diff --git a/.github/workflows/ci_test-full.yml b/.github/workflows/ci_test-full.yml
index 104981ab7e5..37f7c72f14e 100644
--- a/.github/workflows/ci_test-full.yml
+++ b/.github/workflows/ci_test-full.yml
@@ -97,7 +97,10 @@ jobs:
     - name: Freeze Numpy (hotfix)
       if: matrix.requires == 'latest'
-      run: pip install -q "numpy==1.20.0" # temp fix for cypesq
+      run: |
+        pip install -q "numpy==1.20.0" # temp fix for cypesq
+        # import of PILLOW_VERSION which they recently removed in v9.0 in favor of __version__
+        pip install -q "Pillow<9.0" # It messes with torchvision
 
     - name: Install all dependencies
       run: |
diff --git a/requirements/image.txt b/requirements/image.txt
index a4a09d3ac56..f2604f26706 100644
--- a/requirements/image.txt
+++ b/requirements/image.txt
@@ -2,4 +2,3 @@ scipy
 torchvision # this is needed to internally set TV version according installed PT
 torch-fidelity
 lpips
-Pillow==8.4.0

From 075cfdee7c4f4882a0d4fd5e7928ad04830c2df7 Mon Sep 17 00:00:00 2001
From: Jirka
Date: Fri, 4 Feb 2022 22:11:26 +0100
Subject: [PATCH 15/17] conda

---
 .github/workflows/ci_test-conda.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/ci_test-conda.yml b/.github/workflows/ci_test-conda.yml
index 8910d544c21..e8848d18057 100644
--- a/.github/workflows/ci_test-conda.yml
+++ b/.github/workflows/ci_test-conda.yml
@@ -13,7 +13,7 @@ concurrency:
 
 defaults:
   run:
-    shell: bash -l {0}
+    shell: bash
 
 jobs:
   # ToDo: consider unifying in a single workflow and distributing outputs to all others depending ones
@@ -103,6 +103,7 @@ jobs:
         pip install --requirement requirements/devel.txt --quiet
         pip list
         python -c "from torch import __version__ as ver; assert '.'.join(ver.split('.')[:2]) == '${{ matrix.pytorch-version }}', ver"
+      shell: bash -l {0}
 
     - name: Testing
       env:
@@ -110,6 +111,7 @@ jobs:
       run: |
         # NOTE: run coverage on tests does not propagare faler status for Win, https://github.com/nedbat/coveragepy/issues/1003
         python -m pytest torchmetrics $TEST_DIRS --durations=50 --junitxml=$PYTEST_ARTEFACT
+      shell: bash -l {0}
 
     - name: Upload pytest test results
       uses: actions/upload-artifact@master

From 499db56b9f0700ceb04d59a6a80f85ea440108e1 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Fri, 4 Feb 2022 22:12:23 +0100
Subject: [PATCH 16/17] cache

---
 .github/workflows/ci_test-full.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/ci_test-full.yml b/.github/workflows/ci_test-full.yml
index 37f7c72f14e..42567da497d 100644
--- a/.github/workflows/ci_test-full.yml
+++ b/.github/workflows/ci_test-full.yml
@@ -56,7 +56,6 @@ jobs:
     env:
       PYTEST_ARTEFACT: test-results-${{ matrix.os }}-py${{ matrix.python-version }}-${{ matrix.requires }}.xml
       PYTORCH_URL: https://download.pytorch.org/whl/cpu/torch_stable.html
-      TRANSFORMERS_CACHE: .cache/huggingface/
 
     # Timeout: https://stackoverflow.com/a/59076067/4521646
     # seems that MacOS jobs take much more than orger OS

From f8ca3b5f56c94708bc88876421a3abe98a3782d3 Mon Sep 17 00:00:00 2001
From: Jirka
Date: Fri, 4 Feb 2022 23:17:52 +0100
Subject: [PATCH 17/17] .

---
 .github/workflows/ci_test-conda.yml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/.github/workflows/ci_test-conda.yml b/.github/workflows/ci_test-conda.yml
index e8848d18057..8910d544c21 100644
--- a/.github/workflows/ci_test-conda.yml
+++ b/.github/workflows/ci_test-conda.yml
@@ -13,7 +13,7 @@ concurrency:
 
 defaults:
   run:
-    shell: bash
+    shell: bash -l {0}
 
 jobs:
   # ToDo: consider unifying in a single workflow and distributing outputs to all others depending ones
@@ -103,7 +103,6 @@ jobs:
         pip install --requirement requirements/devel.txt --quiet
         pip list
         python -c "from torch import __version__ as ver; assert '.'.join(ver.split('.')[:2]) == '${{ matrix.pytorch-version }}', ver"
-      shell: bash -l {0}
 
     - name: Testing
       env:
@@ -111,7 +110,6 @@ jobs:
       run: |
         # NOTE: run coverage on tests does not propagare faler status for Win, https://github.com/nedbat/coveragepy/issues/1003
         python -m pytest torchmetrics $TEST_DIRS --durations=50 --junitxml=$PYTEST_ARTEFACT
-      shell: bash -l {0}
 
     - name: Upload pytest test results
       uses: actions/upload-artifact@master