From 6f834ce8d2120259070fda6ba0d2cff0614e3b5f Mon Sep 17 00:00:00 2001
From: willfengg
Date: Sat, 27 Apr 2024 23:55:00 -0700
Subject: [PATCH] skip unit test if no cuda

Summary:

Test Plan:

Reviewers:

Subscribers:

Tasks:

Tags:
---
 test/dtypes/test_nf4.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/test/dtypes/test_nf4.py b/test/dtypes/test_nf4.py
index 641411360b..908272c397 100644
--- a/test/dtypes/test_nf4.py
+++ b/test/dtypes/test_nf4.py
@@ -371,6 +371,7 @@ def test_tensor_as_strided_invalid(self, input_size: Union[Tuple[int], int]):
         stride = (nf4_tensor.stride()[1], nf4_tensor.stride()[0])
         torch.as_strided(nf4_tensor, nf4_tensor.size(), stride, nf4_tensor.storage_offset())
 
+    @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
     def test_pin_memory(self):
         nf4_tensor = to_nf4(torch.randn(512 * 512))
         self.assertFalse(nf4_tensor.is_pinned())
@@ -378,9 +379,9 @@ def test_pin_memory(self):
         nf4_tensor = nf4_tensor.pin_memory()
         self.assertTrue(nf4_tensor.is_pinned())
 
-        if torch.cuda.is_available():
-            nf4_tensor = to_nf4(torch.randn(512 * 512, device='cuda'))
-            self.assertFalse(nf4_tensor.is_pinned())
+        nf4_tensor = to_nf4(torch.randn(512 * 512, device='cuda'))
+        self.assertFalse(nf4_tensor.is_pinned())
 
+    @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
     def test_to_cuda(self):
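
Note (not part of the patch): below is a minimal standalone sketch of the unittest.skipIf pattern this patch applies, so CUDA-only tests are reported as skipped rather than failing on CPU-only machines. The class name and test name are hypothetical and exist only for illustration.

# Minimal sketch, assuming torch is installed. When torch.cuda.is_available()
# returns False, the decorated test is skipped with the given reason instead
# of erroring out on a CPU-only machine.
import unittest

import torch


class ExampleCudaOnlyTest(unittest.TestCase):  # hypothetical test class
    @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
    def test_runs_only_with_cuda(self):  # hypothetical test name
        # Allocate directly on the GPU; this line only runs when CUDA exists.
        x = torch.randn(4, device="cuda")
        self.assertEqual(x.device.type, "cuda")


if __name__ == "__main__":
    unittest.main()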