From 2e03991fce4d8211edfe2e7bd39778c3eb3e4e0f Mon Sep 17 00:00:00 2001
From: Aidyn-A
Date: Fri, 15 Dec 2023 06:26:42 +0000
Subject: [PATCH] [TEST] Skip test_schema_correctness for float8 dtype (#115757)

According to https://github.com/pytorch/pytorch/issues/107256#issuecomment-1705341870,
the ops exercised by `test_schema_correctness` are not yet supported with
`torch.float8_e4m3fn`. Until they are supported, it is best to skip the test.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/115757
Approved by: https://github.com/drisspg
---
 torch/testing/_internal/common_methods_invocations.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index a0cc679395f48e..3810530627caff 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -14228,6 +14228,10 @@ def reference_flatten(input, start_dim=0, end_dim=-1):
             # Sample inputs isn't really parametrized on dtype
             DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes',
                          device_type='cuda'),
+            # "mul_cuda" not implemented for float8_e4m3fn
+            # https://github.com/pytorch/pytorch/issues/107256
+            DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness',
+                         dtypes=(torch.float8_e4m3fn,)),
         )
     ),
     OpInfo(
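
Note (not part of the patch): a minimal sketch of the failure this skip works around, assuming a CUDA device and a PyTorch build that exposes `torch.float8_e4m3fn`. Casting to float8 is supported, but the elementwise mul kernel is not, which is the `"mul_cuda" not implemented` error referenced in the diff comment:

```python
import torch

# Assumption: a CUDA device is available.
# Create float8 tensors via a cast, since the dtype supports conversion.
a = torch.ones(4, device="cuda").to(torch.float8_e4m3fn)
b = torch.ones(4, device="cuda").to(torch.float8_e4m3fn)

try:
    a * b  # No float8_e4m3fn CUDA kernel for mul
except RuntimeError as e:
    # Expected: RuntimeError mentioning "mul_cuda" not implemented
    # for 'Float8_e4m3fn' (see pytorch/pytorch#107256).
    print(e)
```

The `DecorateInfo` entry added by the patch applies `unittest.skip` to `test_schema_correctness` in `TestSchemaCheckModeOpInfo` only for the `torch.float8_e4m3fn` dtype, so coverage for all other dtypes is unchanged.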