@@ -65,6 +65,7 @@ def to_tensor(X, device):
 
 class TestFakeQuantizePerTensor(TestCase):
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.tensor(shapes=hu.array_shapes(1, 5,),
                        qparams=hu.qparams(dtypes=torch.quint8)))
@@ -205,6 +206,7 @@ def test_fake_quant_control(self):
 
 class TestFakeQuantizePerChannel(TestCase):
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                    qparams=hu.qparams(dtypes=torch.quint8)))
@@ -224,6 +226,7 @@ def test_forward_per_channel(self, device, X):
             X, scale, zero_point, axis, quant_min, quant_max)
         np.testing.assert_allclose(Y, Y_prime.cpu(), rtol=tolerance, atol=tolerance)
 
+    @unittest.skip("temporarily disable the test")
     @given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
            X=hu.per_channel_tensor(shapes=hu.array_shapes(1, 5,),
                                    qparams=hu.qparams(dtypes=torch.quint8)))
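
For reference, the decorator being added here is the standard library's `unittest.skip(reason)`: the runner reports the decorated test as skipped without executing its body, and because the skip decorator sits above `@given`, Hypothesis never generates examples for the disabled test. A minimal sketch of that behavior (the class and test names below are hypothetical, not from this diff):

import unittest

class Example(unittest.TestCase):
    @unittest.skip("temporarily disable the test")
    def test_disabled(self):
        # Never executed; the runner marks this test as skipped.
        self.fail("should not run")

    def test_enabled(self):
        self.assertEqual(1 + 1, 2)

if __name__ == "__main__":
    unittest.main()  # summary line reads "OK (skipped=1)"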