Commit 6a52d17
Author: Samantha Andow

delete failing tests (#679)

1 parent f5ce614 · commit 6a52d17

1 file changed: +0 −24 lines

test/test_ops.py

Lines changed: 0 additions & 24 deletions
@@ -375,13 +375,6 @@ def wrapped_fn(*args, **kwargs):
     skip('svd_lowrank', ''), # fails on cuda, runs okay on cpu
     skip('nn.functional.dropout2d', ''), # fails on cuda, runs okay on cpu

-    # See https://github.com/pytorch/pytorch/issues/69034
-    # RuntimeError: expected scalar type double but found float
-    xfail('minimum'),
-    xfail('min', 'binary'),
-    xfail('maximum'),
-    xfail('max', 'binary'),
-
     # The following don't have a forward-mode AD formula in PyTorch core
     # (check derivatives.yaml).
     xfail('var_mean'),
@@ -675,14 +668,6 @@ def test_vmapvjp(self, device, dtype, op):
     # https://gist.github.com/zou3519/c42d032c0111c6b65235583d391bf7a3
     xfail('nn.functional.linear'),

-    # These are issues that should be fixed in core. See repro in core:
-    # https://github.com/pytorch/functorch/pull/232#discussion_r751405155
-    # RuntimeError: expected scalar type double but found float
-    xfail('minimum'),
-    xfail('min', 'binary'),
-    xfail('maximum'),
-    xfail('max', 'binary'),
-
     # Apprently these support forward AD, but we get "Trying to use forward AD..."
     # These are cases where OpInfo has supports_forward_ad=True, but disables
     # the test
@@ -770,7 +755,6 @@ def test_vmapjvp(self, device, dtype, op):
     xfail('linalg.inv'),
     xfail('linalg.tensorinv'),
     xfail('linalg.matrix_power'),
-    xfail('maximum'),
     xfail('linalg.householder_product'),
     xfail('tensor_split'),
     xfail('quantile'),
@@ -779,13 +763,10 @@ def test_vmapjvp(self, device, dtype, op):
     xfail('linalg.eigvalsh'),
     xfail('fill_'),
     xfail('linalg.cholesky'),
-    xfail('max', 'binary'),
     xfail('nn.functional.gaussian_nll_loss'),
-    xfail('min', 'binary'),
     xfail('std_mean'),
     xfail('double', 'channels_last'),
     xfail('block_diag'),
-    xfail('minimum'),
     xfail('scatter'),
     xfail('matrix_exp'),
     xfail('nanquantile'),
@@ -1148,10 +1129,6 @@ def test_vjpvmap(self, device, dtype, op):
     xfail('lu', ''),
     xfail('lu_solve', ''),
     xfail('lu_unpack', ''),
-    xfail('max', 'binary'),
-    xfail('maximum', ''),
-    xfail('min', 'binary'),
-    xfail('minimum', ''),
     xfail('nanmean', ''),
     xfail('nansum', ''),
     xfail('nn.functional.batch_norm', ''),
@@ -1173,7 +1150,6 @@ def test_vjpvmap(self, device, dtype, op):
     xfail('nn.functional.huber_loss', ''),
     xfail('nn.functional.instance_norm', ''),
     xfail('nn.functional.layer_norm', ''),
-    xfail('nn.functional.leaky_relu', ''),
     xfail('nn.functional.logsigmoid', ''),
     xfail('nn.functional.mse_loss', ''),
     xfail('nn.functional.nll_loss', ''),
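
For readers unfamiliar with these entries: the skip/xfail tuples above feed functorch's OpInfo-driven tests (test_vmapvjp, test_vmapjvp, test_vjpvmap, ...), marking individual (op, variant) pairs as skipped or expected-to-fail. Below is a minimal, hypothetical sketch of that mechanism; the helper names and signatures are illustrative assumptions, not the actual functorch implementation in this commit.

# Hypothetical sketch of the skip/xfail machinery (names are illustrative;
# the real helpers live in functorch's test utilities).
import unittest

def xfail(op_name, variant_name=''):
    # Mark an (op, variant) pair as an expected failure.
    return (op_name, variant_name, True)

def skip(op_name, variant_name=''):
    # Skip the (op, variant) pair outright, without checking the result.
    return (op_name, variant_name, False)

def skip_ops(to_skip):
    # Decorator: consult the skip/xfail set before running a parametrized
    # OpInfo test such as test_vmapvjp(self, device, dtype, op).
    def decorator(fn):
        def wrapped(self, device, dtype, op):
            for op_name, variant, expected_failure in to_skip:
                if (op.name, op.variant_test_name) == (op_name, variant):
                    if expected_failure:
                        # xfail: the test must raise; a silent pass would
                        # indicate the entry is stale and can be removed.
                        with self.assertRaises(Exception):
                            fn(self, device, dtype, op)
                        return
                    raise unittest.SkipTest('listed in skip set')
            return fn(self, device, dtype, op)
        return wrapped
    return decorator

Under that reading, this commit simply drops the minimum/maximum/min-binary/max-binary entries (plus one leaky_relu entry) from the failure lists, so those ops are exercised normally again by the affected tests.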
