@@ -375,13 +375,6 @@ def wrapped_fn(*args, **kwargs):
     skip('svd_lowrank', ''),  # fails on cuda, runs okay on cpu
     skip('nn.functional.dropout2d', ''),  # fails on cuda, runs okay on cpu

-    # See https://github.com/pytorch/pytorch/issues/69034
-    # RuntimeError: expected scalar type double but found float
-    xfail('minimum'),
-    xfail('min', 'binary'),
-    xfail('maximum'),
-    xfail('max', 'binary'),
-
     # The following don't have a forward-mode AD formula in PyTorch core
     # (check derivatives.yaml).
     xfail('var_mean'),
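The xfail entries removed in the hunk above marked the binary min/max ops as expected failures for forward-mode AD because of the dtype issue tracked in pytorch/pytorch#69034 ("expected scalar type double but found float"). Below is a minimal sketch of the kind of call that should now pass once that core fix landed; it assumes the functorch-era jvp API (on newer PyTorch the equivalent entry point is torch.func.jvp), and is illustrative rather than part of this commit:

import torch
from functorch import jvp  # assumption: functorch-era API; newer PyTorch: from torch.func import jvp

x = torch.randn(3, dtype=torch.double)
y = torch.randn(3, dtype=torch.double)
tangents = (torch.ones_like(x), torch.ones_like(y))

# Forward-mode AD through the binary min/max ops that used to be xfail'd above.
# Before the fix referenced by pytorch/pytorch#69034 these calls could raise
# "expected scalar type double but found float"; afterwards they are expected to succeed.
out_min, tan_min = jvp(torch.minimum, (x, y), tangents)
out_max, tan_max = jvp(torch.maximum, (x, y), tangents)
print(tan_min, tan_max)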
@@ -675,14 +668,6 @@ def test_vmapvjp(self, device, dtype, op):
     # https://gist.github.com/zou3519/c42d032c0111c6b65235583d391bf7a3
     xfail('nn.functional.linear'),

-    # These are issues that should be fixed in core. See repro in core:
-    # https://github.com/pytorch/functorch/pull/232#discussion_r751405155
-    # RuntimeError: expected scalar type double but found float
-    xfail('minimum'),
-    xfail('min', 'binary'),
-    xfail('maximum'),
-    xfail('max', 'binary'),
-
     # Apprently these support forward AD, but we get "Trying to use forward AD..."
     # These are cases where OpInfo has supports_forward_ad=True, but disables
     # the test
@@ -770,7 +755,6 @@ def test_vmapjvp(self, device, dtype, op):
     xfail('linalg.inv'),
     xfail('linalg.tensorinv'),
     xfail('linalg.matrix_power'),
-    xfail('maximum'),
     xfail('linalg.householder_product'),
     xfail('tensor_split'),
     xfail('quantile'),
@@ -779,13 +763,10 @@ def test_vmapjvp(self, device, dtype, op):
     xfail('linalg.eigvalsh'),
     xfail('fill_'),
     xfail('linalg.cholesky'),
-    xfail('max', 'binary'),
     xfail('nn.functional.gaussian_nll_loss'),
-    xfail('min', 'binary'),
     xfail('std_mean'),
     xfail('double', 'channels_last'),
     xfail('block_diag'),
-    xfail('minimum'),
     xfail('scatter'),
     xfail('matrix_exp'),
     xfail('nanquantile'),
@@ -1148,10 +1129,6 @@ def test_vjpvmap(self, device, dtype, op):
     xfail('lu', ''),
     xfail('lu_solve', ''),
     xfail('lu_unpack', ''),
-    xfail('max', 'binary'),
-    xfail('maximum', ''),
-    xfail('min', 'binary'),
-    xfail('minimum', ''),
     xfail('nanmean', ''),
     xfail('nansum', ''),
     xfail('nn.functional.batch_norm', ''),
@@ -1173,7 +1150,6 @@ def test_vjpvmap(self, device, dtype, op):
     xfail('nn.functional.huber_loss', ''),
     xfail('nn.functional.instance_norm', ''),
     xfail('nn.functional.layer_norm', ''),
-    xfail('nn.functional.leaky_relu', ''),
     xfail('nn.functional.logsigmoid', ''),
     xfail('nn.functional.mse_loss', ''),
     xfail('nn.functional.nll_loss', ''),
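For context, the skip(...) and xfail(...) entries edited throughout this diff each name an OpInfo (op name plus optional variant) and control how the generated per-op test is decorated: skipped outright, or run as an expected failure. The helpers below are a self-contained illustrative sketch of that pattern, not functorch's actual implementation, and the decorate_for_op name is hypothetical:

import unittest

def xfail(op_name, variant_name=''):
    # Expected failure: the test still runs; a pass is reported as unexpected.
    return (op_name, variant_name, 'xfail')

def skip(op_name, variant_name=''):
    # Skip: the test is not run at all (e.g. flaky or device-dependent failures).
    return (op_name, variant_name, 'skip')

def decorate_for_op(op_name, variant_name, to_skip):
    """Return the decorator to apply to the generated test for this op, if any (hypothetical helper)."""
    for name, variant, kind in to_skip:
        if (name, variant) == (op_name, variant_name):
            return unittest.expectedFailure if kind == 'xfail' else unittest.skip('listed in skip set')
    return lambda test_fn: test_fn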