From f14341a8f6778577f04737d42ac966b16ed1d545 Mon Sep 17 00:00:00 2001
From: Justin Ngo
Date: Wed, 12 Mar 2025 18:11:20 +0000
Subject: [PATCH 1/5] [TOSA] TOSA updates for LLVM hash 6d38dbf

1: [TOSA] Update rescale input_/output_zp and double_round attribute

* Update tosa.rescale input_/output_zp as inputs according to TOSA 1.0

* Update double_round bool attribute to rounding_mode in alignment with
  TOSA 1.0. rounding_mode supports "SINGLE_ROUND", "INEXACT_ROUND", and
  "DOUBLE_ROUND". Existing double_round behaviours are mapped as follows:
  - double_round = true -> rounding_mode = "DOUBLE_ROUND"
  - double_round = false -> rounding_mode = "SINGLE_ROUND"

2: [TOSA] Update tosa.negate's zero-points to inputs

Update LIT tests and XFAIL sets
---
 .../TorchToTosa/TosaLegalizeUtils.h    |  2 +-
 .../TorchToTosa/TosaLegalizeCommon.cpp |  2 +-
 .../TorchToTosa/TosaLegalizeUtils.cpp  | 64 +++++++++++++------
 projects/pt1/e2e_testing/xfail_sets.py |  4 ++
 test/Conversion/TorchToTosa/basic.mlir | 31 +++++----
 5 files changed, 68 insertions(+), 35 deletions(-)

diff --git a/include/torch-mlir/Conversion/TorchToTosa/TosaLegalizeUtils.h b/include/torch-mlir/Conversion/TorchToTosa/TosaLegalizeUtils.h
index 1b944b9a1ea4..9f4d1b014c4d 100644
--- a/include/torch-mlir/Conversion/TorchToTosa/TosaLegalizeUtils.h
+++ b/include/torch-mlir/Conversion/TorchToTosa/TosaLegalizeUtils.h
@@ -26,7 +26,7 @@ namespace tosa {
 // rounding mode
 Value buildRescale(PatternRewriter &rewriter, Operation *op,
                    ShapedType output_type, Value input_val, double scale,
-                   int64_t input_zp, int64_t output_zp, bool double_round,
+                   int64_t input_zp, int64_t output_zp, StringRef rounding_mode,
                    bool scale32);
 
 // Creates TOSA rescale op with int32 output
diff --git a/lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp b/lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp
index bf5d665af097..02d1390ed148 100644
--- a/lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp
+++ b/lib/Conversion/TorchToTosa/TosaLegalizeCommon.cpp
@@ -777,7 +777,7 @@ std::optional<Value> convertReduceOpCommon(
       RankedTensorType output_rescale_type =
           RankedTensorType::get(shape_vec, output_type.getElementType());
       val = buildRescale(rewriter, op, output_rescale_type, val, output_scale,
-                         0, output_zp, false, true);
+                         0, output_zp, "SINGLE_ROUND", true);
     }
 
     // Optionally squeeze out the reduced axes.
diff --git a/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp b/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp
index 5ecd74d398ad..9c1d6d3d0d37 100644
--- a/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp
+++ b/lib/Conversion/TorchToTosa/TosaLegalizeUtils.cpp
@@ -34,14 +34,15 @@ Value buildRescaleMultiplier(bool scale32, PatternRewriter &rewriter,
 // rounding mode
 Value buildRescale(PatternRewriter &rewriter, Operation *op,
                    ShapedType output_type, Value input_val, double scale,
-                   int64_t input_zp, int64_t output_zp, bool double_round,
+                   int64_t input_zp, int64_t output_zp, StringRef rounding_mode,
                    bool scale32) {
   int32_t multiplier;
   int32_t shift;
 
   int32_t scale_width = scale32 ? 32 : 16;
 
-  computeMultiplierAndShift(scale, multiplier, shift, scale_width);
+  if (!computeMultiplierAndShift(scale, multiplier, shift, scale_width))
+    op->emitError("buildRescale: shift must be in the range 2 <= shift <= 62");
 
   Value multiplier_val =
       buildRescaleMultiplier(scale32, rewriter, op, {multiplier});
@@ -52,11 +53,23 @@ Value buildRescale(PatternRewriter &rewriter, Operation *op,
   bool input_unsigned = input_val.getType().isUnsignedInteger();
   bool output_unsigned = output_type.isUnsignedInteger();
 
+  // Create an input_zp that matches the input type and an output_zp that
+  // matches the output type of the RescaleOp
+  const auto input_zp_val = tosa::createZeroPointTensor(
+      rewriter, op->getLoc(), dyn_cast<ShapedType>(input_val.getType()),
+      input_zp);
+  if (!input_zp_val.has_value())
+    op->emitError("Failed to create input zero-point tensor for RescaleOp.");
+
+  const auto output_zp_val = tosa::createZeroPointTensor(
+      rewriter, op->getLoc(), output_type, output_zp);
+  if (!output_zp_val.has_value())
+    op->emitError("Failed to create output zero-point tensor for RescaleOp.");
+
   auto rescale_op = CreateOpAndInfer<tosa::RescaleOp>(
       rewriter, op->getLoc(), output_type, input_val, multiplier_val, shift_val,
-      rewriter.getI32IntegerAttr(static_cast<int32_t>(input_zp)),
-      rewriter.getI32IntegerAttr(static_cast<int32_t>(output_zp)),
-      rewriter.getBoolAttr(scale32), rewriter.getBoolAttr(double_round),
+      input_zp_val.value(), output_zp_val.value(),
+      rewriter.getBoolAttr(scale32), rewriter.getStringAttr(rounding_mode),
       rewriter.getBoolAttr(false), rewriter.getBoolAttr(input_unsigned),
       rewriter.getBoolAttr(output_unsigned));
 
@@ -73,7 +86,7 @@ Value buildRescaleToInt32(PatternRewriter &rewriter, Operation *op,
   auto output_type = input_type.clone(rewriter.getI32Type());
 
   return buildRescale(rewriter, op, output_type, input_val, input_scale,
-                      input_zp, 0, false, true);
+                      input_zp, 0, "SINGLE_ROUND", true);
 }
 
 // Creates a TOSA rescale op based on conv2d parameters.
@@ -96,6 +109,16 @@ Value buildRescaleOpConvOutput(PatternRewriter &rewriter, Operation *op,
   bool input_unsigned = input_qtype.isUnsignedInteger();
   bool output_unsigned = output_qtype.isUnsignedInteger();
 
+  const auto input_zp_val = tosa::createZeroPointTensor(
+      rewriter, op->getLoc(), input_type, static_cast<int64_t>(0));
+  if (!input_zp_val.has_value())
+    op->emitError("Failed to create input zero-point tensor for RescaleOp.");
+
+  const auto output_zp_val = tosa::createZeroPointTensor(
+      rewriter, op->getLoc(), output_type, output_zp);
+  if (!output_zp_val.has_value())
+    op->emitError("Failed to create output zero-point tensor for RescaleOp.");
+
   if (auto weight_per_tensor_qtype =
           dyn_cast<mlir::quant::UniformQuantizedType>(
               weight_type.getElementType())) {
@@ -107,7 +130,11 @@ Value buildRescaleOpConvOutput(PatternRewriter &rewriter, Operation *op,
 
     double op_tensor_scale = (input_scale * weight_scale) / output_scale;
 
-    computeMultiplierAndShift(op_tensor_scale, multiplier, shift, scale_width);
+    if (!computeMultiplierAndShift(op_tensor_scale, multiplier, shift,
+                                   scale_width))
+      op->emitError(
+          "buildRescaleOpConvOutput: shift must be in the range 2 <= shift <= "
+          "62");
 
     Value multiplier_val =
         buildRescaleMultiplier(scale32, rewriter, op, {multiplier});
@@ -117,10 +144,9 @@ Value buildRescaleOpConvOutput(PatternRewriter &rewriter, Operation *op,
 
     auto rescale_op = CreateOpAndInfer<tosa::RescaleOp>(
         rewriter, op->getLoc(), output_type, conv_val, multiplier_val,
-        shift_val, rewriter.getI32IntegerAttr(0),
-        rewriter.getI32IntegerAttr(output_zp), rewriter.getBoolAttr(scale32),
-        rewriter.getBoolAttr(true), rewriter.getBoolAttr(false),
-        rewriter.getBoolAttr(input_unsigned),
+        shift_val, input_zp_val.value(), output_zp_val.value(),
+        rewriter.getBoolAttr(scale32), rewriter.getStringAttr("DOUBLE_ROUND"),
+        rewriter.getBoolAttr(false), rewriter.getBoolAttr(input_unsigned),
         rewriter.getBoolAttr(output_unsigned));
 
     return rescale_op.getResult();
@@ -136,17 +162,16 @@ Value buildRescaleOpConvOutput(PatternRewriter &rewriter, Operation *op,
         weight_per_channel_qtype.getScales().begin(),
         weight_per_channel_qtype.getScales().end());
 
-    int64_t output_zp = output_qtype.getZeroPoint();
-    double output_scale = output_qtype.getScale();
-
     for (double weight_scale : weight_scale_arr) {
       int32_t multiplier;
       int32_t shift;
 
       double op_channel_scale = (input_scale * weight_scale) / output_scale;
 
-      computeMultiplierAndShift(op_channel_scale, multiplier, shift,
-                                scale_width);
+      if (!computeMultiplierAndShift(op_channel_scale, multiplier, shift, 32))
+        op->emitError(
+            "buildRescaleOpConvOutput: shift must be in the range 2 <= shift "
+            "<= 62");
 
       multiplier_arr.push_back(multiplier);
       shift_arr.push_back(static_cast<int8_t>(shift));
@@ -161,10 +186,9 @@ Value buildRescaleOpConvOutput(PatternRewriter &rewriter, Operation *op,
 
     auto rescale_op = CreateOpAndInfer<tosa::RescaleOp>(
         rewriter, op->getLoc(), output_type, conv_val, multiplier_val,
-        shift_val, rewriter.getI32IntegerAttr(0),
-        rewriter.getI32IntegerAttr(output_zp), rewriter.getBoolAttr(scale32),
-        rewriter.getBoolAttr(true), rewriter.getBoolAttr(true),
-        rewriter.getBoolAttr(input_unsigned),
+        shift_val, input_zp_val.value(), output_zp_val.value(),
+        rewriter.getBoolAttr(scale32), rewriter.getStringAttr("DOUBLE_ROUND"),
+        rewriter.getBoolAttr(true), rewriter.getBoolAttr(input_unsigned),
         rewriter.getBoolAttr(output_unsigned));
 
     return rescale_op.getResult();
diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index a06d7f747299..32fadad6913c 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -1727,6 +1727,8 @@
     "ScatterSrcModule_basic",
     "ScatterSrcStaticModule_basic",
     "HBC_basic",
+    # 1D inputs cause generated tosa.negate ops to crash downstream
+    "NllLossModule_1D_basic",
 }
 
 # Write the TOSA set as a "passing" set as it is very early in development
@@ -3382,6 +3384,8 @@
 }
 
 FX_IMPORTER_TOSA_XFAIL_SET = {
+    "NumpyTRank0Module_basic",
+    "Permute0RankModule_basic",
     "ArgsortTensor_basic",
     "ArgsortTensorInteger_basic",
     "AtenSymConstrainRangeForSize_basic",
diff --git a/test/Conversion/TorchToTosa/basic.mlir b/test/Conversion/TorchToTosa/basic.mlir
index 29be340810fd..d5fc7ba850da 100644
--- a/test/Conversion/TorchToTosa/basic.mlir
+++ b/test/Conversion/TorchToTosa/basic.mlir
@@ -94,11 +94,14 @@ func.func @torch.aten.exp$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vten
 // -----
 
 // CHECK-LABEL: func.func @torch.aten.neg$basic(
-// CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> {
-// CHECK: %[[ARG_BUILTIN:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
-// CHECK: %[[RESULT_BUILTIN:.*]] = tosa.negate %[[ARG_BUILTIN]] : (tensor<?x?xf32>) -> tensor<?x?xf32>
-// CHECK: %[[RESULT:.*]] = torch_c.from_builtin_tensor %[[RESULT_BUILTIN]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
-// CHECK: return %[[RESULT]] : !torch.vtensor<[?,?],f32>
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> {
+// CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
+// CHECK: %[[VAL_2:.*]] = "tosa.const"() <{values = dense<0.000000e+00> : tensor<1xf32>}> : () -> tensor<1xf32>
+// CHECK: %[[VAL_3:.*]] = "tosa.const"() <{values = dense<0.000000e+00> : tensor<1xf32>}> : () -> tensor<1xf32>
+// CHECK: %[[VAL_4:.*]] = tosa.negate %[[VAL_1]], %[[VAL_2]], %[[VAL_3]] : (tensor<?x?xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<?x?xf32>
+// CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
+// CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],f32>
+// CHECK: }
 func.func @torch.aten.neg$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> {
   %0 = torch.aten.neg %arg0 : !torch.vtensor<[?,?],f32> -> !torch.vtensor<[?,?],f32>
   return %0 : !torch.vtensor<[?,?],f32>
@@ -1555,20 +1558,22 @@ func.func @torch.aten.tril$basic(%arg0: !torch.vtensor<[2,4], si32>) -> !torch.v
 // -----
 
 // CHECK-LABEL: func.func @torch.aten.min.dim$basic(
-// CHECK-SAME: %[[VAL_0:.*]]: tensor<3x2x3xf32>) -> tensor<3x2x1xf32> {
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: tensor<3x2x3xf32>) -> tensor<3x2x1xf32> {
 // CHECK: %[[VAL_1:.*]] = torch_c.from_builtin_tensor %[[VAL_0]] : tensor<3x2x3xf32> -> !torch.vtensor<[3,2,3],f32>
 // CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_1]] : !torch.vtensor<[3,2,3],f32> -> tensor<3x2x3xf32>
 // CHECK: %[[VAL_3:.*]] = torch.constant.bool true
 // CHECK: %[[VAL_4:.*]] = torch.constant.int 2
-// CHECK: %[[VAL_5:.*]] = tosa.const_shape {values = dense<[3, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
+// CHECK-DAG: %[[VAL_5:.*]] = tosa.const_shape {values = dense<[3, 2]> : tensor<2xindex>} : () -> !tosa.shape<2>
 // CHECK: %[[VAL_6:.*]] = tosa.reduce_min %[[VAL_2]] {axis = 2 : i32} : (tensor<3x2x3xf32>) -> tensor<3x2x1xf32>
 // CHECK: %[[VAL_7:.*]] = torch_c.from_builtin_tensor %[[VAL_6]] : tensor<3x2x1xf32> -> !torch.vtensor<[3,2,1],f32>
-// CHECK: %[[VAL_8:.*]] = tosa.negate %[[VAL_2]] : (tensor<3x2x3xf32>) -> tensor<3x2x3xf32>
-// CHECK: %[[VAL_9:.*]] = tosa.argmax %[[VAL_8]] {axis = 2 : i32} : (tensor<3x2x3xf32>) -> tensor<3x2xi64>
-// CHECK: %[[VAL_10:.*]] = tosa.const_shape {values = dense<[3, 2, 1]> : tensor<3xindex>} : () -> !tosa.shape<3>
-// CHECK: %[[VAL_11:.*]] = tosa.reshape %[[VAL_9]], %[[VAL_10]] : (tensor<3x2xi64>, !tosa.shape<3>) -> tensor<3x2x1xi64>
-// CHECK: %[[VAL_12:.*]] = torch_c.to_builtin_tensor %[[VAL_7]] : !torch.vtensor<[3,2,1],f32> -> tensor<3x2x1xf32>
-// CHECK: return %[[VAL_12]] : tensor<3x2x1xf32>
+// CHECK: %[[VAL_8:.*]] = "tosa.const"() <{values = dense<0.000000e+00> : tensor<1xf32>}> : () -> tensor<1xf32>
+// CHECK: %[[VAL_9:.*]] = "tosa.const"() <{values = dense<0.000000e+00> : tensor<1xf32>}> : () -> tensor<1xf32>
+// CHECK: %[[VAL_10:.*]] = tosa.negate %[[VAL_2]], %[[VAL_8]], %[[VAL_9]] : (tensor<3x2x3xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<3x2x3xf32>
+// CHECK: %[[VAL_11:.*]] = tosa.argmax %[[VAL_10]] {axis = 2 : i32} : (tensor<3x2x3xf32>) -> tensor<3x2xi64>
+// CHECK: %[[VAL_12:.*]] = tosa.const_shape {values = dense<[3, 2, 1]> : tensor<3xindex>} : () -> !tosa.shape<3>
+// CHECK: %[[VAL_13:.*]] = tosa.reshape %[[VAL_11]], %[[VAL_12]] : (tensor<3x2xi64>, !tosa.shape<3>) -> tensor<3x2x1xi64>
+// CHECK: %[[VAL_14:.*]] = torch_c.to_builtin_tensor %[[VAL_7]] : !torch.vtensor<[3,2,1],f32> -> tensor<3x2x1xf32>
+// CHECK: return %[[VAL_14]] : tensor<3x2x1xf32>
 // CHECK: }
 func.func @torch.aten.min.dim$basic(%arg0: tensor<3x2x3xf32>) -> tensor<3x2x1xf32> {
   %0 = torch_c.from_builtin_tensor %arg0 : tensor<3x2x3xf32> -> !torch.vtensor<[3,2,3],f32>

From 8f84285ccf8ff8b8ba3bc4cf3b2bfc132dc71376 Mon Sep 17 00:00:00 2001
From: Vivek Khandelwal
Date: Mon, 24 Mar 2025 20:04:23 +0530
Subject: [PATCH 2/5] Integrate LLVM at 6d38dbf6eb56fd2b3399565af455de96a99ffa0f

---
 externals/llvm-project | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/externals/llvm-project b/externals/llvm-project
index 8885b5c06260..6d38dbf6eb56 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 8885b5c0626065274cb8f8a634d45779a0f6ff2b
+Subproject commit 6d38dbf6eb56fd2b3399565af455de96a99ffa0f

From 67b7bbe9c50c60810b597024b37942e41f76a500 Mon Sep 17 00:00:00 2001
From: Justin Ngo
Date: Wed, 12 Mar 2025 18:11:20 +0000
Subject: [PATCH 3/5] [TOSA] TOSA updates for LLVM hash 72144d1

1: [TOSA] Update rescale input_/output_zp and double_round attribute

* Update tosa.rescale input_/output_zp as inputs according to TOSA 1.0

* Update double_round bool attribute to rounding_mode in alignment with
  TOSA 1.0. rounding_mode supports "SINGLE_ROUND", "INEXACT_ROUND", and
  "DOUBLE_ROUND".
  Existing double_round behaviours are mapped as follows:
  - double_round = true -> rounding_mode = "DOUBLE_ROUND"
  - double_round = false -> rounding_mode = "SINGLE_ROUND"

2: [TOSA] Update tosa.negate's zero-points to inputs

Update LIT tests and XFAIL sets

3: [TOSA] Update tosa.int_div to tosa.intdiv

Update LIT tests
---
 test/Conversion/TorchToTosa/basic.mlir                      | 6 +++---
 .../TorchToTosa/torch-backend-to-tosa-backend-pipeline.mlir | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/test/Conversion/TorchToTosa/basic.mlir b/test/Conversion/TorchToTosa/basic.mlir
index d5fc7ba850da..5ab101bab3bc 100644
--- a/test/Conversion/TorchToTosa/basic.mlir
+++ b/test/Conversion/TorchToTosa/basic.mlir
@@ -1703,7 +1703,7 @@ func.func @torch.aten.div.Tensor_mode$float_trunc(%arg0: !torch.vtensor<[?, ?],f
 // CHECK: %[[VAL_4:.*]] = torch.constant.str "trunc"
 // CHECK: %[[VAL_5:.*]] = tosa.cast %[[VAL_3]] : (tensor<?x?xi64>) -> tensor<?x?xi32>
 // CHECK: %[[VAL_6:.*]] = tosa.cast %[[VAL_2]] : (tensor<?x?xi64>) -> tensor<?x?xi32>
-// CHECK: %[[VAL_7:.*]] = tosa.int_div %[[VAL_5]], %[[VAL_6]] : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<?x?xi32>
+// CHECK: %[[VAL_7:.*]] = tosa.intdiv %[[VAL_5]], %[[VAL_6]] : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<?x?xi32>
 // CHECK: %[[VAL_8:.*]] = tosa.cast %[[VAL_7]] : (tensor<?x?xi32>) -> tensor<?x?xi64>
 // CHECK: %[[VAL_9:.*]] = torch_c.from_builtin_tensor %[[VAL_8]] : tensor<?x?xi64> -> !torch.vtensor<[?,?],si64>
 // CHECK: return %[[VAL_9]] : !torch.vtensor<[?,?],si64>
@@ -1745,7 +1745,7 @@ func.func @torch.aten.div.Tensor_mode$float_floor(%arg0: !torch.vtensor<[?, ?],f
 // CHECK: %[[VAL_4:.*]] = torch.constant.str "floor"
 // CHECK: %[[VAL_5:.*]] = tosa.cast %[[VAL_3]] : (tensor<?x?xi64>) -> tensor<?x?xi32>
 // CHECK: %[[VAL_6:.*]] = tosa.cast %[[VAL_2]] : (tensor<?x?xi64>) -> tensor<?x?xi32>
-// CHECK: %[[VAL_7:.*]] = tosa.int_div %[[VAL_5]], %[[VAL_6]] : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<?x?xi32>
+// CHECK: %[[VAL_7:.*]] = tosa.intdiv %[[VAL_5]], %[[VAL_6]] : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<?x?xi32>
 // CHECK: %[[VAL_8:.*]] = "tosa.const"() <{values = dense<0> : tensor<i32>}> : () -> tensor<i32>
 // CHECK: %[[VAL_9:.*]] = "tosa.const"() <{values = dense<1> : tensor<i32>}> : () -> tensor<i32>
 // CHECK: %[[VAL_10:.*]] = tosa.const_shape {values = dense<1> : tensor<2xindex>} : () -> !tosa.shape<2>
@@ -1802,7 +1802,7 @@ func.func @torch.aten.div.Tensor_mode$float_basic(%arg0: !torch.vtensor<[?, ?],f
 // CHECK: %[[VAL_4:.*]] = torch.constant.str ""
 // CHECK: %[[VAL_5:.*]] = tosa.cast %[[VAL_3]] : (tensor<?x?xi64>) -> tensor<?x?xi32>
 // CHECK: %[[VAL_6:.*]] = tosa.cast %[[VAL_2]] : (tensor<?x?xi64>) -> tensor<?x?xi32>
-// CHECK: %[[VAL_7:.*]] = tosa.int_div %[[VAL_5]], %[[VAL_6]] : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<?x?xi32>
+// CHECK: %[[VAL_7:.*]] = tosa.intdiv %[[VAL_5]], %[[VAL_6]] : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<?x?xi32>
 // CHECK: %[[VAL_8:.*]] = tosa.cast %[[VAL_7]] : (tensor<?x?xi32>) -> tensor<?x?xi64>
 // CHECK: %[[VAL_9:.*]] = torch_c.from_builtin_tensor %[[VAL_8]] : tensor<?x?xi64> -> !torch.vtensor<[?,?],si64>
 // CHECK: return %[[VAL_9]] : !torch.vtensor<[?,?],si64>
diff --git a/test/Conversion/TorchToTosa/torch-backend-to-tosa-backend-pipeline.mlir b/test/Conversion/TorchToTosa/torch-backend-to-tosa-backend-pipeline.mlir
index 84b6ae012b01..0e4dcc29b089 100644
--- a/test/Conversion/TorchToTosa/torch-backend-to-tosa-backend-pipeline.mlir
+++ b/test/Conversion/TorchToTosa/torch-backend-to-tosa-backend-pipeline.mlir
@@ -111,7 +111,7 @@ func.func @torch.aten.div.Tensor$mixed_type_fp(%arg0: !torch.vtensor<[?, ?],f32>
 // CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xi16>,
 // CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xi32>
 // CHECK: %[[VAL_2:.*]] = tosa.cast %[[VAL_0]] : (tensor<?x?xi16>) -> tensor<?x?xi32>
-// CHECK: %[[VAL_3:.*]] = tosa.int_div %[[VAL_2]], %[[VAL_1]] : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<?x?xi32>
+// CHECK: %[[VAL_3:.*]] = tosa.intdiv %[[VAL_2]], %[[VAL_1]] : (tensor<?x?xi32>, tensor<?x?xi32>) -> tensor<?x?xi32>
 func.func @torch.aten.div.Tensor$mixed_type_int(%arg0: !torch.vtensor<[?, ?],si16>, %arg1: !torch.vtensor<[?, ?],si32>) -> !torch.vtensor<[?, ?],si32> {
   %0 = torch.aten.div.Tensor %arg0, %arg1 : !torch.vtensor<[?, ?],si16>, !torch.vtensor<[?, ?],si32> -> !torch.vtensor<[?, ?],si32>
   return %0 : !torch.vtensor<[?, ?],si32>

From 3bcfbce215287a793258af66f3ae2a02ac387f6f Mon Sep 17 00:00:00 2001
From: Vivek Khandelwal
Date: Mon, 14 Apr 2025 09:47:38 +0530
Subject: [PATCH 4/5] Integrate LLVM at 72144d119a7291f8b6b8e022a2947fbe31e66afc

---
 externals/llvm-project          | 2 +-
 test/Dialect/Torch/invalid.mlir | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/externals/llvm-project b/externals/llvm-project
index 6d38dbf6eb56..72144d119a72 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 6d38dbf6eb56fd2b3399565af455de96a99ffa0f
+Subproject commit 72144d119a7291f8b6b8e022a2947fbe31e66afc
diff --git a/test/Dialect/Torch/invalid.mlir b/test/Dialect/Torch/invalid.mlir
index 8f38c66ad154..c863e93fa5fa 100644
--- a/test/Dialect/Torch/invalid.mlir
+++ b/test/Dialect/Torch/invalid.mlir
@@ -184,6 +184,7 @@ func.func @torch.overwrite.tensor.contents(%arg0: !torch.vtensor<[1],f32>, %arg1
 
 // There must be only one module initialize.
 
+// expected-error @+1 {{there must be only one global slot initializer}}
 torch.global_slot.module_initializer {
   torch.initialize.global_slots [
   ]

From 3b87043ca6b7bf830755e88b4f5086994a6a1ede Mon Sep 17 00:00:00 2001
From: Vivek Khandelwal
Date: Mon, 14 Apr 2025 20:05:48 +0530
Subject: [PATCH 5/5] Add failing tests to xfail set

---
 projects/pt1/e2e_testing/xfail_sets.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py
index 32fadad6913c..202378d1f9ac 100644
--- a/projects/pt1/e2e_testing/xfail_sets.py
+++ b/projects/pt1/e2e_testing/xfail_sets.py
@@ -519,7 +519,6 @@
     "ReflectionPad3dModuleFront_basic",
     "ReflectionPad3dModuleBack_basic",
     # RuntimeError: Unknown function SliceOutOfLowerBoundEndIndexModule
-    "SliceOutOfLowerBoundEndIndexModule_basic",
     "NativeGroupNormModule_basic",
 }
 
@@ -534,6 +533,7 @@
     "Aten_TrilinearModuleSumdims_basic",
    "AvgPool2dSingleIntTupleParamsIncludePadModule_basic",
     "AvgPool2dSingleIntTupleParamsModule_basic",
+    "SliceOutOfLowerBoundEndIndexModule_basic",
 }
 
 FX_IMPORTER_STABLEHLO_XFAIL_SET = {
@@ -3381,6 +3381,17 @@
     "VarDimEmptyDimModule_basic",
     # Runtime op verification: rank mismatch in memref.cast
     "ViewSizeFromOtherTensor_basic",
+    "SliceOutOfLowerBoundEndIndexModule_basic",
+    "EmbeddingModuleF16_basic",
+    "EmbeddingModuleI32_basic",
+    "EmbeddingModuleI64_basic",
+    "IndexTensorHackedTwinModule3dInput_basic",
+    "IndexTensorHackedTwinModule_basic",
+    "IndexTensorModule3dInput_basic",
+    "IndexTensorModule_basic",
+    "IndexTensorSelectDimModule_basic",
+    "IndexTensorMultiInputContiguousOneDimDynamic_basic",
+    "IndexTensorMultiInputNonContiguousOneDimDynamic_basic",
 }
 
 FX_IMPORTER_TOSA_XFAIL_SET = {
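
Note on the tosa.negate migration exercised by the LIT tests above: TOSA 1.0 moves the op from a single-operand form to a three-operand form whose input and output zero-points are explicit operands. A minimal before/after sketch, distilled from the basic.mlir CHECK lines in PATCH 1/5 (the SSA names here are illustrative, not taken from the tests):

  // Before: zero-points were implicit in the op.
  %out_old = tosa.negate %x : (tensor<?x?xf32>) -> tensor<?x?xf32>

  // After (TOSA 1.0): zero-points are passed as single-element tensor
  // operands; for floating-point inputs they are plain 0.0 constants.
  %in_zp = "tosa.const"() <{values = dense<0.000000e+00> : tensor<1xf32>}> : () -> tensor<1xf32>
  %out_zp = "tosa.const"() <{values = dense<0.000000e+00> : tensor<1xf32>}> : () -> tensor<1xf32>
  %out_new = tosa.negate %x, %in_zp, %out_zp : (tensor<?x?xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<?x?xf32>

tosa.rescale follows the same direction in these patches: its zero-points become operands built with tosa::createZeroPointTensor, and the old double_round boolean becomes the rounding_mode string attribute using the mapping given in the PATCH 1/5 and PATCH 3/5 commit messages.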