@@ -65,15 +65,15 @@ func.func @test_quantizelinear_f8(%arg0: !torch.vtensor<[6],f32>, %arg1: !torch.
 // -----
 
 // CHECK-LABEL: @test_qlinearconv_nobias
-func.func @test_qlinearconv_nobias(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[1,1,1,1],ui8>, %arg4: !torch.vtensor<[1],f32>, %arg5: !torch.vtensor<[1],ui8>, %arg6: !torch.vtensor<[],f32>, %arg7: !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,7,7],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
-  %0 = torch.operator "onnx.QLinearConv"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7) : (!torch.vtensor<[1,1,7,7],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[1,1,1,1],ui8>, !torch.vtensor<[1],f32>, !torch.vtensor<[1],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,7,7],ui8>
+func.func @test_qlinearconv_nobias(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[1,1,1,1],ui8>, %arg4: !torch.vtensor<[],f32>, %arg5: !torch.vtensor<[],ui8>, %arg6: !torch.vtensor<[],f32>, %arg7: !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,7,7],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  %0 = torch.operator "onnx.QLinearConv"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7) : (!torch.vtensor<[1,1,7,7],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[1,1,1,1],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,7,7],ui8>
   // CHECK: %[[aZp:.+]] = torch.aten.item %arg2 : !torch.vtensor<[],ui8> -> !torch.int
-  // CHECK: %[[bZp:.+]] = torch.aten.item %arg5 : !torch.vtensor<[1],ui8> -> !torch.int
   // CHECK: %[[cZp:.+]] = torch.aten.item %arg7 : !torch.vtensor<[],ui8> -> !torch.int
   // CHECK: %[[aScale:.+]] = torch.aten.item %arg1 : !torch.vtensor<[],f32> -> !torch.float
-  // CHECK: %[[bScale:.+]] = torch.aten.item %arg4 : !torch.vtensor<[1],f32> -> !torch.float
   // CHECK: %[[cScale:.+]] = torch.aten.item %arg6 : !torch.vtensor<[],f32> -> !torch.float
   // CHECK: %[[A:.+]] = torch.aten._make_per_tensor_quantized_tensor %arg0, %[[aScale]], %[[aZp]] : !torch.vtensor<[1,1,7,7],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,7,7],!torch.quint8>
+  // CHECK: %[[bScale:.+]] = torch.aten.item %arg4 : !torch.vtensor<[],f32> -> !torch.float
+  // CHECK: %[[bZp:.+]] = torch.aten.item %arg5 : !torch.vtensor<[],ui8> -> !torch.int
   // CHECK: %[[B:.+]] = torch.aten._make_per_tensor_quantized_tensor %arg3, %[[bScale]], %[[bZp]] : !torch.vtensor<[1,1,1,1],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,1,1],!torch.quint8>
   // CHECK: %[[INT0_0:.+]] = torch.constant.int 0
   // CHECK: %[[INT0_1:.+]] = torch.constant.int 0
@@ -103,17 +103,17 @@ func.func @test_qlinearconv_nobias(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1:
 
 // -----
 
-// CHECK-LABEL: @test_qlinearconv_bias
-func.func @test_qlinearconv_bias(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[1,1,1,1],ui8>, %arg4: !torch.vtensor<[1],f32>, %arg5: !torch.vtensor<[1],ui8>, %arg6: !torch.vtensor<[],f32>, %arg7: !torch.vtensor<[],ui8>, %arg8 : !torch.vtensor<[7],si32>) -> !torch.vtensor<[1,1,7,7],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+// CHECK-LABEL: @test_qlinearconv_bias_weight_per_channel
+func.func @test_qlinearconv_bias_weight_per_channel(%arg0: !torch.vtensor<[1,1,7,7],ui8>, %arg1: !torch.vtensor<[],f32>, %arg2: !torch.vtensor<[],ui8>, %arg3: !torch.vtensor<[1,1,1,1],ui8>, %arg4: !torch.vtensor<[1],f32>, %arg5: !torch.vtensor<[1],ui8>, %arg6: !torch.vtensor<[],f32>, %arg7: !torch.vtensor<[],ui8>, %arg8 : !torch.vtensor<[7],si32>) -> !torch.vtensor<[1,1,7,7],ui8> attributes {torch.onnx_meta.ir_version = 5 : si64, torch.onnx_meta.opset_version = 10 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
   %0 = torch.operator "onnx.QLinearConv"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8) : (!torch.vtensor<[1,1,7,7],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[1,1,1,1],ui8>, !torch.vtensor<[1],f32>, !torch.vtensor<[1],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>, !torch.vtensor<[7],si32>) -> !torch.vtensor<[1,1,7,7],ui8>
   // CHECK: %[[aZp:.+]] = torch.aten.item %arg2 : !torch.vtensor<[],ui8> -> !torch.int
-  // CHECK: %[[bZp:.+]] = torch.aten.item %arg5 : !torch.vtensor<[1],ui8> -> !torch.int
   // CHECK: %[[cZp:.+]] = torch.aten.item %arg7 : !torch.vtensor<[],ui8> -> !torch.int
   // CHECK: %[[aScale:.+]] = torch.aten.item %arg1 : !torch.vtensor<[],f32> -> !torch.float
-  // CHECK: %[[bScale:.+]] = torch.aten.item %arg4 : !torch.vtensor<[1],f32> -> !torch.float
   // CHECK: %[[cScale:.+]] = torch.aten.item %arg6 : !torch.vtensor<[],f32> -> !torch.float
   // CHECK: %[[A:.+]] = torch.aten._make_per_tensor_quantized_tensor %arg0, %[[aScale]], %[[aZp]] : !torch.vtensor<[1,1,7,7],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,7,7],!torch.quint8>
-  // CHECK: %[[B:.+]] = torch.aten._make_per_tensor_quantized_tensor %arg3, %[[bScale]], %[[bZp]] : !torch.vtensor<[1,1,1,1],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,1,1],!torch.quint8>
+  // CHECK: %[[bScale:.+]] = torch.aten.item %arg4 : !torch.vtensor<[1],f32> -> !torch.float
+  // CHECK: %[[INT0:.+]] = torch.constant.int 0
+  // CHECK: %[[B:.+]] = torch.aten._make_per_channel_quantized_tensor %arg3, %arg4, %arg5, %[[INT0]] : !torch.vtensor<[1,1,1,1],ui8>, !torch.vtensor<[1],f32>, !torch.vtensor<[1],ui8>, !torch.int -> !torch.vtensor<[1,1,1,1],!torch.quint8>
   // CHECK: %[[INT0_0:.+]] = torch.constant.int 0
   // CHECK: %[[INT0_1:.+]] = torch.constant.int 0
   // CHECK: %[[PAD:.+]] = torch.prim.ListConstruct %[[INT0_0]], %[[INT0_1]]