@@ -1118,3 +1118,101 @@ define <16 x i8> @test_rem_variable_16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
   %res = srem <16 x i8> %a, %b
   ret <16 x i8> %res
 }
+
+define <16 x i8> @PR143238(<16 x i8> %a0) {
+; SSE-LABEL: PR143238:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [26368,47872,11008,20224,37632,35072,33024,30976]
+; SSE-NEXT: psrlw $8, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: pmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [33024,22016,33024,26368,11008,37632,33024,14592]
+; SSE-NEXT: psrlw $8, %xmm1
+; SSE-NEXT: packuswb %xmm2, %xmm1
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: paddb %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE-NEXT: psraw $8, %xmm1
+; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 # [64,32,128,64,32,32,32,32]
+; SSE-NEXT: psrlw $8, %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT: psraw $8, %xmm2
+; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 # [256,256,128,128,256,64,64,128]
+; SSE-NEXT: psrlw $8, %xmm2
+; SSE-NEXT: packuswb %xmm1, %xmm2
+; SSE-NEXT: psrlw $7, %xmm0
+; SSE-NEXT: paddb %xmm2, %xmm0
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: PR143238:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [26368,47872,11008,20224,37632,35072,33024,30976]
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vpmulhw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [33024,22016,33024,26368,11008,37632,33024,14592]
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [64,32,128,64,32,32,32,32]
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 # [256,256,128,128,256,64,64,128]
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2NOBW-LABEL: PR143238:
+; AVX2NOBW: # %bb.0:
+; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm1
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [65409,86,65409,103,43,65427,65409,57,103,65467,43,79,65427,65417,65409,121]
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm1
+; AVX2NOBW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1 # [256,256,128,128,256,64,64,128,64,32,128,64,32,32,32,32]
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2NOBW-NEXT: vzeroupper
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: PR143238:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpmovsxbw {{.*#+}} ymm1 = [0,0,1,1,0,2,2,1,2,3,1,2,3,3,3,3]
+; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm2
+; AVX512BW-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 # [65409,86,65409,103,43,65427,65409,57,103,65467,43,79,65427,65417,65409,121]
+; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vpaddb %xmm0, %xmm2, %xmm0
+; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm2
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX512BW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
+  %sdiv = sdiv <16 x i8> %a0, <i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17>
+  %mask = and <16 x i8> %sdiv, splat (i8 1)
+  ret <16 x i8> %mask
+}