# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s

--- |
  define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) #0 {
    %ret = mul <8 x i16> %arg1, %arg2
    ret <8 x i16> %ret
  }

  define <8 x i16> @test_mul_v8i16_avx(<8 x i16> %arg1, <8 x i16> %arg2) #1 {
    %ret = mul <8 x i16> %arg1, %arg2
    ret <8 x i16> %ret
  }

  define <8 x i16> @test_mul_v8i16_avx512bwvl(<8 x i16> %arg1, <8 x i16> %arg2) #2 {
    %ret = mul <8 x i16> %arg1, %arg2
    ret <8 x i16> %ret
  }

  define <4 x i32> @test_mul_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) #3 {
    %ret = mul <4 x i32> %arg1, %arg2
    ret <4 x i32> %ret
  }

  define <4 x i32> @test_mul_v4i32_avx(<4 x i32> %arg1, <4 x i32> %arg2) #1 {
    %ret = mul <4 x i32> %arg1, %arg2
    ret <4 x i32> %ret
  }

  define <4 x i32> @test_mul_v4i32_avx512vl(<4 x i32> %arg1, <4 x i32> %arg2) #4 {
    %ret = mul <4 x i32> %arg1, %arg2
    ret <4 x i32> %ret
  }

  define <2 x i64> @test_mul_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) #5 {
    %ret = mul <2 x i64> %arg1, %arg2
    ret <2 x i64> %ret
  }

  define <16 x i16> @test_mul_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) #6 {
    %ret = mul <16 x i16> %arg1, %arg2
    ret <16 x i16> %ret
  }

  define <16 x i16> @test_mul_v16i16_avx512bwvl(<16 x i16> %arg1, <16 x i16> %arg2) #2 {
    %ret = mul <16 x i16> %arg1, %arg2
    ret <16 x i16> %ret
  }

  define <8 x i32> @test_mul_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) #6 {
    %ret = mul <8 x i32> %arg1, %arg2
    ret <8 x i32> %ret
  }

  define <8 x i32> @test_mul_v8i32_avx512vl(<8 x i32> %arg1, <8 x i32> %arg2) #4 {
    %ret = mul <8 x i32> %arg1, %arg2
    ret <8 x i32> %ret
  }

  define <4 x i64> @test_mul_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) #5 {
    %ret = mul <4 x i64> %arg1, %arg2
    ret <4 x i64> %ret
  }

  define <32 x i16> @test_mul_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #7 {
    %ret = mul <32 x i16> %arg1, %arg2
    ret <32 x i16> %ret
  }

  define <16 x i32> @test_mul_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #8 {
    %ret = mul <16 x i32> %arg1, %arg2
    ret <16 x i32> %ret
  }

  define <8 x i64> @test_mul_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #9 {
    %ret = mul <8 x i64> %arg1, %arg2
    ret <8 x i64> %ret
  }

  attributes #0 = { "target-features"="+sse2" }
  attributes #1 = { "target-features"="+avx" }
  attributes #2 = { "target-features"="+avx512vl,+avx512f,+avx512bw" }
  attributes #3 = { "target-features"="+sse4.1" }
  attributes #4 = { "target-features"="+avx512vl,+avx512f" }
  attributes #5 = { "target-features"="+avx2,+avx512vl,+avx512f,+avx512dq" }
  attributes #6 = { "target-features"="+avx2" }
  attributes #7 = { "target-features"="+avx512f,+avx512bw" }
  attributes #8 = { "target-features"="+avx512f" }
  attributes #9 = { "target-features"="+avx512f,+avx512dq" }

...
---
name: test_mul_v8i16
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; CHECK-LABEL: name: test_mul_v8i16
    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
    ; CHECK: [[PMULLWrr:%[0-9]+]]:vr128 = PMULLWrr [[COPY]], [[COPY1]]
    ; CHECK: $xmm0 = COPY [[PMULLWrr]]
    ; CHECK: RET 0, implicit $xmm0
    %0(<8 x s16>) = COPY $xmm0
    %1(<8 x s16>) = COPY $xmm1
    %2(<8 x s16>) = G_MUL %0, %1
    $xmm0 = COPY %2(<8 x s16>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v8i16_avx
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; CHECK-LABEL: name: test_mul_v8i16_avx
    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
    ; CHECK: [[VPMULLWrr:%[0-9]+]]:vr128 = VPMULLWrr [[COPY]], [[COPY1]]
    ; CHECK: $xmm0 = COPY [[VPMULLWrr]]
    ; CHECK: RET 0, implicit $xmm0
    %0(<8 x s16>) = COPY $xmm0
    %1(<8 x s16>) = COPY $xmm1
    %2(<8 x s16>) = G_MUL %0, %1
    $xmm0 = COPY %2(<8 x s16>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v8i16_avx512bwvl
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; CHECK-LABEL: name: test_mul_v8i16_avx512bwvl
    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
    ; CHECK: [[VPMULLWZ128rr:%[0-9]+]]:vr128x = VPMULLWZ128rr [[COPY]], [[COPY1]]
    ; CHECK: $xmm0 = COPY [[VPMULLWZ128rr]]
    ; CHECK: RET 0, implicit $xmm0
    %0(<8 x s16>) = COPY $xmm0
    %1(<8 x s16>) = COPY $xmm1
    %2(<8 x s16>) = G_MUL %0, %1
    $xmm0 = COPY %2(<8 x s16>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v4i32
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; CHECK-LABEL: name: test_mul_v4i32
    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
    ; CHECK: [[PMULLDrr:%[0-9]+]]:vr128 = PMULLDrr [[COPY]], [[COPY1]]
    ; CHECK: $xmm0 = COPY [[PMULLDrr]]
    ; CHECK: RET 0, implicit $xmm0
    %0(<4 x s32>) = COPY $xmm0
    %1(<4 x s32>) = COPY $xmm1
    %2(<4 x s32>) = G_MUL %0, %1
    $xmm0 = COPY %2(<4 x s32>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v4i32_avx
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; CHECK-LABEL: name: test_mul_v4i32_avx
    ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
    ; CHECK: [[VPMULLDrr:%[0-9]+]]:vr128 = VPMULLDrr [[COPY]], [[COPY1]]
    ; CHECK: $xmm0 = COPY [[VPMULLDrr]]
    ; CHECK: RET 0, implicit $xmm0
    %0(<4 x s32>) = COPY $xmm0
    %1(<4 x s32>) = COPY $xmm1
    %2(<4 x s32>) = G_MUL %0, %1
    $xmm0 = COPY %2(<4 x s32>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v4i32_avx512vl
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; CHECK-LABEL: name: test_mul_v4i32_avx512vl
    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
    ; CHECK: [[VPMULLDZ128rr:%[0-9]+]]:vr128x = VPMULLDZ128rr [[COPY]], [[COPY1]]
    ; CHECK: $xmm0 = COPY [[VPMULLDZ128rr]]
    ; CHECK: RET 0, implicit $xmm0
    %0(<4 x s32>) = COPY $xmm0
    %1(<4 x s32>) = COPY $xmm1
    %2(<4 x s32>) = G_MUL %0, %1
    $xmm0 = COPY %2(<4 x s32>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v2i64
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $xmm0, $xmm1

    ; CHECK-LABEL: name: test_mul_v2i64
    ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
    ; CHECK: [[VPMULLQZ128rr:%[0-9]+]]:vr128x = VPMULLQZ128rr [[COPY]], [[COPY1]]
    ; CHECK: $xmm0 = COPY [[VPMULLQZ128rr]]
    ; CHECK: RET 0, implicit $xmm0
    %0(<2 x s64>) = COPY $xmm0
    %1(<2 x s64>) = COPY $xmm1
    %2(<2 x s64>) = G_MUL %0, %1
    $xmm0 = COPY %2(<2 x s64>)
    RET 0, implicit $xmm0

...
---
name: test_mul_v16i16
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; CHECK-LABEL: name: test_mul_v16i16
    ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY $ymm1
    ; CHECK: [[VPMULLWYrr:%[0-9]+]]:vr256 = VPMULLWYrr [[COPY]], [[COPY1]]
    ; CHECK: $ymm0 = COPY [[VPMULLWYrr]]
    ; CHECK: RET 0, implicit $ymm0
    %0(<16 x s16>) = COPY $ymm0
    %1(<16 x s16>) = COPY $ymm1
    %2(<16 x s16>) = G_MUL %0, %1
    $ymm0 = COPY %2(<16 x s16>)
    RET 0, implicit $ymm0

...
---
name: test_mul_v16i16_avx512bwvl
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; CHECK-LABEL: name: test_mul_v16i16_avx512bwvl
    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
    ; CHECK: [[VPMULLWZ256rr:%[0-9]+]]:vr256x = VPMULLWZ256rr [[COPY]], [[COPY1]]
    ; CHECK: $ymm0 = COPY [[VPMULLWZ256rr]]
    ; CHECK: RET 0, implicit $ymm0
    %0(<16 x s16>) = COPY $ymm0
    %1(<16 x s16>) = COPY $ymm1
    %2(<16 x s16>) = G_MUL %0, %1
    $ymm0 = COPY %2(<16 x s16>)
    RET 0, implicit $ymm0

...
---
name: test_mul_v8i32
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; CHECK-LABEL: name: test_mul_v8i32
    ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY $ymm1
    ; CHECK: [[VPMULLDYrr:%[0-9]+]]:vr256 = VPMULLDYrr [[COPY]], [[COPY1]]
    ; CHECK: $ymm0 = COPY [[VPMULLDYrr]]
    ; CHECK: RET 0, implicit $ymm0
    %0(<8 x s32>) = COPY $ymm0
    %1(<8 x s32>) = COPY $ymm1
    %2(<8 x s32>) = G_MUL %0, %1
    $ymm0 = COPY %2(<8 x s32>)
    RET 0, implicit $ymm0

...
---
name: test_mul_v8i32_avx512vl
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; CHECK-LABEL: name: test_mul_v8i32_avx512vl
    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
    ; CHECK: [[VPMULLDZ256rr:%[0-9]+]]:vr256x = VPMULLDZ256rr [[COPY]], [[COPY1]]
    ; CHECK: $ymm0 = COPY [[VPMULLDZ256rr]]
    ; CHECK: RET 0, implicit $ymm0
    %0(<8 x s32>) = COPY $ymm0
    %1(<8 x s32>) = COPY $ymm1
    %2(<8 x s32>) = G_MUL %0, %1
    $ymm0 = COPY %2(<8 x s32>)
    RET 0, implicit $ymm0

...
---
name: test_mul_v4i64
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; CHECK-LABEL: name: test_mul_v4i64
    ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
    ; CHECK: [[VPMULLQZ256rr:%[0-9]+]]:vr256x = VPMULLQZ256rr [[COPY]], [[COPY1]]
    ; CHECK: $ymm0 = COPY [[VPMULLQZ256rr]]
    ; CHECK: RET 0, implicit $ymm0
    %0(<4 x s64>) = COPY $ymm0
    %1(<4 x s64>) = COPY $ymm1
    %2(<4 x s64>) = G_MUL %0, %1
    $ymm0 = COPY %2(<4 x s64>)
    RET 0, implicit $ymm0

...
---
name: test_mul_v32i16
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; CHECK-LABEL: name: test_mul_v32i16
    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
    ; CHECK: [[VPMULLWZrr:%[0-9]+]]:vr512 = VPMULLWZrr [[COPY]], [[COPY1]]
    ; CHECK: $zmm0 = COPY [[VPMULLWZrr]]
    ; CHECK: RET 0, implicit $zmm0
    %0(<32 x s16>) = COPY $zmm0
    %1(<32 x s16>) = COPY $zmm1
    %2(<32 x s16>) = G_MUL %0, %1
    $zmm0 = COPY %2(<32 x s16>)
    RET 0, implicit $zmm0

...
---
name: test_mul_v16i32
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; CHECK-LABEL: name: test_mul_v16i32
    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
    ; CHECK: [[VPMULLDZrr:%[0-9]+]]:vr512 = VPMULLDZrr [[COPY]], [[COPY1]]
    ; CHECK: $zmm0 = COPY [[VPMULLDZrr]]
    ; CHECK: RET 0, implicit $zmm0
    %0(<16 x s32>) = COPY $zmm0
    %1(<16 x s32>) = COPY $zmm1
    %2(<16 x s32>) = G_MUL %0, %1
    $zmm0 = COPY %2(<16 x s32>)
    RET 0, implicit $zmm0

...
---
name: test_mul_v8i64
alignment: 4
legalized: true
regBankSelected: true
registers:
  - { id: 0, class: vecr }
  - { id: 1, class: vecr }
  - { id: 2, class: vecr }
body: |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; CHECK-LABEL: name: test_mul_v8i64
    ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
    ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
    ; CHECK: [[VPMULLQZrr:%[0-9]+]]:vr512 = VPMULLQZrr [[COPY]], [[COPY1]]
    ; CHECK: $zmm0 = COPY [[VPMULLQZrr]]
    ; CHECK: RET 0, implicit $zmm0
    %0(<8 x s64>) = COPY $zmm0
    %1(<8 x s64>) = COPY $zmm1
    %2(<8 x s64>) = G_MUL %0, %1
    $zmm0 = COPY %2(<8 x s64>)
    RET 0, implicit $zmm0

...