Lines Matching full:copy
115 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
116 ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
117 ; SSE: $al = COPY [[MOV8rm]]
120 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
121 ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
122 ; AVX: $al = COPY [[MOV8rm]]
125 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
126 ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
127 ; AVX512F: $al = COPY [[MOV8rm]]
130 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
131 ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
132 ; AVX512VL: $al = COPY [[MOV8rm]]
134 %0(p0) = COPY $rdi
136 $al = COPY %1(s8)
153 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
154 ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
155 ; SSE: $ax = COPY [[MOV16rm]]
158 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
159 ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
160 ; AVX: $ax = COPY [[MOV16rm]]
163 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
164 ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
165 ; AVX512F: $ax = COPY [[MOV16rm]]
168 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
169 ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
170 ; AVX512VL: $ax = COPY [[MOV16rm]]
172 %0(p0) = COPY $rdi
174 $ax = COPY %1(s16)
191 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
192 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
193 ; SSE: $eax = COPY [[MOV32rm]]
196 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
197 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
198 ; AVX: $eax = COPY [[MOV32rm]]
201 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
202 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
203 ; AVX512F: $eax = COPY [[MOV32rm]]
206 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
207 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
208 ; AVX512VL: $eax = COPY [[MOV32rm]]
210 %0(p0) = COPY $rdi
212 $eax = COPY %1(s32)
229 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
230 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
231 ; SSE: $rax = COPY [[MOV64rm]]
234 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
235 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
236 ; AVX: $rax = COPY [[MOV64rm]]
239 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
240 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
241 ; AVX512F: $rax = COPY [[MOV64rm]]
244 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
245 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
246 ; AVX512VL: $rax = COPY [[MOV64rm]]
248 %0(p0) = COPY $rdi
250 $rax = COPY %1(s64)
269 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
270 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
271 ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
272 ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
273 ; SSE: $xmm0 = COPY [[COPY2]]
276 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
277 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
278 ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
279 ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
280 ; AVX: $xmm0 = COPY [[COPY2]]
283 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
284 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
285 ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
286 ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
287 ; AVX512F: $xmm0 = COPY [[COPY2]]
290 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
291 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
292 ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
293 ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
294 ; AVX512VL: $xmm0 = COPY [[COPY2]]
296 %0:gpr(p0) = COPY $rdi
298 %3:vecr(s32) = COPY %1(s32)
300 $xmm0 = COPY %2(s128)
319 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
320 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
321 ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
322 ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
323 ; SSE: $xmm0 = COPY [[COPY2]]
326 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
327 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
328 ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
329 ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
330 ; AVX: $xmm0 = COPY [[COPY2]]
333 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
334 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
335 ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
336 ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
337 ; AVX512F: $xmm0 = COPY [[COPY2]]
340 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
341 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
342 ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
343 ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
344 ; AVX512VL: $xmm0 = COPY [[COPY2]]
346 %0:gpr(p0) = COPY $rdi
348 %3:vecr(s32) = COPY %1(s32)
350 $xmm0 = COPY %2(s128)
369 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
370 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
371 ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
372 ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
373 ; SSE: $xmm0 = COPY [[COPY2]]
376 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
377 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
378 ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
379 ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
380 ; AVX: $xmm0 = COPY [[COPY2]]
383 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
384 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
385 ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
386 ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
387 ; AVX512F: $xmm0 = COPY [[COPY2]]
390 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
391 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
392 ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
393 ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
394 ; AVX512VL: $xmm0 = COPY [[COPY2]]
396 %0:gpr(p0) = COPY $rdi
398 %3:vecr(s64) = COPY %1(s64)
400 $xmm0 = COPY %2(s128)
419 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
420 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
421 ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
422 ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
423 ; SSE: $xmm0 = COPY [[COPY2]]
426 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
427 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
428 ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
429 ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
430 ; AVX: $xmm0 = COPY [[COPY2]]
433 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
434 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
435 ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
436 ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
437 ; AVX512F: $xmm0 = COPY [[COPY2]]
440 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
441 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
442 ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
443 ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
444 ; AVX512VL: $xmm0 = COPY [[COPY2]]
446 %0:gpr(p0) = COPY $rdi
448 %3:vecr(s64) = COPY %1(s64)
450 $xmm0 = COPY %2(s128)
467 ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY $edi
468 ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
469 ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
470 ; SSE: $rax = COPY [[COPY1]]
473 ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY $edi
474 ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
475 ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
476 ; AVX: $rax = COPY [[COPY1]]
479 ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY $edi
480 ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
481 ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
482 ; AVX512F: $rax = COPY [[COPY1]]
485 ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
486 ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
487 ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
488 ; AVX512VL: $rax = COPY [[COPY1]]
490 %0(s32) = COPY $edi
491 %1(p0) = COPY $rsi
493 $rax = COPY %1(p0)
510 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
511 ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
512 ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
513 ; SSE: $rax = COPY [[COPY1]]
516 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
517 ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
518 ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
519 ; AVX: $rax = COPY [[COPY1]]
522 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
523 ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
524 ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
525 ; AVX512F: $rax = COPY [[COPY1]]
528 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
529 ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
530 ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
531 ; AVX512VL: $rax = COPY [[COPY1]]
533 %0(s64) = COPY $rdi
534 %1(p0) = COPY $rsi
536 $rax = COPY %1(p0)
555 ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
556 ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
557 ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
558 ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
560 ; SSE: $rax = COPY [[COPY2]]
563 ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
564 ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
565 ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
566 ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
568 ; AVX: $rax = COPY [[COPY2]]
571 ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
572 ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
573 ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
574 ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
576 ; AVX512F: $rax = COPY [[COPY2]]
579 ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
580 ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
581 ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
582 ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
584 ; AVX512VL: $rax = COPY [[COPY2]]
586 %2:vecr(s128) = COPY $xmm0
588 %1:gpr(p0) = COPY $rdi
589 %3:gpr(s32) = COPY %0(s32)
591 $rax = COPY %1(p0)
610 ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
611 ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
612 ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
613 ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
615 ; SSE: $rax = COPY [[COPY2]]
618 ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
619 ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
620 ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
621 ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
623 ; AVX: $rax = COPY [[COPY2]]
626 ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
627 ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
628 ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
629 ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
631 ; AVX512F: $rax = COPY [[COPY2]]
634 ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
635 ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
636 ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
637 ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
639 ; AVX512VL: $rax = COPY [[COPY2]]
641 %2:vecr(s128) = COPY $xmm0
643 %1:gpr(p0) = COPY $rdi
644 %3:gpr(s32) = COPY %0(s32)
646 $rax = COPY %1(p0)
660 # NO_AVX512X: %0:fr64 = COPY $xmm0
666 ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
667 ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
668 ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
669 ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
671 ; SSE: $rax = COPY [[COPY2]]
674 ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
675 ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
676 ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
677 ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
679 ; AVX: $rax = COPY [[COPY2]]
682 ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
683 ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
684 ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
685 ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
687 ; AVX512F: $rax = COPY [[COPY2]]
690 ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
691 ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
692 ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
693 ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
695 ; AVX512VL: $rax = COPY [[COPY2]]
697 %2:vecr(s128) = COPY $xmm0
699 %1:gpr(p0) = COPY $rdi
700 %3:gpr(s64) = COPY %0(s64)
702 $rax = COPY %1(p0)
721 ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
722 ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
723 ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
724 ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
726 ; SSE: $rax = COPY [[COPY2]]
729 ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
730 ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
731 ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
732 ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
734 ; AVX: $rax = COPY [[COPY2]]
737 ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
738 ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
739 ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
740 ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
742 ; AVX512F: $rax = COPY [[COPY2]]
745 ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
746 ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
747 ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
748 ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
750 ; AVX512VL: $rax = COPY [[COPY2]]
752 %2:vecr(s128) = COPY $xmm0
754 %1:gpr(p0) = COPY $rdi
755 %3:gpr(s64) = COPY %0(s64)
757 $rax = COPY %1(p0)
775 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
776 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
777 ; SSE: $rax = COPY [[MOV64rm]]
780 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
781 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
782 ; AVX: $rax = COPY [[MOV64rm]]
785 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
786 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
787 ; AVX512F: $rax = COPY [[MOV64rm]]
790 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
791 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
792 ; AVX512VL: $rax = COPY [[MOV64rm]]
794 %0(p0) = COPY $rdi
796 $rax = COPY %1(p0)
814 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
815 ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
816 ; SSE: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
819 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
820 ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
821 ; AVX: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
824 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
825 ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
826 ; AVX512F: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
829 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
830 ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
831 ; AVX512VL: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
833 %0(p0) = COPY $rdi
834 %1(p0) = COPY $rsi
855 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
856 ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
857 ; SSE: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
858 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
859 ; SSE: $eax = COPY [[MOV32rm]]
862 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
863 ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
864 ; AVX: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
865 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
866 ; AVX: $eax = COPY [[MOV32rm]]
869 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
870 ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
871 ; AVX512F: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
872 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
873 ; AVX512F: $eax = COPY [[MOV32rm]]
876 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
877 ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
878 ; AVX512VL: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
879 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
880 ; AVX512VL: $eax = COPY [[MOV32rm]]
882 %0(p0) = COPY $rdi
883 %1(s32) = COPY $esi
888 $eax = COPY %4(s32)
908 ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
909 ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
911 ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
914 ; SSE: $eax = COPY [[MOV32rm]]
917 ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
918 ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
920 ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
923 ; AVX: $eax = COPY [[MOV32rm]]
926 ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
927 ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
929 ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
932 ; AVX512F: $eax = COPY [[MOV32rm]]
935 ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
936 ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
938 ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
941 ; AVX512VL: $eax = COPY [[MOV32rm]]
943 %0(p0) = COPY $rdi
944 %1(s32) = COPY $esi
949 $eax = COPY %4(s32)