/external/llvm-project/llvm/test/CodeGen/AMDGPU/
D | splitkit-copy-bundle.mir |
    18  ; RA: %5.sub0:sgpr_1024 = S_MOV_B32 -1
    20  ; RA: undef %3.sub0:sgpr_1024 = S_MOV_B32 0
    24  ; RA: %6.sub2:sgpr_1024 = COPY %6.sub0
    26  ; RA: %6.sub4:sgpr_1024 = COPY %6.sub0
    28  ; RA: %6.sub6:sgpr_1024 = COPY %6.sub0
    30  ; RA: %6.sub8:sgpr_1024 = COPY %6.sub0
    32  ; RA: %6.sub10:sgpr_1024 = COPY %6.sub0
    34  ; RA: %6.sub12:sgpr_1024 = COPY %6.sub0
    36  ; RA: %6.sub14:sgpr_1024 = COPY %6.sub0
    38  ; RA: %6.sub16:sgpr_1024 = COPY %6.sub0
    [all …]
D | splitkit-copy-live-lanes.mir |
    20  ; CHECK: %2.sub0:sgpr_128 = COPY [[S_LOAD_DWORDX4_IMM]].sub0
    22  ; CHECK: undef %3.sub0:sgpr_128 = COPY [[S_LOAD_DWORDX4_IMM]].sub2
    34  …; CHECK: undef %52.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET]].sub0, impl…
    41  …; CHECK: undef %71.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET1]].sub0, imp…
    48  …; CHECK: undef %90.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET2]].sub0, imp…
    55  …; CHECK: undef %109.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET3]].sub0, im…
    61  …; CHECK: undef %126.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET4]].sub0, im…
    68  …; CHECK: undef %144.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET5]].sub0, im…
    75  …; CHECK: undef %36.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET6]].sub0, imp…
    80  …; CHECK: undef %41.sub2:vreg_128 = V_LSHRREV_B32_e32 16, [[BUFFER_LOAD_DWORDX4_OFFSET7]].sub0, imp…
    [all …]
D | promote-constOffset-to-imm.mir |
    19  %7:vgpr_32 = V_AND_B32_e32 255, %6.sub0, implicit $exec
    21  %9:vreg_64 = REG_SEQUENCE killed %7, %subreg.sub0, %8, %subreg.sub1
    22  %10:vgpr_32 = V_LSHLREV_B32_e64 7, %6.sub0, implicit $exec
    26  %14:vgpr_32, %15:sreg_64_xexec = V_ADD_CO_U32_e64 %1.sub0, %11, 0, implicit $exec
    29  %19:vreg_64 = REG_SEQUENCE %14, %subreg.sub0, %17, %subreg.sub1
    31  %21:vgpr_32, %22:sreg_64_xexec = V_ADD_CO_U32_e64 %14, %20.sub0, 0, implicit $exec
    36  %30:vreg_64 = REG_SEQUENCE %26, %subreg.sub0, %28, %subreg.sub1
    41  %37:vreg_64 = REG_SEQUENCE %33, %subreg.sub0, %35, %subreg.sub1
    50  # GFX9: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[BASE_LO]], %subreg.sub0, [[BASE_HI]], %s…
    56  # GFX9: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[BASE1_LO]], %subreg.sub0, [[BASE1_HI]], …
    [all …]
D | promote-constOffset-to-imm-gfx10.mir |
    19  %7:vgpr_32 = V_AND_B32_e32 255, %6.sub0, implicit $exec
    21  %9:vreg_64 = REG_SEQUENCE killed %7, %subreg.sub0, %8, %subreg.sub1
    22  %10:vgpr_32 = V_LSHLREV_B32_e64 7, %6.sub0, implicit $exec
    26  %14:vgpr_32, %15:sreg_32_xm0_xexec = V_ADD_CO_U32_e64 %1.sub0, %11, 0, implicit $exec
    29  %19:vreg_64 = REG_SEQUENCE %14, %subreg.sub0, %17, %subreg.sub1
    31  %21:vgpr_32, %22:sreg_32_xm0_xexec = V_ADD_CO_U32_e64 %14, %20.sub0, 0, implicit $exec
    36  %30:vreg_64 = REG_SEQUENCE %26, %subreg.sub0, %28, %subreg.sub1
    41  %37:vreg_64 = REG_SEQUENCE %33, %subreg.sub0, %35, %subreg.sub1
    50  # GFX10: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[BASE_LO]], %subreg.sub0, [[BASE_HI]], %…
    57  # GFX10: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[BASE1_LO]], %subreg.sub0, [[BASE1_HI]],…
    [all …]
D | merge-load-store-vreg.mir |
    94  # VI: V_ADD_CO_U32_e64 %6, %0.sub0,
    95  # VI-NEXT: DS_WRITE2_B32 killed %7, %0.sub0, %3.sub0, 0, 8,
    96  # VI: V_ADD_CO_U32_e64 %10, %3.sub0,
    99  # GFX9: V_ADD_U32_e64 %6, %0.sub0,
    100  # GFX9-NEXT: DS_WRITE2_B32_gfx9 killed %7, %0.sub0, %3.sub0, 0, 8,
    101  # GFX9: V_ADD_U32_e64 %9, %3.sub0,
    114  %1:sreg_64_xexec = V_CMP_NE_U32_e64 %0.sub0, 0, implicit $exec
    117  DS_WRITE_B32 %0.sub0, %0.sub0, 1024, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp)
    119  DS_WRITE_B32 %0.sub0, %3.sub0, 1056, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp1)
    120  … %4:vgpr_32 = DS_READ_B32 %3.sub0, 1088, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp2)
    [all …]
D | regcoalesce-cannot-join-failures.mir |
    11  ; CHECK: undef %0.sub0:sreg_64_xexec = IMPLICIT_DEF
    14  ; CHECK: %0.sub1:sreg_64_xexec = COPY %0.sub0
    21  undef %0.sub0:sreg_64_xexec = IMPLICIT_DEF
    27  %0.sub1:sreg_64_xexec = COPY %0.sub0:sreg_64_xexec
    31  dead %2:sreg_32_xm0 = COPY %0.sub0:sreg_64_xexec
    41  ; CHECK: undef %0.sub0:sreg_64 = S_MOV_B32 0
    42  ; CHECK: %0.sub1:sreg_64 = COPY %0.sub0
    44  undef %0.sub0:sreg_64 = S_MOV_B32 0
    46  %0.sub1:sreg_64 = COPY %0.sub0:sreg_64
    59  ; CHECK: %0.sub0:sreg_64 = S_MOV_B32 0
    [all …]
D | sdwa-ops.mir |
    29  %63:vgpr_32, %65:sreg_64_xexec = nsw V_ADD_CO_U32_e64 %30.sub0, %23, 0, implicit $exec
    31  %62:vreg_64 = REG_SEQUENCE %63, %subreg.sub0, %64, %subreg.sub1
    32  …GLOBAL_STORE_DWORDX2_SADDR %30.sub0, %62, %1, 0, 0, 0, 0, implicit $exec, implicit $exec :: (store…
    35  %163:vgpr_32, %165:sreg_64_xexec = V_ADD_CO_U32_e64 %30.sub0, %161, 0, implicit $exec
    37  %162:vreg_64 = REG_SEQUENCE %163, %subreg.sub0, %164, %subreg.sub1
    38  …GLOBAL_STORE_DWORDX2_SADDR %30.sub0, %162, %1, 0, 0, 0, 0, implicit $exec, implicit $exec :: (stor…
    41  %173:vgpr_32, %175:sreg_64_xexec = V_ADD_CO_U32_e64 %30.sub0, %171, 0, implicit $exec
    43  %172:vreg_64 = REG_SEQUENCE %173, %subreg.sub0, %174, %subreg.sub1
    44  …GLOBAL_STORE_DWORDX2_SADDR %30.sub0, %172, %1, 0, 0, 0, 0, implicit $exec, implicit $exec :: (stor…
    71  %63:vgpr_32, %65:sreg_64_xexec = V_ADD_CO_U32_e64 %30.sub0, %23, 0, implicit $exec
    [all …]
D | detect-dead-lanes.mir |
    9  # CHECK: %3:sgpr_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, undef %2, %subreg.sub3
    10  # CHECK: S_NOP 0, implicit %3.sub0
    15  # CHECK: S_NOP 0, implicit %4.sub0
    17  # CHECK: S_NOP 0, implicit undef %5.sub0
    31  %3 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub3
    32  S_NOP 0, implicit %3.sub0
    37  S_NOP 0, implicit %4.sub0
    39  S_NOP 0, implicit %5.sub0
    45  # CHECK: %0:sgpr_128 = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr0, %subreg.sub2
    47  # CHECK: %2:sreg_64 = INSERT_SUBREG %0.sub2_sub3, $sgpr42, %subreg.sub0
    [all …]
D | subreg-split-live-in-error.mir |
    28  # undef %0.sub0:vreg_64 = COPY %123:sreg_32 ; undef point for %0.sub1
    60  undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    61  %3.sub1:vreg_128 = COPY %3.sub0
    62  %3.sub2:vreg_128 = COPY %3.sub0
    71  undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    72  %3.sub1:vreg_128 = COPY %3.sub0
    91  undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $mode, implicit $exec
    92  undef %11.sub0:sgpr_256 = S_MOV_B32 0
    93  %11.sub1:sgpr_256 = COPY %11.sub0
    94  %11.sub2:sgpr_256 = COPY %11.sub0
    [all …]
D | v_swap_b32.mir |
    136  # GCN: %0.sub0:vreg_64, %1.sub0:vreg_64 = V_SWAP_B32 %1.sub0, %0.sub0, implicit $exec
    147  %2.sub0 = COPY %0.sub0
    149  %0.sub0 = COPY %1.sub0
    151  %1.sub0 = COPY %2.sub0
    338  # GCN-NEXT: %2.sub0:vreg_64 = COPY %0.sub0
    340  # GCN-NEXT: %0.sub0:vreg_64 = COPY %1.sub0
    341  # GCN-NEXT: %1.sub0:vreg_64 = COPY %2.sub0
    353  %2.sub0 = COPY %0.sub0
    355  %0.sub0 = COPY %1.sub0
    356  %1.sub0 = COPY %2.sub0
    [all …]
D | rename-independent-subregs.mir |
    10  # in combination with sub0 and needs to stay with the original vreg.
    12  # CHECK: S_NOP 0, implicit-def undef %0.sub0
    24  S_NOP 0, implicit-def undef %0.sub0
    42  # CHECK-NEXT: S_NOP 0, implicit-def undef %0.sub0
    44  # CHECK-NEXT: S_NOP 0, implicit %0.sub0
    63  S_NOP 0, implicit-def %1.sub0
    65  S_NOP 0, implicit %1.sub0
    73  # (1) %0.sub0 + %0.sub0 and (2) %0.sub1 + %0.sub1
    76  …ub0 */, def undef %0.sub0, 327690 /* regdef:SReg_1_with_sub0 */, def dead %1.sub1, 2147483657 /* r…
    80  undef %0.sub0:vreg_64 = IMPLICIT_DEF
    [all …]
D | sched-assert-onlydbg-value-empty-region.mir |
    29  …; CHECK: undef %6.sub0:vreg_64 = V_ADD_F32_e32 [[DEF]].sub0, [[COPY1]].sub0, implicit $mode, imp…
    30  …; CHECK: dead undef %6.sub1:vreg_64 = V_ADD_F32_e32 [[DEF]].sub1, [[COPY1]].sub0, implicit $mode…
    32  ; CHECK: undef %4.sub0:vreg_64 = V_MOV_B32_e32 111, implicit $exec
    40  …; CHECK: undef %19.sub0:vreg_64 = V_ADD_F32_e32 [[GLOBAL_LOAD_DWORD1]], [[GLOBAL_LOAD_DWORDX2_]]…
    48  ; CHECK: %11.sub0:vreg_64 = GLOBAL_LOAD_DWORD [[DEF2]], 0, 0, 0, 0, implicit $exec
    49  ; CHECK: [[DEF1]].sub0:vreg_64 = GLOBAL_LOAD_DWORD [[DEF3]], 0, 0, 0, 0, implicit $exec
    54  …; CHECK: S_NOP 0, implicit [[DEF5]], implicit [[V_LSHLREV_B64_]].sub0, implicit [[DEF4]], implic…
    75  %4.sub0:vreg_64 = V_MOV_B32_e32 111, implicit $exec
    77  undef %6.sub0:vreg_64 = V_ADD_F32_e32 %1.sub0, %5.sub0, implicit $mode, implicit $exec
    78  %6.sub1:vreg_64 = V_ADD_F32_e32 %1.sub1, %5.sub0, implicit $mode, implicit $exec
    [all …]
D | rename-independent-subregs-mac-operands.mir |
    5  # GCN: undef %18.sub0:vreg_128 = nofpexcept V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef…
    49  undef %12.sub0 = COPY killed %4
    57  undef %13.sub0 = COPY %8
    67  %16 = COPY killed %1.sub0
    68  undef %15.sub0 = COPY killed %16
    81  # GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    82  # GCN: undef %9.sub2:vreg_128 = COPY %7.sub0
    85  # GCN: undef %7.sub0:vreg_128 = nofpexcept V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $mode, implicit…
    91  # GCN: BUFFER_STORE_DWORD_OFFEN %7.sub0, %0,
    121  %6.sub0 = V_MOV_B32_e32 0, implicit $exec
    [all …]
D | coalescing_makes_lanes_undef.mir |
    4  # Register coalescer is going to eliminate %2:sgpr_32 = COPY %1.sub0 from bb.1
    5  # by joining %2 and %1.sub0 into %0.sub0 register. Check that when this happen
    6  # the implicit intialization of %0.sub0 in the bb.2 have undef flag
    19  ; CHECK: undef %0.sub0:sgpr_64 = S_MOV_B32 1
    24  ; CHECK: undef %0.sub0:sgpr_64 = IMPLICIT_DEF
    26  ; CHECK: S_NOP 0, implicit %0.sub0
    34  undef %1.sub0:sgpr_64 = S_MOV_B32 1
    36  %2:sgpr_32 = COPY %1.sub0 ; copy to be joined
    42  undef %1.sub0:sgpr_64 = IMPLICIT_DEF
D | peephole-opt-regseq-removal.mir |
    8  # %4 -> %3.sub0 -> %2.sub1 -> %1
    9  # %5 -> %3.sub1 -> %2.sub0 -> %0
    22  …; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.…
    23  …[0-9]+]]:vreg_64 = REG_SEQUENCE [[REG_SEQUENCE]].sub1, %subreg.sub0, [[REG_SEQUENCE]].sub0, %subre…
    29  %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
    30  %3:vreg_64 = REG_SEQUENCE %2.sub1, %subreg.sub0, %2.sub0, %subreg.sub1
    31  %4:vgpr_32 = COPY %3.sub0
D | mubuf-legalize-operands.mir |
    16  # W64: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.su…
    20  # W64: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
    22  # W64: [[STMP0:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1
    26  # W64: [[STMP1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[SRSRC2]], %subreg.sub0, [[SRSRC3]], %subreg.sub1
    29  # W64: [[SRSRC:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1…
    40  # W32: [[VRSRC:%[0-9]+]]:vreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.su…
    44  # W32: [[SRSRC0:%[0-9]+]]:sgpr_32 = V_READFIRSTLANE_B32 [[VRSRC]].sub0, implicit $exec
    46  # W32: [[STMP0:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1
    50  # W32: [[STMP1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[SRSRC2]], %subreg.sub0, [[SRSRC3]], %subreg.sub1
    53  # W32: [[SRSRC:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[SRSRC0]], %subreg.sub0, [[SRSRC1]], %subreg.sub1…
    [all …]
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/
D | regbankselect-reg-sequence.mir |
    18  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr(s64) = REG_SEQUENCE [[COPY]](s32), %subreg.sub0, [[COPY1]](…
    21  %2:_(s64) = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
    35  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr(s64) = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr1, %subreg.s…
    36  %0:_(s64) = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr1, %subreg.sub1
    52  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE [[COPY]](s32), %subreg.sub0, [[COPY1]](…
    55  %2:_(s64) = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
    69  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE $sgpr0, %subreg.sub0, $vgpr0, %subreg.s…
    70  %0:_(s64) = REG_SEQUENCE $sgpr0, %subreg.sub0, $vgpr0, %subreg.sub1
    86  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vgpr(s64) = REG_SEQUENCE [[COPY]](s32), %subreg.sub0, [[COPY1]](…
    89  %2:_(s64) = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
    [all …]
D | inst-select-ptr-add.mir |
    19  ; GFX6: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
    20  ; GFX6: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
    25  …; GFX6: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_AD…
    30  ; GFX8: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
    31  ; GFX8: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
    36  …; GFX8: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_AD…
    41  ; GFX9: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
    42  ; GFX9: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
    47  …; GFX9: [[REG_SEQUENCE:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[S_ADD_U32_]], %subreg.sub0, [[S_AD…
    52  ; GFX10-WAVE64: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
    [all …]
D | inst-select-load-flat.mir |
    723  …; GFX7: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_…
    724  ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
    725  ; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
    730  …; GFX7: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_]], %subreg.sub0, %9, …
    738  …; GFX8: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_…
    739  ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
    740  ; GFX8: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
    745  …; GFX8: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_]], %subreg.sub0, %9, …
    758  …; GFX10: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV…
    759  ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
    [all …]
D | inst-select-ptrmask.mir |
    247  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
    249  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY1]].sub0
    253  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[S_AND_B32…
    273  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
    275  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subr…
    296  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
    298  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_MOV_B64_]].sub0
    302  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_AND_B32_]], %subreg.sub0, [[S_AND_B32…
    324  …; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32…
    325  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY [[COPY]].sub0
    [all …]
D | inst-select-constant.mir |
    136  …; WAVE64: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MO…
    139  …; WAVE64: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_2]], %subreg.sub0, [[V_…
    142  …; WAVE64: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_4]], %subreg.sub0, [[V_…
    145  …; WAVE64: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_6]], %subreg.sub0, [[V_…
    148  …; WAVE64: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_8]], %subreg.sub0, [[V_…
    151  …; WAVE64: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_10]], %subreg.sub0, [[V…
    154  …; WAVE64: [[REG_SEQUENCE6:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_12]], %subreg.sub0, [[V…
    157  …; WAVE64: [[REG_SEQUENCE7:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_14]], %subreg.sub0, [[V…
    162  …; WAVE32: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MO…
    165  …; WAVE32: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_2]], %subreg.sub0, [[V_…
    [all …]
D | inst-select-atomicrmw-add-flat.mir |
    86  …; GFX7: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_…
    87  ; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
    88  ; GFX7: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
    93  …; GFX7: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_]], %subreg.sub0, %10,…
    108  …; GFX10: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV…
    109  ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
    110  ; GFX10: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
    115  …; GFX10: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_]], %subreg.sub0, %10…
    142  …; GFX7: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MOV_B32_e32_]], %subreg.sub0, [[V_MOV_…
    143  ; GFX7: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
    [all …]
D | inst-select-load-global.mir |
    27  …; GFX6: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    37  …; GFX7: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    84  …; GFX6: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    94  …; GFX7: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    141  …; GFX6: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    151  …; GFX7: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    198  …; GFX6: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    208  …; GFX7: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    255  …; GFX6: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    265  …; GFX7: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_…
    [all …]
D | inst-select-load-global-saddr.mir |
    120  …; GFX9: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, %notzero, %subreg…
    121  ; GFX9: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
    122  ; GFX9: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
    127  …; GFX9: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_]], %subreg.sub0, %12,…
    136  …; GFX10: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, %notzero, %subre…
    137  ; GFX10: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
    138  ; GFX10: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
    143  …; GFX10: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_]], %subreg.sub0, %12…
    180  ; GFX10: %zext:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, %zero, %subreg.sub1
    181  ; GFX10: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
    [all …]
/external/llvm/test/CodeGen/AMDGPU/
D | detect-dead-lanes.mir |
    20  # CHECK: S_NOP 0, implicit %3:sub0
    25  # CHECK: S_NOP 0, implicit %4:sub0
    27  # CHECK: S_NOP 0, implicit undef %5:sub0
    42  %3 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub3
    43  S_NOP 0, implicit %3:sub0
    48  S_NOP 0, implicit %4:sub0
    50  S_NOP 0, implicit %5:sub0
    59  # CHECK: S_NOP 0, implicit %1:sub0
    63  # CHECK: S_NOP 0, implicit %2:sub0
    68  # CHECK: S_NOP 0, implicit undef %4:sub0
    [all …]