/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | llvm.amdgcn.raw.tbuffer.store.i8.ll |
    51 ; UNPACKED: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    59 …; UNPACKED: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, …
    60 …; UNPACKED: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, …
    62 …Q_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    87 ; PACKED: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    95 …; PACKED: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, im…
    96 …; PACKED: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, im…
    98 …Q_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    129 ; UNPACKED: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    137 …; UNPACKED: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, …
    [all …]
|
D | legalize-llvm.amdgcn.image.atomic.dim.a16.ll |
    17 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    21 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.swap.1d), [[COPY8]](s32), [[TRUNC]](…
    35 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    39 …_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.swap.1d), [[COPY8]](s32), [[TRUNC]](…
    60 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    64 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](…
    78 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    82 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](…
    103 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    107 …G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.atomic.sub.1d), [[COPY8]](s32), [[TRUNC]](…
    [all …]
|
D | legalize-llvm.amdgcn.image.store.2d.d16.ll |
    19 ; PACKED: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    24 ; PACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    38 ; UNPACKED: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    43 …; UNPACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s3…
    57 ; GFX81: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    62 ; GFX81: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    76 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    81 ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    95 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    100 ; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    [all …]
|
D | legalize-llvm.amdgcn.image.dim.a16.ll |
    17 ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    20 ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
    41 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    44 ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
    72 ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    75 ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
    77 ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
    101 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    104 ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
    106 ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
    [all …]
|
D | image_ls_mipmap_zero.a16.ll |
    17 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    19 ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
    40 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    42 ; GFX10: [[COPY9:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
    69 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    72 ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
    93 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    96 ; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
    123 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    127 ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY8]](s32)
    [all …]
|
D | legalize-llvm.amdgcn.image.load.2d.ll |
    16 ; GCN: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    19 ; GCN: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    39 ; GCN: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    42 ; GCN: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    64 ; GCN: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    67 ; GCN: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    90 ; GCN: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    93 ; GCN: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    117 ; GCN: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    121 ; GCN: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    [all …]
|
D | legalize-llvm.amdgcn.image.load.3d.ll |
    17 ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    21 …; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), …
    36 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    40 … = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.3d), 1, [[COPY8]](s32), [[COPY9]](…
    59 ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    64 …; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), …
    81 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    86 … = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.3d), 1, [[COPY8]](s32), [[COPY9]](…
|
D | llvm.amdgcn.raw.buffer.load.format.ll |
    58 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_FORMAT_XYZ_OFFEN]].sub2
    61 ; CHECK: $vgpr2 = COPY [[COPY8]]
    81 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_FORMAT_XYZW_OFFEN]].sub2
    85 ; CHECK: $vgpr2 = COPY [[COPY8]]
    107 ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    115 …; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, imp…
    116 …; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, imp…
    118 …Q_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    152 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_FORMAT_XYZW_OFFEN]].sub2
    156 ; CHECK: $vgpr2 = COPY [[COPY8]]
|
D | llvm.amdgcn.struct.buffer.atomic.cmpswap.ll |
    18 ; CHECK: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
    22 …SWAP_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, 0, implic…
    44 ; CHECK: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
    48 …SWAP_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, 0, implic…
    69 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
    90 …; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit…
    91 …64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
    125 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
    146 …; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit…
    147 …64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
    [all …]
|
D | llvm.amdgcn.raw.tbuffer.store.f16.ll |
    91 ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
    92 …; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY8]], [[COPY]], impl…
    132 ; UNPACKED: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    140 …; UNPACKED: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, …
    141 …; UNPACKED: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, …
    143 …Q_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    168 ; PACKED: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    176 …; PACKED: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, im…
    177 …; PACKED: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, im…
    179 …Q_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    [all …]
|
D | legalize-llvm.amdgcn.image.load.2d.d16.ll |
    17 ; UNPACKED: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    20 …; UNPACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s3…
    36 ; PACKED: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    39 ; PACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    60 ; UNPACKED: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    63 …; UNPACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s3…
    88 ; PACKED: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    91 ; PACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32)
    111 ; UNPACKED: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    114 …; UNPACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s3…
    [all …]
|
D | llvm.amdgcn.raw.buffer.load.format.f16.ll |
    65 ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
    66 …; UNPACKED: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[COPY6]], [[COPY8]], implicit $e…
    115 ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_FORMAT_D16_XYZW_gfx80_OFFEN]].sub2
    127 …; UNPACKED: [[V_AND_B32_e64_2:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[COPY8]], [[COPY13]], implicit …
    155 ; PACKED: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    163 …; PACKED: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, im…
    164 …; PACKED: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, im…
    166 …Q_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    195 ; UNPACKED: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    203 …; UNPACKED: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, …
    [all …]
|
D | legalize-llvm.amdgcn.image.load.2darraymsaa.ll |
    17 ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    22 …; GFX6: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), …
    41 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    46 …U_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[COPY8]](s32), [[COPY9]](…
    69 ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    76 ; GFX6: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
    97 ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    104 ; GFX10NSA: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
|
D | legalize-llvm.amdgcn.image.sample.a16.ll |
    17 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    24 …; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), …
    43 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    50 …; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32),…
    75 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    82 …; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), …
    104 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    111 …; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32),…
    139 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    147 …; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), …
    [all …]
|
D | llvm.amdgcn.raw.buffer.atomic.cmpswap.ll |
    20 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN]].sub0
    21 ; CHECK: $vgpr0 = COPY [[COPY8]]
    44 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN]].sub0
    65 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
    86 …; CHECK: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %s…
    118 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
    139 …; CHECK: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %s…
    169 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_OFFEN_RTN]].sub0
    170 ; CHECK: $vgpr0 = COPY [[COPY8]]
|
D | llvm.amdgcn.struct.buffer.store.format.f32.ll |
    36 ; CHECK: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
    40 …XY_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, 0, 0, …
    58 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
    62 …; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %s…
    81 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
    86 …; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %s…
    107 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
    129 …; CHECK: BUFFER_STORE_FORMAT_X_BOTHEN_exact [[COPY8]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V…
|
D | regbankselect-amdgcn.image.load.1d.ll |
    18 ; FAST: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    21 …= G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY8]](s32), [[BUILD_VE…
    36 ; GREEDY: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    39 …= G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY8]](s32), [[BUILD_VE…
    61 ; FAST: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr10
    64 ; FAST: [[COPY9:%[0-9]+]]:vgpr(s32) = COPY [[COPY8]](s32)
    80 ; GREEDY: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr10
    83 ; GREEDY: [[COPY9:%[0-9]+]]:vgpr(s32) = COPY [[COPY8]](s32)
    107 ; FAST: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY $vgpr8
    138 …= G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.load.1d), 15, [[COPY8]](s32), [[BUILD_VE…
    [all …]
|
D | irtranslator-call-return-values.ll |
    79 ; GCN: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
    86 ; GCN: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
    137 ; GCN: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
    140 ; GCN: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
    186 ; GCN: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
    191 ; GCN: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
    242 ; GCN: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
    269 ; GCN: [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
    288 ; GCN: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
    293 ; GCN: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
    [all …]
|
D | llvm.amdgcn.raw.tbuffer.store.ll |
    57 ; CHECK: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
    60 …_FORMAT_XYZ_OFFEN_exact [[REG_SEQUENCE]], [[COPY7]], [[REG_SEQUENCE1]], [[COPY8]], 0, 78, 0, 0, 0,…
    79 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
    83 …; CHECK: TBUFFER_STORE_FORMAT_XYZW_OFFEN_exact [[REG_SEQUENCE]], [[COPY8]], [[REG_SEQUENCE1]], […
    124 ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    132 …; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, imp…
    133 …; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, imp…
    135 …4_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    166 ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    174 …; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, imp…
    [all …]
|
D | legalize-build-vector.s16.mir |
    70 ; GFX78: [[COPY8:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
    71 ; GFX78: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C]]
    90 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
    91 … [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY7]](s32), [[COPY8]](s32)
    184 ; GFX78: [[COPY8:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
    185 ; GFX78: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C]]
    224 ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
    225 … [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY7]](s32), [[COPY8]](s32)
    271 ; GFX78: [[COPY8:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
    272 ; GFX78: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C]]
    [all …]
|
D | legalize-llvm.amdgcn.image.sample.g16.ll |
    16 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    24 …; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32),…
    54 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    65 …; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32),…
    96 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    110 …; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32),…
    146 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    155 …; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32),…
    186 ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $sgpr10
    198 …; GFX10: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32),…
    [all …]
|
D | llvm.amdgcn.struct.buffer.store.format.f16.ll |
    54 ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
    55 …; UNPACKED: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 [[COPY8]], [[COPY]], impl…
    97 ; UNPACKED: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
    106 …80_BOTHEN_exact [[REG_SEQUENCE1]], [[REG_SEQUENCE2]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, 0, 0, …
    119 ; PACKED: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
    123 …ZW_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, 0, 0, …
    143 ; UNPACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
    165 …; UNPACKED: BUFFER_STORE_FORMAT_D16_X_gfx80_BOTHEN_exact [[COPY8]], [[REG_SEQUENCE4]], [[REG_SEQ…
    187 ; PACKED: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
    209 …; PACKED: BUFFER_STORE_FORMAT_D16_X_BOTHEN_exact [[COPY8]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]]…
|
D | legalize-build-vector.mir |
    165 ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
    166 …32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
    194 ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
    196 …](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](…
    225 ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
    228 …](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](…
    258 ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
    262 …](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](…
    293 ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
    298 …](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](…
    [all …]
|
D | regbankselect-amdgcn.image.sample.1d.ll |
    18 ; FAST: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr10
    24 …; FAST: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32…
    40 ; GREEDY: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr10
    46 …; GREEDY: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s…
    69 ; FAST: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr10
    75 …; FAST: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32…
    92 ; GREEDY: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr10
    98 …; GREEDY: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s…
    123 ; FAST: [[COPY8:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
    129 …; FAST: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32…
    [all …]
|
D | llvm.amdgcn.raw.buffer.store.ll |
    38 ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
    39 …; CHECK: BUFFER_STORE_DWORD_OFFEN_exact [[COPY7]], [[COPY8]], [[REG_SEQUENCE]], [[COPY6]], 0, 0,…
    60 ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    68 …; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, imp…
    69 …; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, imp…
    71 …Q_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    134 ; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
    142 …; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, imp…
    143 …; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, imp…
    145 …Q_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
    [all …]
|
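Note: every match above is a FileCheck directive from one of these GlobalISel MIR tests. As a minimal, hypothetical sketch (not copied from any file listed): a pattern of the form [[COPY8:%[0-9]+]] defines a FileCheck variable named COPY8 that captures the virtual-register name matching the regex %[0-9]+, and a later bare [[COPY8]] must match the same captured register, e.g.

    ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK: $vgpr0 = COPY [[COPY8]]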