; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx810 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s

; Natural mapping
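; The value, vindex and voffset arrive in VGPRs and the rsrc and soffset in SGPRs,
; which is what BUFFER_STORE_*_BOTHEN expects, so selection only needs COPYs and REG_SEQUENCEs.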
define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

define amdgpu_ps void @struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(<2 x float> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 8 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

define amdgpu_ps void @struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(<3 x float> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_DWORDX3_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 12 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.struct.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

define amdgpu_ps void @struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x float> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
  ; CHECK:   [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_DWORDX4_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

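; The rsrc and soffset arrive in VGPRs but must be uniform, so selection builds a
; waterfall loop that readfirstlanes them; the SGPR value, vindex and voffset are copied to VGPRs.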
define amdgpu_ps void @struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset(<4 x float> inreg %val, <4 x i32> %rsrc, i32 inreg %vindex, i32 inreg %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   successors: %bb.2(0x80000000)
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK:   [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
  ; CHECK:   [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
  ; CHECK:   [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]]
  ; CHECK:   [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
  ; CHECK:   [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
  ; CHECK:   [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
  ; CHECK:   [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
  ; CHECK:   [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY14]], implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY15]].sub0, implicit $exec
  ; CHECK:   [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY15]].sub1, implicit $exec
  ; CHECK:   [[REG_SEQUENCE3:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK:   [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE3]], [[COPY15]], implicit $exec
  ; CHECK:   [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE4:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK:   [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
  ; CHECK:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
  ; CHECK:   [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK:   [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_DWORDX4_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 16 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK:   $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK:   S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK:   successors: %bb.4(0x80000000)
  ; CHECK:   $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

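; Sub-dword stores: the value is truncated to i8/i16 in the IR and selected to
; BUFFER_STORE_BYTE / BUFFER_STORE_SHORT.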
define amdgpu_ps void @struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_BYTE_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 1 into custom "TargetCustom7", addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %val.trunc = trunc i32 %val to i8
  call void @llvm.amdgcn.struct.buffer.store.i8(i8 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

define amdgpu_ps void @struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_SHORT_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 2 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  %val.trunc = trunc i32 %val to i16
  call void @llvm.amdgcn.struct.buffer.store.i16(i16 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

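; A cachepolicy operand of 1 sets the glc bit on the selected instruction.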
define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1)
  ret void
}

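; Packed <2 x half> is stored as a single dword.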
define amdgpu_ps void @struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 4 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

; FIXME:
; define amdgpu_ps void @struct_buffer_store_v3f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(<3 x half> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
;   call void @llvm.amdgcn.struct.buffer.store.v3f16(<3 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
;   ret void
; }

define amdgpu_ps void @struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x half> %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK:   liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK:   [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK:   [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK:   [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK:   [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK:   [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; CHECK:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
  ; CHECK:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
  ; CHECK:   BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store 8 into custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK:   S_ENDPGM 0
  call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret void
}

declare void @llvm.amdgcn.struct.buffer.store.i8(i8, <4 x i32>, i32, i32, i32, i32 immarg)
declare void @llvm.amdgcn.struct.buffer.store.i16(i16, <4 x i32>, i32, i32, i32, i32 immarg)

declare void @llvm.amdgcn.struct.buffer.store.f16(half, <4 x i32>, i32, i32, i32, i32 immarg)
declare void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half>, <4 x i32>, i32, i32, i32, i32 immarg)
declare void @llvm.amdgcn.struct.buffer.store.v3f16(<3 x half>, <4 x i32>, i32, i32, i32, i32 immarg)
declare void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half>, <4 x i32>, i32, i32, i32, i32 immarg)

declare void @llvm.amdgcn.struct.buffer.store.f32(float, <4 x i32>, i32, i32, i32, i32 immarg)
declare void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float>, <4 x i32>, i32, i32, i32, i32 immarg)
declare void @llvm.amdgcn.struct.buffer.store.v3f32(<3 x float>, <4 x i32>, i32, i32, i32, i32 immarg)
declare void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i32, i32 immarg)