; RUN: llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=bonaire -enable-amdgpu-aa=0 -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
; RUN: llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=gfx900 -enable-amdgpu-aa=0 -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s

declare void @llvm.amdgcn.tbuffer.store.i32(i32, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1)
declare void @llvm.amdgcn.tbuffer.store.v4i32(<4 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1)
declare void @llvm.amdgcn.s.barrier() #1
declare i32 @llvm.amdgcn.workitem.id.x() #2


; LDS slots holding pointers into various address spaces; loading a pointer
; from LDS hides its provenance, exercising the scheduler's AA queries.
@stored_lds_ptr = addrspace(3) global i32 addrspace(3)* undef, align 4
@stored_constant_ptr = addrspace(3) global i32 addrspace(4)* undef, align 8
@stored_global_ptr = addrspace(3) global i32 addrspace(1)* undef, align 8

; The two LDS loads cannot alias the global store, so the scheduler may move
; them together across it and merge them into one ds_read2_b32.
; GCN-LABEL: {{^}}reorder_local_load_global_store_local_load:
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:1 offset1:3
; CI: buffer_store_dword

; GFX9: global_store_dword
; GFX9: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:1 offset1:3
; GFX9: global_store_dword
define amdgpu_kernel void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; The volatile global store acts as a scheduling barrier: the second LDS load
; must stay after it, so no ds_read2 merge is possible.
; GCN-LABEL: {{^}}no_reorder_local_load_volatile_global_store_local_load:
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: buffer_store_dword
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12

; GFX9: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; GFX9: global_store_dword
; GFX9: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
define amdgpu_kernel void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store volatile i32 99, i32 addrspace(1)* %gptr, align 4
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; The convergent s_barrier call prevents hoisting the second LDS load above
; it, so the two loads cannot be merged.
; GCN-LABEL: {{^}}no_reorder_barrier_local_load_global_store_local_load:
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
; CI: buffer_store_dword

; GFX9-DAG: global_store_dword
; GFX9-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; GFX9: s_barrier
; GFX9-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
; GFX9-DAG: global_store_dword
define amdgpu_kernel void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4
  call void @llvm.amdgcn.s.barrier() #1
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; Constant-address-space loads cannot alias the global store, so the
; scheduler is free to move the scalar loads across it.
; GCN-LABEL: {{^}}reorder_constant_load_global_store_constant_load:
; GCN-DAG: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}
; CI: buffer_store_dword

; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x1
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x3

; GFX9: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x4
; GFX9: global_store_dword
; GFX9: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xc

; CI: buffer_store_dword
; GFX9: global_store_dword
define amdgpu_kernel void @reorder_constant_load_global_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(3)* @stored_constant_ptr, align 8

  %ptr1 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 3

  %tmp1 = load i32, i32 addrspace(4)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4
  %tmp2 = load i32, i32 addrspace(4)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; Constant-address-space loads cannot alias the LDS store, so both scalar
; loads may be scheduled before the ds_write.
; GCN-LABEL: {{^}}reorder_constant_load_local_store_constant_load:
; GCN: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}

; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x1
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x3

; GFX9-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x4
; GFX9-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xc

; GCN: ds_write_b32
; CI: buffer_store_dword
; GFX9: global_store_dword
define amdgpu_kernel void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
  %ptr0 = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(3)* @stored_constant_ptr, align 8

  %ptr1 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 3

  %tmp1 = load i32, i32 addrspace(4)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4
  %tmp2 = load i32, i32 addrspace(4)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; Scalar (SMRD) loads from a kernel-argument constant pointer may be grouped
; ahead of the noalias LDS store.
; GCN-LABEL: {{^}}reorder_smrd_load_local_store_smrd_load:
; GCN: s_load_dword
; GCN: s_load_dword
; GCN: s_load_dword
; GCN: ds_write_b32
; CI: buffer_store_dword
; GFX9: global_store_dword
define amdgpu_kernel void @reorder_smrd_load_local_store_smrd_load(i32 addrspace(1)* %out, i32 addrspace(3)* noalias %lptr, i32 addrspace(4)* %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 2

  %tmp1 = load i32, i32 addrspace(4)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4
  %tmp2 = load i32, i32 addrspace(4)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; Global loads cannot alias the LDS store, so the two loads may be clustered
; together on either side of the ds_write.
; GCN-LABEL: {{^}}reorder_global_load_local_store_global_load:
; CI: ds_write_b32
; CI: buffer_load_dword
; CI: buffer_load_dword
; CI: buffer_store_dword

; GFX9: global_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:4
; GFX9: global_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:12
; GFX9: ds_write_b32
define amdgpu_kernel void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr, i32 addrspace(1)* %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 3

  %tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4
  %tmp2 = load i32, i32 addrspace(1)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; LDS accesses at distinct known offsets may be reordered and merged into
; read2/write2 pairs despite the interleaved stores.
; GCN-LABEL: {{^}}reorder_local_offsets:
; GCN: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:100 offset1:102
; GCN-DAG: ds_write2_b32 {{v[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:3 offset1:100
; GCN-DAG: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:408
; CI: buffer_store_dword
; GFX9: global_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 100
  %ptr3 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 102

  store i32 123, i32 addrspace(3)* %ptr1, align 4
  %tmp1 = load i32, i32 addrspace(3)* %ptr2, align 4
  %tmp2 = load i32, i32 addrspace(3)* %ptr3, align 4
  store i32 123, i32 addrspace(3)* %ptr2, align 4
  %tmp3 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 789, i32 addrspace(3)* %ptr3, align 4

  %add.0 = add nsw i32 %tmp2, %tmp1
  %add.1 = add nsw i32 %add.0, %tmp3
  store i32 %add.1, i32 addrspace(1)* %out, align 4
  ret void
}

; Global accesses at distinct known offsets off the same base may be
; reordered relative to each other by the scheduler.
; GCN-LABEL: {{^}}reorder_global_offsets:
; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
; CI: buffer_store_dword
; CI: s_endpgm

; GFX9-DAG: global_load_dword {{v[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:400
; GFX9-DAG: global_load_dword {{v[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:408
; GFX9-DAG: global_store_dword v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, off offset:12
; GFX9-DAG: global_store_dword v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, off offset:400
; GFX9-DAG: global_store_dword v{{\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, off offset:408
; GFX9: global_store_dword
; GFX9: s_endpgm
define amdgpu_kernel void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 100
  %ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 102

  store i32 123, i32 addrspace(1)* %ptr1, align 4
  %tmp1 = load i32, i32 addrspace(1)* %ptr2, align 4
  %tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
  store i32 123, i32 addrspace(1)* %ptr2, align 4
  %tmp3 = load i32, i32 addrspace(1)* %ptr1, align 4
  store i32 789, i32 addrspace(1)* %ptr3, align 4

  %add.0 = add nsw i32 %tmp2, %tmp1
  %add.1 = add nsw i32 %add.0, %tmp3
  store i32 %add.1, i32 addrspace(1)* %out, align 4
  ret void
}

; Per-lane (workitem-indexed) global accesses: loads cluster ahead of the
; stores; CI uses addr64 buffer ops with soffset 0, GFX9 uses global_* ops.
; GCN-LABEL: {{^}}reorder_global_offsets_addr64_soffset0:
; CI:      buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
; CI-NEXT: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:28{{$}}
; CI-NEXT: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:44{{$}}

; CI: v_mov_b32
; CI: v_mov_b32

; CI: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}

; CI: v_add_i32
; CI: v_add_i32

; CI: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:20{{$}}

; CI: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:36{{$}}
; CI-NEXT: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:52{{$}}


; GFX9: global_load_dword {{v[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:12
; GFX9: global_load_dword {{v[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:28
; GFX9: global_load_dword {{v[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, off offset:44

; GFX9: global_store_dword v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, off{{$}}
; GFX9: global_store_dword v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, off offset:20

; GFX9: global_store_dword v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, off offset:36
; GFX9: global_store_dword v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, off offset:52
define amdgpu_kernel void @reorder_global_offsets_addr64_soffset0(i32 addrspace(1)* noalias nocapture %ptr.base) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64

  %ptr0 = getelementptr inbounds i32, i32 addrspace(1)* %ptr.base, i64 %id.ext
  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 5
  %ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 7
  %ptr4 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 9
  %ptr5 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 11
  %ptr6 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 13

  store i32 789, i32 addrspace(1)* %ptr0, align 4
  %tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
  store i32 123, i32 addrspace(1)* %ptr2, align 4
  %tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
  %add.0 = add nsw i32 %tmp1, %tmp2
  store i32 %add.0, i32 addrspace(1)* %ptr4, align 4
  %tmp3 = load i32, i32 addrspace(1)* %ptr5, align 4
  %add.1 = add nsw i32 %add.0, %tmp3
  store i32 %add.1, i32 addrspace(1)* %ptr6, align 4
  ret void
}

; Disabled test (X-prefixed checks + commented-out body): would verify that
; LDS loads can be reordered around a tbuffer store.
; XGCN-LABEL: {{^}}reorder_local_load_tbuffer_store_local_load:
; XCI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}}, 0x4
; XCI: TBUFFER_STORE_FORMAT
; XCI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}}, 0x8
; define amdgpu_vs void @reorder_local_load_tbuffer_store_local_load(i32 addrspace(1)* %out, i32 %a1, i32 %vaddr) #0 {
;   %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

;   %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
;   %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2

;   %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4

;   %vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
;   call void @llvm.amdgcn.tbuffer.store.v4i32(<4 x i32> %vdata, <4 x i32> undef,
;         i32 %vaddr, i32 0, i32 0, i32 32, i32 14, i32 4, i1 1, i1 1)

;   %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

;   %add = add nsw i32 %tmp1, %tmp2

;   store i32 %add, i32 addrspace(1)* %out, align 4
;   ret void
; }

attributes #0 = { nounwind }
attributes #1 = { nounwind convergent }
attributes #2 = { nounwind readnone }
