; RUN: llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=bonaire -enable-amdgpu-aa=0 -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
; RUN: llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=gfx900 -enable-amdgpu-aa=0 -verify-machineinstrs -enable-misched -enable-aa-sched-mi < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s

@stored_lds_ptr = addrspace(3) global i32 addrspace(3)* undef, align 4
@stored_constant_ptr = addrspace(3) global i32 addrspace(4)* undef, align 8
@stored_global_ptr = addrspace(3) global i32 addrspace(1)* undef, align 8

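; LDS and global memory cannot alias, so the second local load may be moved
; across the global store, letting the two loads fold into one ds_read2_b32.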
; GCN-LABEL: {{^}}reorder_local_load_global_store_local_load:
; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:1 offset1:3
; CI: buffer_store_dword

; GFX9: global_store_dword
; GFX9: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:1 offset1:3
; GFX9: global_store_dword
define amdgpu_kernel void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

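; A volatile global store must not be reordered with the surrounding local
; loads, so the two ds_read_b32 stay on either side of the store.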
; GCN-LABEL: {{^}}no_reorder_local_load_volatile_global_store_local_load:
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: buffer_store_dword
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12

; GFX9: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; GFX9: global_store_dword
; GFX9: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
define amdgpu_kernel void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store volatile i32 99, i32 addrspace(1)* %gptr, align 4
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

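; The s_barrier call blocks reordering: the second local load may not be
; moved across the barrier, so the loads are not combined into ds_read2_b32.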
; GCN-LABEL: {{^}}no_reorder_barrier_local_load_global_store_local_load:
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; CI: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
; CI: buffer_store_dword

; GFX9-DAG: global_store_dword
; GFX9-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:4
; GFX9: s_barrier
; GFX9-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:12
; GFX9-DAG: global_store_dword
define amdgpu_kernel void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4
  call void @llvm.amdgcn.s.barrier() #1
  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

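; Constant address space loads cannot alias the global store, so the
; scheduler is free to reorder the scalar loads around it.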
; GCN-LABEL: {{^}}reorder_constant_load_global_store_constant_load:
; GCN-DAG: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}

; CI: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x1
; CI: buffer_store_dword
; CI: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x3

; GFX9: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x4
; GFX9: global_store_dword
; GFX9: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xc

; CI: buffer_store_dword
; GFX9: global_store_dword
define amdgpu_kernel void @reorder_constant_load_global_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
  %ptr0 = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(3)* @stored_constant_ptr, align 8

  %ptr1 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 3

  %tmp1 = load i32, i32 addrspace(4)* %ptr1, align 4
  store i32 99, i32 addrspace(1)* %gptr, align 4
  %tmp2 = load i32, i32 addrspace(4)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

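; Constant address space loads cannot alias the LDS store, so both
; s_load_dword may be scheduled around the ds_write_b32.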
; GCN-LABEL: {{^}}reorder_constant_load_local_store_constant_load:
; GCN: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}

; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x1
; CI-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x3

; GFX9-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x4
; GFX9-DAG: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xc

; GCN-DAG: ds_write_b32
; CI: buffer_store_dword
; GFX9: global_store_dword
define amdgpu_kernel void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
  %ptr0 = load i32 addrspace(4)*, i32 addrspace(4)* addrspace(3)* @stored_constant_ptr, align 8

  %ptr1 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 3

  %tmp1 = load i32, i32 addrspace(4)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4
  %tmp2 = load i32, i32 addrspace(4)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

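; With the constant pointer passed directly as a kernel argument, both SMRD
; loads are scheduled before the ds_write_b32.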
; GCN-LABEL: {{^}}reorder_smrd_load_local_store_smrd_load:
; GCN: s_load_dword
; GCN: s_load_dword
; GCN: s_load_dword
; GCN: ds_write_b32
; CI: buffer_store_dword
; GFX9: global_store_dword
define amdgpu_kernel void @reorder_smrd_load_local_store_smrd_load(i32 addrspace(1)* %out, i32 addrspace(3)* noalias %lptr, i32 addrspace(4)* %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(4)* %ptr0, i64 2

  %tmp1 = load i32, i32 addrspace(4)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4
  %tmp2 = load i32, i32 addrspace(4)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

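; Global loads cannot alias the LDS store, so the loads may be moved across
; the ds_write_b32.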
; GCN-LABEL: {{^}}reorder_global_load_local_store_global_load:
; CI: ds_write_b32
; CI: buffer_load_dword
; CI: buffer_load_dword
; CI: buffer_store_dword

; GFX9: global_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:4
; GFX9: global_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:12
; GFX9: ds_write_b32
define amdgpu_kernel void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr, i32 addrspace(1)* %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 3

  %tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
  store i32 99, i32 addrspace(3)* %lptr, align 4
  %tmp2 = load i32, i32 addrspace(1)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2

  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

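; Accesses to provably distinct LDS offsets may be reordered, letting the
; reads and writes combine into ds_read2_b32/ds_write2_b32.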
; GCN-LABEL: {{^}}reorder_local_offsets:
; GCN: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:100 offset1:102
; GCN-DAG: ds_write2_b32 {{v[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:3 offset1:100
; GCN-DAG: ds_write_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:408
; CI: buffer_store_dword
; GFX9: global_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 100
  %ptr3 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 102

  store i32 123, i32 addrspace(3)* %ptr1, align 4
  %tmp1 = load i32, i32 addrspace(3)* %ptr2, align 4
  %tmp2 = load i32, i32 addrspace(3)* %ptr3, align 4
  store i32 123, i32 addrspace(3)* %ptr2, align 4
  %tmp3 = load i32, i32 addrspace(3)* %ptr1, align 4
  store i32 789, i32 addrspace(3)* %ptr3, align 4

  %add.0 = add nsw i32 %tmp2, %tmp1
  %add.1 = add nsw i32 %add.0, %tmp3
  store i32 %add.1, i32 addrspace(1)* %out, align 4
  ret void
}

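; The same pattern on global memory: accesses to provably distinct offsets
; from the same base pointer may be reordered.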
; GCN-LABEL: {{^}}reorder_global_offsets:
; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
; CI-DAG: buffer_load_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:12
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:400
; CI-DAG: buffer_store_dword {{v[0-9]+}}, off, {{s\[[0-9]+:[0-9]+\]}}, 0 offset:408
; CI: buffer_store_dword
; CI: s_endpgm

; GFX9-DAG: global_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:400
; GFX9-DAG: global_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:408
; GFX9-DAG: global_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:12
; GFX9-DAG: global_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:400
; GFX9-DAG: global_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:408
; GFX9: global_store_dword
; GFX9: s_endpgm
define amdgpu_kernel void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 100
  %ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 102

  store i32 123, i32 addrspace(1)* %ptr1, align 4
  %tmp1 = load i32, i32 addrspace(1)* %ptr2, align 4
  %tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
  store i32 123, i32 addrspace(1)* %ptr2, align 4
  %tmp3 = load i32, i32 addrspace(1)* %ptr1, align 4
  store i32 789, i32 addrspace(1)* %ptr3, align 4

  %add.0 = add nsw i32 %tmp2, %tmp1
  %add.1 = add nsw i32 %add.0, %tmp3
  store i32 %add.1, i32 addrspace(1)* %out, align 4
  ret void
}

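; With a variable base address (addr64 on CI) and no scalar offset, the three
; loads are still clustered ahead of the stores using their immediate offsets.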
; GCN-LABEL: {{^}}reorder_global_offsets_addr64_soffset0:
; CI:      buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:12{{$}}
; CI-NEXT: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:28{{$}}
; CI-NEXT: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:44{{$}}

; CI: v_mov_b32
; CI: v_mov_b32

; CI-DAG: v_add_i32
; CI-DAG: v_add_i32

; CI-DAG: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; CI-DAG: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:20{{$}}
; CI-DAG: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:36{{$}}
; CI: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:52{{$}}

; GFX9: global_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:12
; GFX9: global_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:28
; GFX9: global_load_dword {{v[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:44

; GFX9: global_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]$}}
; GFX9: global_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:20
; GFX9: global_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:36
; GFX9: global_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}} offset:52

define amdgpu_kernel void @reorder_global_offsets_addr64_soffset0(i32 addrspace(1)* noalias nocapture %ptr.base) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %id.ext = sext i32 %id to i64

  %ptr0 = getelementptr inbounds i32, i32 addrspace(1)* %ptr.base, i64 %id.ext
  %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
  %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 5
  %ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 7
  %ptr4 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 9
  %ptr5 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 11
  %ptr6 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 13

  store i32 789, i32 addrspace(1)* %ptr0, align 4
  %tmp1 = load i32, i32 addrspace(1)* %ptr1, align 4
  store i32 123, i32 addrspace(1)* %ptr2, align 4
  %tmp2 = load i32, i32 addrspace(1)* %ptr3, align 4
  %add.0 = add nsw i32 %tmp1, %tmp2
  store i32 %add.0, i32 addrspace(1)* %ptr4, align 4
  %tmp3 = load i32, i32 addrspace(1)* %ptr5, align 4
  %add.1 = add nsw i32 %add.0, %tmp3
  store i32 %add.1, i32 addrspace(1)* %ptr6, align 4
  ret void
}

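; The tbuffer store cannot alias LDS, so the second local load may be moved
; across it, folding the two loads into one ds_read2_b32.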
; GCN-LABEL: {{^}}reorder_local_load_tbuffer_store_local_load:
; GCN: tbuffer_store_format
; GCN: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset0:1 offset1:2
define amdgpu_vs void @reorder_local_load_tbuffer_store_local_load(i32 addrspace(1)* %out, i32 %a1, i32 %vaddr) #0 {
  %ptr0 = load i32 addrspace(3)*, i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4

  %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
  %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2

  %tmp1 = load i32, i32 addrspace(3)* %ptr1, align 4

  %vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
  %vaddr.add = add i32 %vaddr, 32
  call void @llvm.amdgcn.struct.tbuffer.store.v4i32(<4 x i32> %vdata, <4 x i32> undef, i32 %vaddr.add, i32 0, i32 0, i32 228, i32 3)

  %tmp2 = load i32, i32 addrspace(3)* %ptr2, align 4

  %add = add nsw i32 %tmp1, %tmp2
  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

declare void @llvm.amdgcn.s.barrier() #1
declare i32 @llvm.amdgcn.workitem.id.x() #2
declare void @llvm.amdgcn.struct.tbuffer.store.v4i32(<4 x i32>, <4 x i32>, i32, i32, i32, i32 immarg, i32 immarg) #3

attributes #0 = { nounwind }
attributes #1 = { convergent nounwind willreturn }
attributes #2 = { nounwind readnone speculatable willreturn }
attributes #3 = { nounwind willreturn writeonly }