; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
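
; i32 atomic operations on global (addrspace(1)) memory. GCN checks cover
; patterns common to both targets; SI checks the buffer_atomic (addr64)
; forms and VI checks the flat_atomic forms where the two targets differ.

; --- atomicrmw add ---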
; FUNC-LABEL: {{^}}atomic_add_i32_offset:
; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_soffset:
; GCN: s_mov_b32 [[SREG:s[0-9]+]], 0x8ca0
; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], [[SREG]]{{$}}
define void @atomic_add_i32_soffset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 9000
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_huge_offset:
; SI-DAG: v_mov_b32_e32 v[[PTRLO:[0-9]+]], 0xdeac
; SI-DAG: v_mov_b32_e32 v[[PTRHI:[0-9]+]], 0xabcd
; SI: buffer_atomic_add v{{[0-9]+}}, v{{\[}}[[PTRLO]]:[[PTRHI]]{{\]}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_add_i32_huge_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 47224239175595
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_offset:
; GCN: buffer_atomic_add [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_addr64_offset:
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_addr64_offset:
; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32:
; GCN: buffer_atomic_add v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_add_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret:
; GCN: buffer_atomic_add [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_add_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_addr64:
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_add v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_addr64:
; SI: buffer_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_add [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
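
; --- atomicrmw and ---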
; FUNC-LABEL: {{^}}atomic_and_i32_offset:
; GCN: buffer_atomic_and v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_offset:
; GCN: buffer_atomic_and [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_addr64_offset:
; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64_offset:
; SI: buffer_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32:
; GCN: buffer_atomic_and v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret:
; GCN: buffer_atomic_and [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_and_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_addr64:
; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_and v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64:
; SI: buffer_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_and [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
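
; --- atomicrmw sub ---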
; FUNC-LABEL: {{^}}atomic_sub_i32_offset:
; GCN: buffer_atomic_sub v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_offset:
; GCN: buffer_atomic_sub [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_addr64_offset:
; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_addr64_offset:
; SI: buffer_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32:
; GCN: buffer_atomic_sub v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_sub_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret:
; GCN: buffer_atomic_sub [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_sub_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_addr64:
; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_sub v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_addr64:
; SI: buffer_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_sub [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
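
; --- atomicrmw max (signed): buffer_atomic_smax / flat_atomic_smax ---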
; FUNC-LABEL: {{^}}atomic_max_i32_offset:
; GCN: buffer_atomic_smax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_offset:
; GCN: buffer_atomic_smax [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_addr64_offset:
; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_addr64_offset:
; SI: buffer_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32:
; GCN: buffer_atomic_smax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret:
; GCN: buffer_atomic_smax [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_addr64:
; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_smax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_addr64:
; SI: buffer_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_smax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
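
; --- atomicrmw umax: buffer_atomic_umax / flat_atomic_umax ---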
; FUNC-LABEL: {{^}}atomic_umax_i32_offset:
; GCN: buffer_atomic_umax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_offset:
; GCN: buffer_atomic_umax [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_addr64_offset:
; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_addr64_offset:
; SI: buffer_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32:
; GCN: buffer_atomic_umax v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret:
; GCN: buffer_atomic_umax [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_addr64:
; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_umax v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_addr64:
; SI: buffer_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umax [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
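
; --- atomicrmw min (signed): buffer_atomic_smin / flat_atomic_smin ---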
; FUNC-LABEL: {{^}}atomic_min_i32_offset:
; GCN: buffer_atomic_smin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_offset:
; GCN: buffer_atomic_smin [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_addr64_offset:
; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_addr64_offset:
; SI: buffer_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32:
; GCN: buffer_atomic_smin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret:
; GCN: buffer_atomic_smin [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_addr64:
; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_smin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_addr64:
; SI: buffer_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_smin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
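
; --- atomicrmw umin: buffer_atomic_umin / flat_atomic_umin ---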
; FUNC-LABEL: {{^}}atomic_umin_i32_offset:
; GCN: buffer_atomic_umin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_offset:
; GCN: buffer_atomic_umin [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_addr64_offset:
; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_addr64_offset:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32:
; GCN: buffer_atomic_umin v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_addr64:
; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_umin v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_addr64:
; SI: buffer_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_umin [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
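
; --- atomicrmw or ---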
; FUNC-LABEL: {{^}}atomic_or_i32_offset:
; GCN: buffer_atomic_or v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_offset:
; GCN: buffer_atomic_or [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_addr64_offset:
; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_addr64_offset:
; SI: buffer_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32:
; GCN: buffer_atomic_or v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_or_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret:
; GCN: buffer_atomic_or [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_or_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_addr64:
; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_or v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_addr64:
; SI: buffer_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_or [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
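
; --- atomicrmw xchg: buffer_atomic_swap / flat_atomic_swap ---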
; FUNC-LABEL: {{^}}atomic_xchg_i32_offset:
; GCN: buffer_atomic_swap v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_offset:
; GCN: buffer_atomic_swap [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}{{$}}
define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32:
; GCN: buffer_atomic_swap v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_ret:
; GCN: buffer_atomic_swap [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_xchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64:
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_swap v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64:
; SI: buffer_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_swap [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
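
; --- cmpxchg: buffer_atomic_cmpswap / flat_atomic_cmpswap; the ret variants
; store field 0 of the { i32, i1 } result ---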
; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_offset:
; GCN: buffer_atomic_cmpswap v[{{[0-9]+}}:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_cmpxchg_i32_offset(i32 addrspace(1)* %out, i32 %in, i32 %old) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_ret_offset:
; GCN: buffer_atomic_cmpswap v{{\[}}[[RET:[0-9]+]]{{:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_cmpxchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
  %extract0 = extractvalue { i32, i1 } %val, 0
  store i32 %extract0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_addr64_offset:
; SI: buffer_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_cmpswap v[{{[0-9]+\:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}]{{$}}
define void @atomic_cmpxchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64_offset:
; SI: buffer_atomic_cmpswap v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
define void @atomic_cmpxchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = cmpxchg volatile i32 addrspace(1)* %gep, i32 %old, i32 %in seq_cst seq_cst
  %extract0 = extractvalue { i32, i1 } %val, 0
  store i32 %extract0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_cmpxchg_i32:
; GCN: buffer_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_cmpxchg_i32(i32 addrspace(1)* %out, i32 %in, i32 %old) {
entry:
  %val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_ret:
; GCN: buffer_atomic_cmpswap v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_cmpxchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i32 %old) {
entry:
  %val = cmpxchg volatile i32 addrspace(1)* %out, i32 %old, i32 %in seq_cst seq_cst
  %extract0 = extractvalue { i32, i1 } %val, 0
  store i32 %extract0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_addr64:
; SI: buffer_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_cmpswap v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}]{{$}}
define void @atomic_cmpxchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index, i32 %old) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_cmpxchg_i32_ret_addr64:
; SI: buffer_atomic_cmpswap v{{\[}}[[RET:[0-9]+]]:{{[0-9]+}}], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_cmpswap v[[RET:[0-9]+]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] glc{{$}}
define void @atomic_cmpxchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index, i32 %old) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = cmpxchg volatile i32 addrspace(1)* %ptr, i32 %old, i32 %in seq_cst seq_cst
  %extract0 = extractvalue { i32, i1 } %val, 0
  store i32 %extract0, i32 addrspace(1)* %out2
  ret void
}
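
; --- atomicrmw xor ---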
; FUNC-LABEL: {{^}}atomic_xor_i32_offset:
; GCN: buffer_atomic_xor v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_ret_offset:
; GCN: buffer_atomic_xor [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_addr64_offset:
; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
; VI: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64_offset:
; SI: buffer_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32:
; GCN: buffer_atomic_xor v{{[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_ret:
; GCN: buffer_atomic_xor [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
define void @atomic_xor_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %val = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_addr64:
; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
; VI: flat_atomic_xor v[{{[0-9]+:[0-9]+}}], v{{[0-9]+$}}
define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64:
; SI: buffer_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_atomic_xor [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}} glc{{$}}
define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %val = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %val, i32 addrspace(1)* %out2
  ret void
}
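
; --- seq_cst atomic loads: selected to glc-marked buffer/flat loads ---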
; FUNC-LABEL: {{^}}atomic_load_i32_offset:
; SI: buffer_load_dword [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc{{$}}
define void @atomic_load_i32_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %in, i64 4
  %val = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
  store i32 %val, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}atomic_load_i32:
; SI: buffer_load_dword [[RET:v[0-9]+]], off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}] glc
define void @atomic_load_i32(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
entry:
  %val = load atomic i32, i32 addrspace(1)* %in seq_cst, align 4
  store i32 %val, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}atomic_load_i32_addr64_offset:
; SI: buffer_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
define void @atomic_load_i32_addr64_offset(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %in, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  %val = load atomic i32, i32 addrspace(1)* %gep seq_cst, align 4
  store i32 %val, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}atomic_load_i32_addr64:
; SI: buffer_load_dword [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_load_dword [[RET:v[0-9]+]], v[{{[0-9]+:[0-9]+}}] glc{{$}}
define void @atomic_load_i32_addr64(i32 addrspace(1)* %in, i32 addrspace(1)* %out, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %in, i64 %index
  %val = load atomic i32, i32 addrspace(1)* %ptr seq_cst, align 4
  store i32 %val, i32 addrspace(1)* %out
  ret void
}
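
; --- seq_cst atomic stores: selected to glc-marked buffer/flat stores ---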
; FUNC-LABEL: {{^}}atomic_store_i32_offset:
; SI: buffer_store_dword {{v[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32_offset(i32 %in, i32 addrspace(1)* %out) {
entry:
  %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
  store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
  ret void
}

; FUNC-LABEL: {{^}}atomic_store_i32:
; SI: buffer_store_dword {{v[0-9]+}}, off, s[{{[0-9]+}}:{{[0-9]+}}], 0 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32(i32 %in, i32 addrspace(1)* %out) {
entry:
  store atomic i32 %in, i32 addrspace(1)* %out seq_cst, align 4
  ret void
}

; FUNC-LABEL: {{^}}atomic_store_i32_addr64_offset:
; SI: buffer_store_dword {{v[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32_addr64_offset(i32 %in, i32 addrspace(1)* %out, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
  store atomic i32 %in, i32 addrspace(1)* %gep seq_cst, align 4
  ret void
}

; FUNC-LABEL: {{^}}atomic_store_i32_addr64:
; SI: buffer_store_dword {{v[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; VI: flat_store_dword v[{{[0-9]+}}:{{[0-9]+}}], {{v[0-9]+}} glc{{$}}
define void @atomic_store_i32_addr64(i32 %in, i32 addrspace(1)* %out, i64 %index) {
entry:
  %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
  store atomic i32 %in, i32 addrspace(1)* %ptr seq_cst, align 4
  ret void
}