Lines Matching refs:addrspace
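These are the hits for addrspace in an LLVM IR test for 64-bit local (LDS) atomics. Each hit keeps its line number from the source file, so gaps between consecutive numbers mark elided lines (FileCheck directives, ret void, and closing braces did not match the search). As a reading aid, here is a minimal sketch of the first hit group as a complete function; the ret void and closing brace are assumed from the standard shape of these tests, and the real test's CHECK lines are omitted because they are not recoverable from this listing:

define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  ; Atomically swap in the constant 4 at the local (addrspace(3)) pointer and
  ; get back the previous value, with sequentially consistent ordering.
  %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
  ; Store the old value to global memory (addrspace(1)) so the test can check it.
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}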
7 define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
8 %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
9 store i64 %result, i64 addrspace(1)* %out, align 8
16 define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
17 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
18 %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
19 store i64 %result, i64 addrspace(1)* %out, align 8
26 define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
27 %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
28 store i64 %result, i64 addrspace(1)* %out, align 8
41 define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
42 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i64 4
43 %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
44 store i64 %result, i64 addrspace(1)* %out, align 8
54 define void @lds_atomic_add1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
55 %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
56 store i64 %result, i64 addrspace(1)* %out, align 8
63 define void @lds_atomic_add1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
64 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
65 %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
66 store i64 %result, i64 addrspace(1)* %out, align 8
73 define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
74 %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
75 store i64 %result, i64 addrspace(1)* %out, align 8
82 define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
83 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
84 %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
85 store i64 %result, i64 addrspace(1)* %out, align 8
95 define void @lds_atomic_sub1_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
96 %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
97 store i64 %result, i64 addrspace(1)* %out, align 8
104 define void @lds_atomic_sub1_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
105 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
106 %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
107 store i64 %result, i64 addrspace(1)* %out, align 8
114 define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
115 %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
116 store i64 %result, i64 addrspace(1)* %out, align 8
123 define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
124 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
125 %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
126 store i64 %result, i64 addrspace(1)* %out, align 8
133 define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
134 %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
135 store i64 %result, i64 addrspace(1)* %out, align 8
142 define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
143 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
144 %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
145 store i64 %result, i64 addrspace(1)* %out, align 8
152 define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
153 %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
154 store i64 %result, i64 addrspace(1)* %out, align 8
161 define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
162 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
163 %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
164 store i64 %result, i64 addrspace(1)* %out, align 8
170 ; define void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
171 ; %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
172 ; store i64 %result, i64 addrspace(1)* %out, align 8
179 define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
180 %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
181 store i64 %result, i64 addrspace(1)* %out, align 8
188 define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
189 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
190 %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
191 store i64 %result, i64 addrspace(1)* %out, align 8
198 define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
199 %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
200 store i64 %result, i64 addrspace(1)* %out, align 8
207 define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
208 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
209 %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
210 store i64 %result, i64 addrspace(1)* %out, align 8
217 define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
218 %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
219 store i64 %result, i64 addrspace(1)* %out, align 8
226 define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
227 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
228 %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
229 store i64 %result, i64 addrspace(1)* %out, align 8
236 define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
237 %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
238 store i64 %result, i64 addrspace(1)* %out, align 8
245 define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
246 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
247 %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
248 store i64 %result, i64 addrspace(1)* %out, align 8
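From here on the hits cover the noret variants: each performs the same atomicrmw but discards the result, so the functions take only the LDS pointer and the store to %out disappears, presumably to exercise the selection path for atomics whose return value is unused. A complete noret function, again with the assumed ret void and closing brace filled in:

define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  ; Same atomic exchange as above, but the previous value is never read.
  %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}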
255 define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
256 %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
263 define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
264 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
265 %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
272 define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
273 %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
285 define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
286 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i64 4
287 %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
296 define void @lds_atomic_add1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
297 %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
304 define void @lds_atomic_add1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
305 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
306 %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
313 define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
314 %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
321 define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
322 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
323 %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
332 define void @lds_atomic_sub1_noret_i64(i64 addrspace(3)* %ptr) nounwind {
333 %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
340 define void @lds_atomic_sub1_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
341 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
342 %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
349 define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
350 %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
357 define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
358 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
359 %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
366 define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
367 %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
374 define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
375 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
376 %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
383 define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
384 %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
391 define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
392 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
393 %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
399 ; define void @lds_atomic_nand_noret_i64(i64 addrspace(3)* %ptr) nounwind {
400 ; %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
407 define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
408 %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
415 define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
416 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
417 %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
424 define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
425 %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
432 define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
433 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
434 %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
441 define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
442 %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
449 define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
450 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
451 %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
458 define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
459 %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
466 define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
467 %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
468 %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst