; RUN: llc < %s -march=arm64 -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s

; acquire/acquire cmpxchg expands to an ldaxr/stxr loop, with clrex on the
; compare-failure path.
define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-LABEL: val_compare_and_swap:
; CHECK-NEXT: mov    x[[ADDR:[0-9]+]], x0
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK-NEXT: ldaxr  [[RESULT:w[0-9]+]], [x[[ADDR]]]
; CHECK-NEXT: cmp    [[RESULT]], w1
; CHECK-NEXT: b.ne   [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr   [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]]
; CHECK-NEXT: cbnz   [[SCRATCH_REG]], [[TRYBB]]
; CHECK-NEXT: b      [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
; CHECK-NEXT: [[EXITBB]]:
  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
  %val = extractvalue { i32, i1 } %pair, 0
  ret i32 %val
}

; Same expansion as above, but the new value is loaded from memory first;
; checks the loaded register is forwarded into the stxr.
define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 {
; CHECK-LABEL: val_compare_and_swap_from_load:
; CHECK-NEXT: ldr    [[NEW:w[0-9]+]], [x2]
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK-NEXT: ldaxr  [[RESULT:w[0-9]+]], [x0]
; CHECK-NEXT: cmp    [[RESULT]], w1
; CHECK-NEXT: b.ne   [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr   [[SCRATCH_REG:w[0-9]+]], [[NEW]], [x0]
; CHECK-NEXT: cbnz   [[SCRATCH_REG]], [[TRYBB]]
; CHECK-NEXT: b      [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
; CHECK-NEXT: [[EXITBB]]:
  %new = load i32, i32* %pnew
  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
  %val = extractvalue { i32, i1 } %pair, 0
  ret i32 %val
}

; acq_rel/monotonic cmpxchg: the store side must be release (stlxr), the load
; side acquire (ldaxr). The ldaxr/stlxr address patterns were missing their
; closing ']' — restored to match the sibling tests.
define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-LABEL: val_compare_and_swap_rel:
; CHECK-NEXT: mov    x[[ADDR:[0-9]+]], x0
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK-NEXT: ldaxr  [[RESULT:w[0-9]+]], [x[[ADDR]]]
; CHECK-NEXT: cmp    [[RESULT]], w1
; CHECK-NEXT: b.ne   [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stlxr  [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]]
; CHECK-NEXT: cbnz   [[SCRATCH_REG]], [[TRYBB]]
; CHECK-NEXT: b      [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
; CHECK-NEXT: [[EXITBB]]:
  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic
  %val = extractvalue { i32, i1 } %pair, 0
  ret i32 %val
}

; monotonic/monotonic 64-bit cmpxchg: no ordering needed, so plain ldxr/stxr
; (no acquire/release variants).
define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
; CHECK-LABEL: val_compare_and_swap_64:
; CHECK-NEXT: mov    x[[ADDR:[0-9]+]], x0
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK-NEXT: ldxr   [[RESULT:x[0-9]+]], [x[[ADDR]]]
; CHECK-NEXT: cmp    [[RESULT]], x1
; CHECK-NEXT: b.ne   [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr   [[SCRATCH_REG:w[0-9]+]], x2, [x[[ADDR]]]
; CHECK-NEXT: cbnz   [[SCRATCH_REG]], [[TRYBB]]
; CHECK-NEXT: b      [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
; CHECK-NEXT: [[EXITBB]]:
  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic
  %val = extractvalue { i64, i1 } %pair, 0
  ret i64 %val
}

; atomicrmw nand with constant 7: expanded as mvn (invert) then
; orr with ~7 (0xfffffff8) inside the ll/sc loop. Release ordering puts the
; barrier on the store side (stlxr). The CHECK-NOT guards against the status
; register aliasing the data register.
define i32 @fetch_and_nand(i32* %p) #0 {
; CHECK-LABEL: fetch_and_nand:
; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK: ldxr   w[[DEST_REG:[0-9]+]], [x0]
; CHECK: mvn    [[TMP_REG:w[0-9]+]], w[[DEST_REG]]
; CHECK: orr    [[SCRATCH2_REG:w[0-9]+]], [[TMP_REG]], #0xfffffff8
; CHECK-NOT: stlxr [[SCRATCH2_REG]], [[SCRATCH2_REG]]
; CHECK: stlxr   [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
; CHECK: cbnz   [[SCRATCH_REG]], [[TRYBB]]
; CHECK: mov    x0, x[[DEST_REG]]
  %val = atomicrmw nand i32* %p, i32 7 release
  ret i32 %val
}

; 64-bit nand with acq_rel ordering: ldaxr (acquire) + stlxr (release).
define i64 @fetch_and_nand_64(i64* %p) #0 {
; CHECK-LABEL: fetch_and_nand_64:
; CHECK: mov    x[[ADDR:[0-9]+]], x0
; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK: ldaxr   x[[DEST_REG:[0-9]+]], [x[[ADDR]]]
; CHECK: mvn    w[[TMP_REG:[0-9]+]], w[[DEST_REG]]
; CHECK: orr    [[SCRATCH2_REG:x[0-9]+]], x[[TMP_REG]], #0xfffffffffffffff8
; CHECK: stlxr   [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
; CHECK: cbnz   [[SCRATCH_REG]], [[TRYBB]]

  %val = atomicrmw nand i64* %p, i64 7 acq_rel
  ret i64 %val
}

; atomicrmw or, seq_cst: 5 is not a valid logical immediate, so it is
; materialized in a register first; seq_cst uses ldaxr + stlxr.
define i32 @fetch_and_or(i32* %p) #0 {
; CHECK-LABEL: fetch_and_or:
; CHECK: mov   [[OLDVAL_REG:w[0-9]+]], #5
; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK: ldaxr   w[[DEST_REG:[0-9]+]], [x0]
; CHECK: orr    [[SCRATCH2_REG:w[0-9]+]], w[[DEST_REG]], [[OLDVAL_REG]]
; CHECK-NOT: stlxr [[SCRATCH2_REG]], [[SCRATCH2_REG]]
; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
; CHECK: cbnz   [[SCRATCH_REG]], [[TRYBB]]
; CHECK: mov    x0, x[[DEST_REG]]
  %val = atomicrmw or i32* %p, i32 5 seq_cst
  ret i32 %val
}

; 64-bit or with the immediate 7 (a valid logical immediate, folded into orr);
; monotonic ordering, so plain ldxr/stxr. The label check was a bare CHECK:;
; upgraded to CHECK-LABEL: for consistency with every other function here.
define i64 @fetch_and_or_64(i64* %p) #0 {
; CHECK-LABEL: fetch_and_or_64:
; CHECK: mov    x[[ADDR:[0-9]+]], x0
; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK: ldxr   [[DEST_REG:x[0-9]+]], [x[[ADDR]]]
; CHECK: orr    [[SCRATCH2_REG:x[0-9]+]], [[DEST_REG]], #0x7
; CHECK: stxr   [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
; CHECK: cbnz   [[SCRATCH_REG]], [[TRYBB]]
  %val = atomicrmw or i64* %p, i64 7 monotonic
  ret i64 %val
}

; fence acquire lowers to the load-only barrier dmb ishld.
define void @acquire_fence() #0 {
   fence acquire
   ret void
   ; CHECK-LABEL: acquire_fence:
   ; CHECK: dmb ishld
}

; fence release lowers to a full dmb ish (the {{$}} rejects dmb ishld/ishst).
define void @release_fence() #0 {
   fence release
   ret void
   ; CHECK-LABEL: release_fence:
   ; CHECK: dmb ish{{$}}
}

; fence seq_cst also lowers to a full dmb ish.
define void @seq_cst_fence() #0 {
   fence seq_cst
   ret void
   ; CHECK-LABEL: seq_cst_fence:
   ; CHECK: dmb ish{{$}}
}

; seq_cst atomic load uses the load-acquire instruction ldar.
define i32 @atomic_load(i32* %p) #0 {
   %r = load atomic i32, i32* %p seq_cst, align 4
   ret i32 %r
   ; CHECK-LABEL: atomic_load:
   ; CHECK: ldar
}

; Relaxed (monotonic/unordered) i8 loads may use all the normal addressing
; modes: scaled unsigned imm, register offset, unscaled (ldur), and
; ADD-immediate materialization for out-of-range offsets.
define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_8:
  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
  %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1
; CHECK: ldrb {{w[0-9]+}}, [x0, #4095]

  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
  %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1
  %tot1 = add i8 %val_unsigned, %val_regoff
; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]

  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
  %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1
  %tot2 = add i8 %tot1, %val_unscaled
; CHECK: ldurb {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
  %val_random = load atomic i8, i8* %ptr_random unordered, align 1
  %tot3 = add i8 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldrb {{w[0-9]+}}, [x[[ADDR]]]

  ret i8 %tot3
}

; Same addressing-mode coverage for relaxed i16 loads (offsets scaled by 2).
define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_16:
  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
  %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2
; CHECK: ldrh {{w[0-9]+}}, [x0, #8190]

  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
  %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2
  %tot1 = add i16 %val_unsigned, %val_regoff
; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]

  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
  %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2
  %tot2 = add i16 %tot1, %val_unscaled
; CHECK: ldurh {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
  %val_random = load atomic i16, i16* %ptr_random unordered, align 2
  %tot3 = add i16 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldrh {{w[0-9]+}}, [x[[ADDR]]]

  ret i16 %tot3
}

; Same addressing-mode coverage for relaxed i32 loads (offsets scaled by 4).
define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_32:
  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
  %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4
; CHECK: ldr {{w[0-9]+}}, [x0, #16380]

  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
  %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4
  %tot1 = add i32 %val_unsigned, %val_regoff
; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]

  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
  %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4
  %tot2 = add i32 %tot1, %val_unscaled
; CHECK: ldur {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
  %val_random = load atomic i32, i32* %ptr_random unordered, align 4
  %tot3 = add i32 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldr {{w[0-9]+}}, [x[[ADDR]]]

  ret i32 %tot3
}

; Same addressing-mode coverage for relaxed i64 loads (offsets scaled by 8).
define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_64:
  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
  %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8
; CHECK: ldr {{x[0-9]+}}, [x0, #32760]

  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
  %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8
  %tot1 = add i64 %val_unsigned, %val_regoff
; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]

  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
  %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8
  %tot2 = add i64 %tot1, %val_unscaled
; CHECK: ldur {{x[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
  %val_random = load atomic i64, i64* %ptr_random unordered, align 8
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldr {{x[0-9]+}}, [x[[ADDR]]]
  %tot3 = add i64 %tot2, %val_random

  ret i64 %tot3
}

; seq_cst atomic store uses the store-release instruction stlr.
; (The "atomc" typo is long-standing and kept: the define and its CHECK-LABEL
; are self-consistent.)
define void @atomc_store(i32* %p) #0 {
   store atomic i32 4, i32* %p seq_cst, align 4
   ret void
   ; CHECK-LABEL: atomc_store:
   ; CHECK: stlr
}

; Relaxed i8 stores may use all the normal addressing modes, mirroring the
; atomic_load_relaxed_8 coverage above.
define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_8:
  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
  store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1
; CHECK: strb {{w[0-9]+}}, [x0, #4095]

  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
  store atomic i8 %val, i8* %ptr_regoff unordered, align 1
; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw]

  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
  store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
; CHECK: sturb {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
  store atomic i8 %val, i8* %ptr_random unordered, align 1
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: strb {{w[0-9]+}}, [x[[ADDR]]]

  ret void
}

; Relaxed i16 store addressing modes (offsets scaled by 2).
define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_16:
  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
  store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2
; CHECK: strh {{w[0-9]+}}, [x0, #8190]

  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
  store atomic i16 %val, i16* %ptr_regoff unordered, align 2
; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1]

  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
  store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
; CHECK: sturh {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
  store atomic i16 %val, i16* %ptr_random unordered, align 2
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: strh {{w[0-9]+}}, [x[[ADDR]]]

  ret void
}

; Relaxed i32 store addressing modes (offsets scaled by 4).
define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_32:
  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
  store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4
; CHECK: str {{w[0-9]+}}, [x0, #16380]

  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
  store atomic i32 %val, i32* %ptr_regoff unordered, align 4
; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2]

  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
  store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
; CHECK: stur {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
  store atomic i32 %val, i32* %ptr_random unordered, align 4
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: str {{w[0-9]+}}, [x[[ADDR]]]

  ret void
}

; Relaxed i64 store addressing modes (offsets scaled by 8).
define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_64:
  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
  store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8
; CHECK: str {{x[0-9]+}}, [x0, #32760]

  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
  store atomic i64 %val, i64* %ptr_regoff unordered, align 8
; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3]

  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
  store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
; CHECK: stur {{x[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
  store atomic i64 %val, i64* %ptr_random unordered, align 8
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: str {{x[0-9]+}}, [x[[ADDR]]]

  ret void
}

; rdar://11531169
; rdar://11531308

; Types and external global used by @next_id below.
%"class.X::Atomic" = type { %struct.x_atomic_t }
%struct.x_atomic_t = type { i32 }

@counter = external hidden global %"class.X::Atomic", align 4

; Regression test (rdar://11531169, rdar://11531308): seq_cst atomicrmw add
; on a global, used twice across a branch, must compile without verifier
; errors (no CHECK lines; -verify-machineinstrs does the checking).
define i32 @next_id() nounwind optsize ssp align 2 {
entry:
  %0 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
  %add.i = add i32 %0, 1
  %tobool = icmp eq i32 %add.i, 0
  br i1 %tobool, label %if.else, label %return

if.else:                                          ; preds = %entry
  %1 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
  %add.i2 = add i32 %1, 1
  br label %return

return:                                           ; preds = %if.else, %entry
  %retval.0 = phi i32 [ %add.i2, %if.else ], [ %add.i, %entry ]
  ret i32 %retval.0
}

attributes #0 = { nounwind }