; RUN: llc < %s -mtriple=powerpc-apple-darwin -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=PPC32
; FIXME: -verify-machineinstrs currently fails on ppc64 (mismatched register/instruction).
; This is already checked for in Atomics-64.ll
; RUN: llc < %s -mtriple=powerpc64-apple-darwin | FileCheck %s --check-prefix=CHECK --check-prefix=PPC64

; FIXME: we don't currently check for the operations themselves with CHECK-NEXT,
;   because they are implemented in a very messy way with lwarx/stwcx.
;   It should be fixed soon in another patch.

; We first check loads, for all sizes from i8 to i64.
; We also vary orderings to check for barriers.
; An unordered atomic load needs no memory barrier: a plain byte load suffices.
define i8 @load_i8_unordered(i8* %mem) {
; CHECK-LABEL: load_i8_unordered
; CHECK: lbz
; CHECK-NOT: sync
  %val = load atomic i8, i8* %mem unordered, align 1
  ret i8 %val
}
; A monotonic atomic load likewise needs no barrier on PPC.
define i16 @load_i16_monotonic(i16* %mem) {
; CHECK-LABEL: load_i16_monotonic
; CHECK: lhz
; CHECK-NOT: sync
  %val = load atomic i16, i16* %mem monotonic, align 2
  ret i16 %val
}
; An acquire load needs a trailing acquire fence: lwsync on ppc32, or the
; cmp/bne/isync sequence on ppc64.
define i32 @load_i32_acquire(i32* %mem) {
; CHECK-LABEL: load_i32_acquire
; CHECK: lwz [[VAL:r[0-9]+]]
  %val = load atomic i32, i32* %mem acquire, align 4
; NOTE(review): these were spelled "CHECK-PPC32:"/"CHECK-PPC64:", which are not
; prefixes registered by the RUN lines (those register PPC32/PPC64), so
; FileCheck silently ignored them. Renamed so the checks actually run.
; PPC32: lwsync
; PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; PPC64: bne- [[CR]], .+4
; PPC64: isync
  ret i32 %val
}
; A seq_cst load needs a leading sync; on ppc32 an i64 atomic is expanded to a
; __sync_* libcall, while ppc64 lowers it inline.
define i64 @load_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: load_i64_seq_cst
; CHECK: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: ld [[VAL:r[0-9]+]]
  %val = load atomic i64, i64* %mem seq_cst, align 8
; NOTE(review): "CHECK-PPC32:" is not a registered prefix, so the lwsync line
; below has always been dead; left as-is because the ppc32 path is a libcall
; and presumably emits no trailing lwsync here — confirm before enabling.
; CHECK-PPC32: lwsync
; NOTE(review): the PPC64 lines below were spelled "CHECK-PPC64:" (unregistered
; prefix, silently ignored); renamed so the acquire sequence is checked.
; PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; PPC64: bne- [[CR]], .+4
; PPC64: isync
  ret i64 %val
}

; Stores
; An unordered atomic store needs no barrier: a plain byte store suffices.
define void @store_i8_unordered(i8* %mem) {
; CHECK-LABEL: store_i8_unordered
; CHECK-NOT: sync
; CHECK: stb
  store atomic i8 42, i8* %mem unordered, align 1
  ret void
}
; A monotonic atomic store likewise needs no barrier on PPC.
define void @store_i16_monotonic(i16* %mem) {
; CHECK-LABEL: store_i16_monotonic
; CHECK-NOT: sync
; CHECK: sth
  store atomic i16 42, i16* %mem monotonic, align 2
  ret void
}
; A release store needs a leading lwsync barrier before the store.
define void @store_i32_release(i32* %mem) {
; CHECK-LABEL: store_i32_release
; CHECK: lwsync
; CHECK: stw
  store atomic i32 42, i32* %mem release, align 4
  ret void
}
; A seq_cst store needs a leading full sync; on ppc32 an i64 atomic store is
; expanded to a __sync_* libcall, while ppc64 emits an inline std.
define void @store_i64_seq_cst(i64* %mem) {
; CHECK-LABEL: store_i64_seq_cst
; CHECK: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: std
  store atomic i64 42, i64* %mem seq_cst, align 8
  ret void
}

; Atomic CmpXchg
; seq_cst cmpxchg: full sync barrier before, lwsync after the ll/sc loop.
define i8 @cas_strong_i8_sc_sc(i8* %mem) {
; CHECK-LABEL: cas_strong_i8_sc_sc
; CHECK: sync
  %val = cmpxchg i8* %mem, i8 0, i8 1 seq_cst seq_cst
; CHECK: lwsync
  %loaded = extractvalue { i8, i1} %val, 0
  ret i8 %loaded
}
; acquire cmpxchg: no leading barrier, lwsync after the ll/sc loop.
define i16 @cas_weak_i16_acquire_acquire(i16* %mem) {
; CHECK-LABEL: cas_weak_i16_acquire_acquire
; CHECK-NOT: sync
  %val = cmpxchg weak i16* %mem, i16 0, i16 1 acquire acquire
; CHECK: lwsync
  %loaded = extractvalue { i16, i1} %val, 0
  ret i16 %loaded
}
; acq_rel cmpxchg: lwsync barrier both before and after the ll/sc loop.
define i32 @cas_strong_i32_acqrel_acquire(i32* %mem) {
; CHECK-LABEL: cas_strong_i32_acqrel_acquire
; CHECK: lwsync
  %val = cmpxchg i32* %mem, i32 0, i32 1 acq_rel acquire
; CHECK: lwsync
  %loaded = extractvalue { i32, i1} %val, 0
  ret i32 %loaded
}
; release cmpxchg: lwsync before the ll/sc loop, no trailing barrier.
define i64 @cas_weak_i64_release_monotonic(i64* %mem) {
; CHECK-LABEL: cas_weak_i64_release_monotonic
; CHECK: lwsync
  %val = cmpxchg weak i64* %mem, i64 0, i64 1 release monotonic
; NOTE(review): "[sync ]" is literal text to FileCheck (brackets are not regex
; syntax outside {{...}}), so this CHECK-NOT can never match and is vacuous.
; The apparent intent is "no standalone sync after the loop" without tripping
; on lwsync/isync — rewrite with a {{...}} regex if this should be enforced.
; CHECK-NOT: [sync ]
  %loaded = extractvalue { i64, i1} %val, 0
  ret i64 %loaded
}

; AtomicRMW
; monotonic atomicrmw: no barriers around the ll/sc loop.
define i8 @add_i8_monotonic(i8* %mem, i8 %operand) {
; CHECK-LABEL: add_i8_monotonic
; CHECK-NOT: sync
  %val = atomicrmw add i8* %mem, i8 %operand monotonic
  ret i8 %val
}
; seq_cst atomicrmw: full sync before, lwsync after the ll/sc loop.
define i16 @xor_i16_seq_cst(i16* %mem, i16 %operand) {
; CHECK-LABEL: xor_i16_seq_cst
; CHECK: sync
  %val = atomicrmw xor i16* %mem, i16 %operand seq_cst
; CHECK: lwsync
  ret i16 %val
}
; acq_rel atomicrmw: lwsync both before and after the ll/sc loop.
define i32 @xchg_i32_acq_rel(i32* %mem, i32 %operand) {
; CHECK-LABEL: xchg_i32_acq_rel
; CHECK: lwsync
  %val = atomicrmw xchg i32* %mem, i32 %operand acq_rel
; CHECK: lwsync
  ret i32 %val
}
; release atomicrmw: lwsync before the ll/sc loop, no trailing barrier.
define i64 @and_i64_release(i64* %mem, i64 %operand) {
; CHECK-LABEL: and_i64_release
; CHECK: lwsync
  %val = atomicrmw and i64* %mem, i64 %operand release
; NOTE(review): "[sync ]" is literal text to FileCheck (brackets are not regex
; syntax outside {{...}}), so this CHECK-NOT can never match and is vacuous.
; Rewrite with a {{...}} regex if the no-trailing-sync property should be
; enforced without matching lwsync/isync.
; CHECK-NOT: [sync ]
  ret i64 %val
}