; RUN: llc -march=mipsel -mcpu=mips2 < %s | FileCheck %s
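; Check that the llvm.atomic.* intrinsics are lowered to ll/sc-based retry
; loops on little-endian MIPS2, for both word-sized (i32) and subword (i8)
; operations, and that llvm.memory.barrier is lowered to sync.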

declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
declare i32 @llvm.atomic.load.nand.i32.p0i32(i32* nocapture, i32) nounwind
declare i32 @llvm.atomic.swap.i32.p0i32(i32* nocapture, i32) nounwind
declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* nocapture, i32, i32) nounwind

declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
declare i8 @llvm.atomic.load.nand.i8.p0i8(i8* nocapture, i8) nounwind
declare i8 @llvm.atomic.swap.i8.p0i8(i8* nocapture, i8) nounwind
declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* nocapture, i8, i8) nounwind

declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind

@x = common global i32 0, align 4

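; The i32 tests below check the word-sized read-modify-write pattern: the
; address of @x is loaded from the GOT, ll loads the current value linked,
; the new value is computed, sc conditionally stores it, and beq against
; $zero retries the loop whenever sc fails (writes back 0).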
define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
entry:
  %0 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* @x, i32 %incr)
  ret i32 %0

; CHECK:   AtomicLoadAdd32:
; CHECK:   lw      $[[R0:[0-9]+]], %got(x)($gp)
; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R1:[0-9]+]], 0($[[R0]])
; CHECK:   addu    $[[R2:[0-9]+]], $[[R1]], $4
; CHECK:   sc      $[[R2]], 0($[[R0]])
; CHECK:   beq     $[[R2]], $zero, $[[BB0]]
}

define i32 @AtomicLoadNand32(i32 %incr) nounwind {
entry:
  %0 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* @x, i32 %incr)
  ret i32 %0

; CHECK:   AtomicLoadNand32:
; CHECK:   lw      $[[R0:[0-9]+]], %got(x)($gp)
; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R1:[0-9]+]], 0($[[R0]])
; CHECK:   and     $[[R3:[0-9]+]], $[[R1]], $4
; CHECK:   nor     $[[R2:[0-9]+]], $zero, $[[R3]]
; CHECK:   sc      $[[R2]], 0($[[R0]])
; CHECK:   beq     $[[R2]], $zero, $[[BB0]]
}

define i32 @AtomicSwap32(i32 %newval) nounwind {
entry:
  %newval.addr = alloca i32, align 4
  store i32 %newval, i32* %newval.addr, align 4
  %tmp = load i32* %newval.addr, align 4
  %0 = call i32 @llvm.atomic.swap.i32.p0i32(i32* @x, i32 %tmp)
  ret i32 %0

; CHECK:   AtomicSwap32:
; CHECK:   lw      $[[R0:[0-9]+]], %got(x)($gp)
; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      ${{[0-9]+}}, 0($[[R0]])
; CHECK:   sc      $[[R2:[0-9]+]], 0($[[R0]])
; CHECK:   beq     $[[R2]], $zero, $[[BB0]]
}

define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
entry:
  %newval.addr = alloca i32, align 4
  store i32 %newval, i32* %newval.addr, align 4
  %tmp = load i32* %newval.addr, align 4
  %0 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* @x, i32 %oldval, i32 %tmp)
  ret i32 %0

; CHECK:   AtomicCmpSwap32:
; CHECK:   lw      $[[R0:[0-9]+]], %got(x)($gp)
; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $2, 0($[[R0]])
; CHECK:   bne     $2, $4, $[[BB1:[A-Z_0-9]+]]
; CHECK:   sc      $[[R2:[0-9]+]], 0($[[R0]])
; CHECK:   beq     $[[R2]], $zero, $[[BB0]]
; CHECK:   $[[BB1]]:
}

@y = common global i8 0, align 1

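; The i8 tests check the subword lowering: the pointer is rounded down to a
; word boundary (and with -4), the byte offset within the word becomes a bit
; shift (low two bits times 8), and a 0xff mask is shifted into position.
; The ll/sc loop then operates on the containing word, updating only the
; masked byte, and the result is shifted back down and sign-extended with
; sll/sra by 24.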
define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
entry:
  %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @y, i8 %incr)
  ret i8 %0

; CHECK:   AtomicLoadAdd8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   sllv    $[[R9:[0-9]+]], $4, $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R10:[0-9]+]], 0($[[R2]])
; CHECK:   addu    $[[R11:[0-9]+]], $[[R10]], $[[R9]]
; CHECK:   and     $[[R12:[0-9]+]], $[[R11]], $[[R6]]
; CHECK:   and     $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK:   or      $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; CHECK:   sc      $[[R14]], 0($[[R2]])
; CHECK:   beq     $[[R14]], $zero, $[[BB0]]

; CHECK:   and     $[[R15:[0-9]+]], $[[R10]], $[[R6]]
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
entry:
  %0 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @y, i8 %incr)
  ret i8 %0

; CHECK:   AtomicLoadSub8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   sllv    $[[R9:[0-9]+]], $4, $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R10:[0-9]+]], 0($[[R2]])
; CHECK:   subu    $[[R11:[0-9]+]], $[[R10]], $[[R9]]
; CHECK:   and     $[[R12:[0-9]+]], $[[R11]], $[[R6]]
; CHECK:   and     $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK:   or      $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; CHECK:   sc      $[[R14]], 0($[[R2]])
; CHECK:   beq     $[[R14]], $zero, $[[BB0]]

; CHECK:   and     $[[R15:[0-9]+]], $[[R10]], $[[R6]]
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
entry:
  %0 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @y, i8 %incr)
  ret i8 %0

; CHECK:   AtomicLoadNand8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   sllv    $[[R9:[0-9]+]], $4, $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R10:[0-9]+]], 0($[[R2]])
; CHECK:   and     $[[R18:[0-9]+]], $[[R10]], $[[R9]]
; CHECK:   nor     $[[R11:[0-9]+]], $zero, $[[R18]]
; CHECK:   and     $[[R12:[0-9]+]], $[[R11]], $[[R6]]
; CHECK:   and     $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK:   or      $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; CHECK:   sc      $[[R14]], 0($[[R2]])
; CHECK:   beq     $[[R14]], $zero, $[[BB0]]

; CHECK:   and     $[[R15:[0-9]+]], $[[R10]], $[[R6]]
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
entry:
  %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @y, i8 %newval)
  ret i8 %0

; CHECK:   AtomicSwap8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   sllv    $[[R9:[0-9]+]], $4, $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R10:[0-9]+]], 0($[[R2]])
; CHECK:   and     $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK:   or      $[[R14:[0-9]+]], $[[R13]], $[[R9]]
; CHECK:   sc      $[[R14]], 0($[[R2]])
; CHECK:   beq     $[[R14]], $zero, $[[BB0]]

; CHECK:   and     $[[R15:[0-9]+]], $[[R10]], $[[R6]]
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

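; AtomicCmpSwap8 additionally zero-extends both the expected and the new
; value (andi ..., 255) before shifting them into position; the loaded byte
; is compared against the shifted expected value, and bne exits the loop
; early when the comparison fails.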
define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
entry:
  %0 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @y, i8 %oldval, i8 %newval)
  ret i8 %0

; CHECK:   AtomicCmpSwap8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   andi    $[[R8:[0-9]+]], $4, 255
; CHECK:   sllv    $[[R9:[0-9]+]], $[[R8]], $[[R4]]
; CHECK:   andi    $[[R10:[0-9]+]], $5, 255
; CHECK:   sllv    $[[R11:[0-9]+]], $[[R10]], $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R12:[0-9]+]], 0($[[R2]])
; CHECK:   and     $[[R13:[0-9]+]], $[[R12]], $[[R6]]
; CHECK:   bne     $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]

; CHECK:   and     $[[R14:[0-9]+]], $[[R12]], $[[R7]]
; CHECK:   or      $[[R15:[0-9]+]], $[[R14]], $[[R11]]
; CHECK:   sc      $[[R15]], 0($[[R2]])
; CHECK:   beq     $[[R15]], $zero, $[[BB0]]

; CHECK:   $[[BB1]]:
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R13]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

@countsint = common global i32 0, align 4

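; CheckSync verifies that a full llvm.memory.barrier (all arguments true) is
; lowered to "sync 0" and that a barrier is emitted on each side of the
; ll/sc loop.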
define i32 @CheckSync(i32 %v) nounwind noinline {
entry:
  tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
  %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* @countsint, i32 %v)
  tail call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
  ret i32 %0

; CHECK:   CheckSync:
; CHECK:   sync 0
; CHECK:   ll
; CHECK:   sc
; CHECK:   beq
; CHECK:   sync 0
}