; RUN: llc -march=mipsel < %s | FileCheck %s

; Shared 32-bit word used by the i32 atomic tests below.
@x = common global i32 0, align 4

; atomicrmw add on an i32: must expand to an ll/sc retry loop that adds the
; incoming argument ($4) to the loaded value and stores it back.
define i32 @AtomicLoadAdd32(i32 %incr) nounwind {
entry:
  %0 = atomicrmw add i32* @x, i32 %incr monotonic
  ret i32 %0

; CHECK:   AtomicLoadAdd32:
; CHECK:   lw      $[[R0:[0-9]+]], %got(x)($gp)
; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R1:[0-9]+]], 0($[[R0]])
; CHECK:   addu    $[[R2:[0-9]+]], $[[R1]], $4
; CHECK:   sc      $[[R2]], 0($[[R0]])
; CHECK:   beq     $[[R2]], $zero, $[[BB0]]
}

; atomicrmw nand on an i32: MIPS has no nand instruction, so the expansion is
; and followed by nor-with-$zero inside the ll/sc loop.
define i32 @AtomicLoadNand32(i32 %incr) nounwind {
entry:
  %0 = atomicrmw nand i32* @x, i32 %incr monotonic
  ret i32 %0

; CHECK:   AtomicLoadNand32:
; CHECK:   lw      $[[R0:[0-9]+]], %got(x)($gp)
; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R1:[0-9]+]], 0($[[R0]])
; CHECK:   and     $[[R3:[0-9]+]], $[[R1]], $4
; CHECK:   nor     $[[R2:[0-9]+]], $zero, $[[R3]]
; CHECK:   sc      $[[R2]], 0($[[R0]])
; CHECK:   beq     $[[R2]], $zero, $[[BB0]]
}

; atomicrmw xchg on an i32: the ll result is discarded (any register), only the
; sc of the new value and the retry branch are pinned.
define i32 @AtomicSwap32(i32 %newval) nounwind {
entry:
  %newval.addr = alloca i32, align 4
  store i32 %newval, i32* %newval.addr, align 4
  %tmp = load i32* %newval.addr, align 4
  %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
  ret i32 %0

; CHECK:   AtomicSwap32:
; CHECK:   lw      $[[R0:[0-9]+]], %got(x)($gp)
; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      ${{[0-9]+}}, 0($[[R0]])
; CHECK:   sc      $[[R2:[0-9]+]], 0($[[R0]])
; CHECK:   beq     $[[R2]], $zero, $[[BB0]]
}

; cmpxchg on an i32: the loaded value lands in $2 (the return register), a bne
; against the expected value ($4) exits the loop, otherwise sc/beq retries.
define i32 @AtomicCmpSwap32(i32 %oldval, i32 %newval) nounwind {
entry:
  %newval.addr = alloca i32, align 4
  store i32 %newval, i32* %newval.addr, align 4
  %tmp = load i32* %newval.addr, align 4
  %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic
  ret i32 %0

; CHECK:   AtomicCmpSwap32:
; CHECK:   lw      $[[R0:[0-9]+]], %got(x)($gp)
; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $2, 0($[[R0]])
; CHECK:   bne     $2, $4, $[[BB1:[A-Z_0-9]+]]
; CHECK:   sc      $[[R2:[0-9]+]], 0($[[R0]])
; CHECK:   beq     $[[R2]], $zero, $[[BB0]]
; CHECK:   $[[BB1]]:
}



; Shared byte used by the i8 atomic tests below; sub-word atomics are emulated
; with word-sized ll/sc plus mask/shift sequences.
@y = common global i8 0, align 1

; atomicrmw add on an i8: checks the full sub-word expansion — align the
; address down to the word ($[[R2]]), build a byte mask shifted into position,
; do the ll/addu/mask-merge/sc loop on the word, then extract and sign-extend
; the result byte via sll/sra 24.
define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind {
entry:
  %0 = atomicrmw add i8* @y, i8 %incr monotonic
  ret i8 %0

; CHECK:   AtomicLoadAdd8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   sllv    $[[R9:[0-9]+]], $4, $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R10:[0-9]+]], 0($[[R2]])
; CHECK:   addu    $[[R11:[0-9]+]], $[[R10]], $[[R9]]
; CHECK:   and     $[[R12:[0-9]+]], $[[R11]], $[[R6]]
; CHECK:   and     $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK:   or      $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; CHECK:   sc      $[[R14]], 0($[[R2]])
; CHECK:   beq     $[[R14]], $zero, $[[BB0]]

; CHECK:   and     $[[R15:[0-9]+]], $[[R10]], $[[R6]]
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

; atomicrmw sub on an i8: identical sub-word expansion to AtomicLoadAdd8 with
; subu in place of addu inside the ll/sc loop.
define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind {
entry:
  %0 = atomicrmw sub i8* @y, i8 %incr monotonic
  ret i8 %0

; CHECK:   AtomicLoadSub8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   sllv    $[[R9:[0-9]+]], $4, $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R10:[0-9]+]], 0($[[R2]])
; CHECK:   subu    $[[R11:[0-9]+]], $[[R10]], $[[R9]]
; CHECK:   and     $[[R12:[0-9]+]], $[[R11]], $[[R6]]
; CHECK:   and     $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK:   or      $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; CHECK:   sc      $[[R14]], 0($[[R2]])
; CHECK:   beq     $[[R14]], $zero, $[[BB0]]

; CHECK:   and     $[[R15:[0-9]+]], $[[R10]], $[[R6]]
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

; atomicrmw nand on an i8: sub-word expansion with the nand emulated as
; and + nor-with-$zero inside the ll/sc loop.
define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind {
entry:
  %0 = atomicrmw nand i8* @y, i8 %incr monotonic
  ret i8 %0

; CHECK:   AtomicLoadNand8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   sllv    $[[R9:[0-9]+]], $4, $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R10:[0-9]+]], 0($[[R2]])
; CHECK:   and     $[[R18:[0-9]+]], $[[R10]], $[[R9]]
; CHECK:   nor     $[[R11:[0-9]+]], $zero, $[[R18]]
; CHECK:   and     $[[R12:[0-9]+]], $[[R11]], $[[R6]]
; CHECK:   and     $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK:   or      $[[R14:[0-9]+]], $[[R13]], $[[R12]]
; CHECK:   sc      $[[R14]], 0($[[R2]])
; CHECK:   beq     $[[R14]], $zero, $[[BB0]]

; CHECK:   and     $[[R15:[0-9]+]], $[[R10]], $[[R6]]
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

; atomicrmw xchg on an i8: the loop body just merges the pre-shifted new byte
; ($[[R9]]) into the masked word — no arithmetic op on the old value.
define signext i8 @AtomicSwap8(i8 signext %newval) nounwind {
entry:
  %0 = atomicrmw xchg i8* @y, i8 %newval monotonic
  ret i8 %0

; CHECK:   AtomicSwap8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   sllv    $[[R9:[0-9]+]], $4, $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R10:[0-9]+]], 0($[[R2]])
; CHECK:   and     $[[R13:[0-9]+]], $[[R10]], $[[R7]]
; CHECK:   or      $[[R14:[0-9]+]], $[[R13]], $[[R9]]
; CHECK:   sc      $[[R14]], 0($[[R2]])
; CHECK:   beq     $[[R14]], $zero, $[[BB0]]

; CHECK:   and     $[[R15:[0-9]+]], $[[R10]], $[[R6]]
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R15]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

; cmpxchg on an i8: both the expected ($4) and new ($5) bytes are masked to
; 8 bits and shifted into lane position up front; the loop compares the masked
; loaded byte, branches out on mismatch, otherwise merges and sc's the word.
define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind {
entry:
  %0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic
  ret i8 %0

; CHECK:   AtomicCmpSwap8:
; CHECK:   lw      $[[R0:[0-9]+]], %got(y)($gp)
; CHECK:   addiu   $[[R1:[0-9]+]], $zero, -4
; CHECK:   and     $[[R2:[0-9]+]], $[[R0]], $[[R1]]
; CHECK:   andi    $[[R3:[0-9]+]], $[[R0]], 3
; CHECK:   sll     $[[R4:[0-9]+]], $[[R3]], 3
; CHECK:   ori     $[[R5:[0-9]+]], $zero, 255
; CHECK:   sllv    $[[R6:[0-9]+]], $[[R5]], $[[R4]]
; CHECK:   nor     $[[R7:[0-9]+]], $zero, $[[R6]]
; CHECK:   andi    $[[R8:[0-9]+]], $4, 255
; CHECK:   sllv    $[[R9:[0-9]+]], $[[R8]], $[[R4]]
; CHECK:   andi    $[[R10:[0-9]+]], $5, 255
; CHECK:   sllv    $[[R11:[0-9]+]], $[[R10]], $[[R4]]

; CHECK:   $[[BB0:[A-Z_0-9]+]]:
; CHECK:   ll      $[[R12:[0-9]+]], 0($[[R2]])
; CHECK:   and     $[[R13:[0-9]+]], $[[R12]], $[[R6]]
; CHECK:   bne     $[[R13]], $[[R9]], $[[BB1:[A-Z_0-9]+]]

; CHECK:   and     $[[R14:[0-9]+]], $[[R12]], $[[R7]]
; CHECK:   or      $[[R15:[0-9]+]], $[[R14]], $[[R11]]
; CHECK:   sc      $[[R15]], 0($[[R2]])
; CHECK:   beq     $[[R15]], $zero, $[[BB0]]

; CHECK:   $[[BB1]]:
; CHECK:   srlv    $[[R16:[0-9]+]], $[[R13]], $[[R4]]
; CHECK:   sll     $[[R17:[0-9]+]], $[[R16]], 24
; CHECK:   sra     $2, $[[R17]], 24
}

; Word used by the seq_cst barrier test below.
@countsint = common global i32 0, align 4

; seq_cst atomicrmw: unlike the monotonic tests above, the ll/sc loop must be
; bracketed by sync instructions to get sequentially-consistent ordering.
define i32 @CheckSync(i32 %v) nounwind noinline {
entry:
  %0 = atomicrmw add i32* @countsint, i32 %v seq_cst
  ret i32 %0

; CHECK:   CheckSync:
; CHECK:   sync 0
; CHECK:   ll
; CHECK:   sc
; CHECK:   beq
; CHECK:   sync 0
}