/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

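/*
 * Each operation below is emitted as an ARM64_LSE_ATOMIC_INSN alternative:
 * at boot the kernel patches in either a branch to the out-of-line LL/SC
 * implementation (__LL_SC_CALL) or the inline LSE instruction(s). The
 * operands live in fixed registers (w0/x0, x1, ...) because the LL/SC
 * fallback is a real function call and must find its arguments in the
 * procedure-call registers.
 *
 * atomic_andnot() maps directly onto STCLR, which atomically clears the
 * bits of *v that are set in i (i.e. *v &= ~i).
 */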
static inline void atomic_andnot(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
	"	stclr	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic_or(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
	"	stset	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic_xor(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
	"	steor	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
	"	stadd	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

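/*
 * The *_return variants use LDADD, which writes the value observed in
 * memory to a destination register; adding i back in yields the new value.
 * w30 (the 32-bit half of the link register) serves as that scratch
 * register because it is already in __LL_SC_CLOBBERS: the out-of-line
 * LL/SC call clobbers it anyway. The leading "nop" pads the LL/SC side so
 * that both alternatives are the same number of instructions.
 */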
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
{ \
	register int w0 asm ("w0") = i; \
	register atomic_t *x1 asm ("x1") = v; \
\
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	"	nop\n" \
	__LL_SC_ATOMIC(add_return##name), \
	/* LSE atomics */ \
	"	ldadd" #mb "	%w[i], w30, %[v]\n" \
	"	add	%w[i], %w[i], w30") \
	: [i] "+r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
\
	return w0; \
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

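/*
 * LSE has no atomic AND-with-operand store, so atomic_and() inverts the
 * operand with MVN and then uses STCLR: clearing the bits of ~i is the
 * same as *v &= i. The "&" (early clobber) on %[i] tells the compiler the
 * operand register is written before all inputs have been consumed.
 */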
static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(and),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

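/*
 * Likewise there is no atomic subtract store: atomic_sub() negates the
 * operand and uses STADD, since *v + (-i) == *v - i.
 */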
static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(sub),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int atomic_sub_return##name(int i, atomic_t *v) \
{ \
	register int w0 asm ("w0") = i; \
	register atomic_t *x1 asm ("x1") = v; \
\
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	"	nop\n" \
	__LL_SC_ATOMIC(sub_return##name) \
	"	nop", \
	/* LSE atomics */ \
	"	neg	%w[i], %w[i]\n" \
	"	ldadd" #mb "	%w[i], w30, %[v]\n" \
	"	add	%w[i], %w[i], w30") \
	: [i] "+&r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
\
	return w0; \
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN
#undef __LL_SC_ATOMIC

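/*
 * The atomic64_t operations below mirror the 32-bit versions above, using
 * the 64-bit (x) register forms of the same LSE instructions.
 */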
#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

static inline void atomic64_andnot(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
	"	stclr	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
	"	stset	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
	"	steor	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
	"	stadd	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
{ \
	register long x0 asm ("x0") = i; \
	register atomic64_t *x1 asm ("x1") = v; \
\
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	"	nop\n" \
	__LL_SC_ATOMIC64(add_return##name), \
	/* LSE atomics */ \
	"	ldadd" #mb "	%[i], x30, %[v]\n" \
	"	add	%[i], %[i], x30") \
	: [i] "+r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
\
	return x0; \
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(and),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(sub),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
{ \
	register long x0 asm ("x0") = i; \
	register atomic64_t *x1 asm ("x1") = v; \
\
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	"	nop\n" \
	__LL_SC_ATOMIC64(sub_return##name) \
	"	nop", \
	/* LSE atomics */ \
	"	neg	%[i], %[i]\n" \
	"	ldadd" #mb "	%[i], x30, %[v]\n" \
	"	add	%[i], %[i], x30") \
	: [i] "+&r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
\
	return x0; \
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

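/*
 * atomic64_dec_if_positive() has no single LSE instruction: the LSE side
 * is a LDR/SUBS/CASAL retry loop. The decremented value is only stored
 * when it would not go negative (b.lt skips the CAS), and the two trailing
 * SUBs detect a failed CAS: if the value CASAL read back from memory does
 * not match the value %[ret] was derived from, the result is non-zero and
 * the loop retries. The LL/SC side is padded with nops to the same length.
 */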
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(dec_if_positive)
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop",
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

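/*
 * __cmpxchg_case_*() is generated for each access size and memory-ordering
 * flavour. On the LSE side this is a single CAS: the expected value is
 * moved into (w|x)30, CAS{A,L,AL}{B,H,} compares it with memory and, on a
 * match, stores the new value; the value found in memory is handed back as
 * the return value. The "w"/"x" and size-suffix macro parameters select
 * the register width and the byte/halfword/word/doubleword instruction.
 */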
#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
						  unsigned long old, \
						  unsigned long new) \
{ \
	register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
	register unsigned long x1 asm ("x1") = old; \
	register unsigned long x2 asm ("x2") = new; \
\
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	"	nop\n" \
	__LL_SC_CMPXCHG(name) \
	"	nop", \
	/* LSE atomics */ \
	"	mov	" #w "30, %" #w "[old]\n" \
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
	"	mov	%" #w "[ret], " #w "30") \
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
	: [old] "r" (x1), [new] "r" (x2) \
	: __LL_SC_CLOBBERS, ##cl); \
\
	return x0; \
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

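/*
 * __cmpxchg_double() compares and swaps a pair of adjacent 64-bit words
 * with a single CASP{AL}, which needs its old and new values in even/odd
 * register pairs (hence the fixed x0-x4 assignments). CASP writes the
 * values it found in memory back into the old-value registers, so the
 * EOR/ORR tail folds both comparisons into x0: the result is 0 only if
 * both words matched and the new values were stored.
 */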
#define __CMPXCHG_DBL(name, mb, cl...) \
static inline long __cmpxchg_double##name(unsigned long old1, \
					  unsigned long old2, \
					  unsigned long new1, \
					  unsigned long new2, \
					  volatile void *ptr) \
{ \
	unsigned long oldval1 = old1; \
	unsigned long oldval2 = old2; \
	register unsigned long x0 asm ("x0") = old1; \
	register unsigned long x1 asm ("x1") = old2; \
	register unsigned long x2 asm ("x2") = new1; \
	register unsigned long x3 asm ("x3") = new2; \
	register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	"	nop\n" \
	"	nop\n" \
	"	nop\n" \
	__LL_SC_CMPXCHG_DBL(name), \
	/* LSE atomics */ \
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
	"	eor	%[old1], %[old1], %[oldval1]\n" \
	"	eor	%[old2], %[old2], %[oldval2]\n" \
	"	orr	%[old1], %[old1], %[old2]") \
	: [old1] "+&r" (x0), [old2] "+&r" (x1), \
	  [v] "+Q" (*(unsigned long *)ptr) \
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
	: __LL_SC_CLOBBERS, ##cl); \
\
	return x0; \
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */