#ifndef foopulseatomichfoo
#define foopulseatomichfoo

/***
  This file is part of PulseAudio.

  Copyright 2006-2008 Lennart Poettering
  Copyright 2008 Nokia Corporation

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/

#include <stdint.h>
#include <pulsecore/macro.h>

/*
 * atomic_ops guarantees us that sizeof(AO_t) == sizeof(void*). It is,
 * however, not guaranteed that sizeof(AO_t) == sizeof(size_t), although
 * that is very likely.
 *
 * For now we only do full memory barriers. Eventually we might want
 * to support more elaborate memory barriers, in which case we will add
 * suffixes to the function names.
 *
 * On gcc >= 4.1 we use the builtin atomic functions, otherwise we use
 * libatomic_ops.
 */
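
/*
 * Whichever backend below gets selected, the API surface is the same. A
 * rough usage sketch (illustrative only; ref(), unref() and cleanup() are
 * hypothetical, not part of this header):
 *
 *     static pa_atomic_t n_users = PA_ATOMIC_INIT(0);
 *
 *     void ref(void)   { pa_atomic_inc(&n_users); }
 *     void unref(void) {
 *         if (pa_atomic_dec(&n_users) == 1)
 *             cleanup();  // dec returned the old value: the count just hit 0
 *     }
 */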

#ifndef PACKAGE
#error "Please include config.h before including this file!"
#endif

#ifdef HAVE_ATOMIC_BUILTINS

/* __sync based implementation */

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

#ifdef HAVE_ATOMIC_BUILTINS_MEMORY_MODEL

/* __atomic based implementation */

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return __atomic_load_n(&a->value, __ATOMIC_SEQ_CST);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    __atomic_store_n(&a->value, i, __ATOMIC_SEQ_CST);
}

#else

static inline int pa_atomic_load(const pa_atomic_t *a) {
    __sync_synchronize();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    __sync_synchronize();
}

#endif
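
/* Both variants above aim at a fully ordered load/store: the __atomic
 * versions use __ATOMIC_SEQ_CST directly, while the __sync fallback
 * brackets a plain volatile access with __sync_synchronize(), a full
 * barrier, which is a close (if not formally identical) approximation. */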


/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return __sync_fetch_and_add(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return __sync_fetch_and_sub(&a->value, i);
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns true when the operation was successful. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return __sync_bool_compare_and_swap(&a->value, old_i, new_i);
}
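
/*
 * A typical compare-and-swap retry loop built on pa_atomic_cmpxchg()
 * (an illustrative sketch, not part of this header): atomically increment
 * a counter, but never past a given maximum.
 *
 *     static int bounded_inc(pa_atomic_t *a, int max) {
 *         int v;
 *         do {
 *             v = pa_atomic_load(a);
 *             if (v >= max)
 *                 return v;                // already at the limit
 *         } while (!pa_atomic_cmpxchg(a, v, v + 1));
 *         return v + 1;
 *     }
 */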

typedef struct pa_atomic_ptr {
    volatile uintptr_t value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (uintptr_t) (v) }

#ifdef HAVE_ATOMIC_BUILTINS_MEMORY_MODEL

/* __atomic based implementation */

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) __atomic_load_n(&a->value, __ATOMIC_SEQ_CST);
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    __atomic_store_n(&a->value, (uintptr_t) p, __ATOMIC_SEQ_CST);
}

#else

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    __sync_synchronize();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (uintptr_t) p;
    __sync_synchronize();
}

#endif

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    return __sync_bool_compare_and_swap(&a->value, (uintptr_t) old_p, (uintptr_t) new_p);
}
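
/*
 * pa_atomic_ptr_cmpxchg() is the building block for lock-free structures.
 * A minimal Treiber-stack push sketch (struct node and push() are
 * illustrative, not part of PulseAudio):
 *
 *     struct node { struct node *next; };
 *     static pa_atomic_ptr_t top = PA_ATOMIC_PTR_INIT(NULL);
 *
 *     static void push(struct node *n) {
 *         void *old;
 *         do {
 *             old = pa_atomic_ptr_load(&top);
 *             n->next = old;
 *         } while (!pa_atomic_ptr_cmpxchg(&top, old, n));
 *     }
 */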

#elif defined(__NetBSD__) && defined(HAVE_SYS_ATOMIC_H)

/* NetBSD 5.0+ atomic_ops(3) implementation */

#include <sys/atomic.h>

typedef struct pa_atomic {
    volatile unsigned int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (unsigned int) (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    membar_sync();
    return (int) a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = (unsigned int) i;
    membar_sync();
}

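/* The NetBSD atomic_{add,inc,dec}_*_nv() primitives return the *new*
 * value, while this API returns the previous one; the +/- corrections
 * in the functions below convert between the two. */
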
/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int nv = (int) atomic_add_int_nv(&a->value, i);
    return nv - i;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int nv = (int) atomic_add_int_nv(&a->value, -i);
    return nv + i;
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    int nv = (int) atomic_inc_uint_nv(&a->value);
    return nv - 1;
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    int nv = (int) atomic_dec_uint_nv(&a->value);
    return nv + 1;
}

/* Returns true when the operation was successful. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned int r = atomic_cas_uint(&a->value, (unsigned int) old_i, (unsigned int) new_i);
    return (int) r == old_i;
}

typedef struct pa_atomic_ptr {
    volatile void *value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    membar_sync();
    return (void *) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = p;
    membar_sync();
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    void *r = atomic_cas_ptr(&a->value, old_p, new_p);
    return r == old_p;
}

#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>
#include <machine/atomic.h>

typedef struct pa_atomic {
    volatile unsigned long value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) atomic_load_acq_int((unsigned int *) &a->value);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    atomic_store_rel_int((unsigned int *) &a->value, i);
}

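/* atomic_fetchadd_int() returns the previously set value, so the four
 * arithmetic operations below follow the same contract as the other
 * backends in this file. */
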
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return atomic_fetchadd_int((unsigned int *) &a->value, i);
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return atomic_fetchadd_int((unsigned int *) &a->value, -(i));
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return atomic_fetchadd_int((unsigned int *) &a->value, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return atomic_fetchadd_int((unsigned int *) &a->value, -1);
}

static inline int pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return atomic_cmpset_int((unsigned int *) &a->value, old_i, new_i);
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
#ifdef atomic_load_acq_64
    return (void*) atomic_load_acq_ptr((unsigned long *) &a->value);
#else
    return (void*) atomic_load_acq_ptr((unsigned int *) &a->value);
#endif
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
#ifdef atomic_load_acq_64
    atomic_store_rel_ptr(&a->value, (unsigned long) p);
#else
    atomic_store_rel_ptr((unsigned int *) &a->value, (unsigned int) p);
#endif
}

static inline int pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
#ifdef atomic_load_acq_64
    return atomic_cmpset_ptr(&a->value, (unsigned long) old_p, (unsigned long) new_p);
#else
    return atomic_cmpset_ptr((unsigned int *) &a->value, (unsigned int) old_p, (unsigned int) new_p);
#endif
}

#elif defined(__GNUC__) && (defined(__amd64__) || defined(__x86_64__))

#warning "The native atomic operations implementation for AMD64 has not been tested thoroughly. libatomic_ops is known to not work properly on AMD64 and your gcc version is too old for the gcc-builtin atomic ops support. You have three options now: test the native atomic operations implementation for AMD64, fix libatomic_ops, or upgrade your GCC."

/* Adapted from glibc */

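/* xaddl atomically adds its source register to memory and returns the old
 * value in that register; lock-prefixed instructions act as full barriers
 * on x86. The unfenced plain load and store below rely on x86's
 * comparatively strong memory ordering. */
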
typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int result;

    __asm __volatile ("lock; xaddl %0, %1"
                      : "=r" (result), "=m" (a->value)
                      : "0" (i), "m" (a->value));

    return result;
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return pa_atomic_add(a, -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    int result;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_i), "m" (a->value), "0" (old_i));

    return result == old_i;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    void *result;

    __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
                          : "=a" (result), "=m" (a->value)
                          : "r" (new_p), "m" (a->value), "0" (old_p));

    return result == old_p;
}

#elif defined(ATOMIC_ARM_INLINE_ASM)

/*
   These should only be enabled if we have ARMv6 or better.
*/

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline void pa_memory_barrier(void) {
#ifdef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    asm volatile ("mcr  p15, 0, r0, c7, c10, 5  @ dmb");
#endif
}

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex    %0, [%3]\n"
                      "add      %2, %0, %4\n"
                      "strex    %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}
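
/* The ldrex/strex pairs in these functions form a load-linked/
 * store-conditional loop: strex writes 0 to its status operand on success
 * and 1 if the exclusive reservation was lost, in which case the whole
 * sequence is retried. */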

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    unsigned long not_exclusive;
    int new_val, old_val;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex    %0, [%3]\n"
                      "sub      %2, %0, %4\n"
                      "strex    %1, %2, [%3]\n"
                      : "=&r" (old_val), "=&r" (not_exclusive), "=&r" (new_val)
                      : "r" (&a->value), "Ir" (i)
                      : "cc");
    } while (not_exclusive);
    pa_memory_barrier();

    return old_val;
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex    %0, [%2]\n"
                      "subs     %0, %0, %3\n"
                      "mov      %1, %0\n"
                      "strexeq  %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_i), "r" (new_i)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    unsigned long not_equal, not_exclusive;

    pa_memory_barrier();
    do {
        asm volatile ("ldrex    %0, [%2]\n"
                      "subs     %0, %0, %3\n"
                      "mov      %1, %0\n"
                      "strexeq  %0, %4, [%2]\n"
                      : "=&r" (not_exclusive), "=&r" (not_equal)
                      : "r" (&a->value), "Ir" (old_p), "r" (new_p)
                      : "cc");
    } while (not_exclusive && !not_equal);
    pa_memory_barrier();

    return !not_equal;
}

#elif defined(ATOMIC_ARM_LINUX_HELPERS)

/* See the file arch/arm/kernel/entry-armv.S in your kernel sources for more
   information about these functions. The ARM kernel helper functions first
   appeared in 2.6.16.
   Pass the --disable-atomic-arm-linux-helpers flag to configure if you
   prefer the inline asm implementation or have an obsolete Linux kernel.
*/

/* Memory barrier */
typedef void (__kernel_dmb_t)(void);
#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
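
/* These fixed addresses are the ARM "kuser" helpers that the kernel maps
 * into the top of every process's address space: 0xffff0fa0 is the memory
 * barrier helper and 0xffff0fc0 the cmpxchg helper (documented in the
 * kernel tree, e.g. Documentation/arm/kernel_user_helpers.txt). */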

static inline void pa_memory_barrier(void) {
#ifndef ATOMIC_ARM_MEMORY_BARRIER_ENABLED
    __kernel_dmb();
#endif
}

/* Atomic exchange (__kernel_cmpxchg_t contains memory barriers if needed) */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* The same helper typed for unsigned long; this exists just to avoid
   compiler warnings */
typedef int (__kernel_cmpxchg_u_t)(unsigned long oldval, unsigned long newval, volatile unsigned long *ptr);
#define __kernel_cmpxchg_u (*(__kernel_cmpxchg_u_t *)0xffff0fc0)

typedef struct pa_atomic {
    volatile int value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    pa_memory_barrier();
    return a->value;
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    a->value = i;
    pa_memory_barrier();
}

/* Returns the previously set value */
static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val + i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    int old_val;
    do {
        old_val = a->value;
    } while (__kernel_cmpxchg(old_val, old_val - i, &a->value));
    return old_val;
}

/* Returns the previously set value */
static inline int pa_atomic_inc(pa_atomic_t *a) {
    return pa_atomic_add(a, 1);
}

/* Returns the previously set value */
static inline int pa_atomic_dec(pa_atomic_t *a) {
    return pa_atomic_sub(a, 1);
}

/* Returns true when the operation was successful. */
static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    bool failed;
    do {
        failed = !!__kernel_cmpxchg(old_i, new_i, &a->value);
    } while (failed && a->value == old_i);
    return !failed;
}

typedef struct pa_atomic_ptr {
    volatile unsigned long value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (unsigned long) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    pa_memory_barrier();
    return (void*) a->value;
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    a->value = (unsigned long) p;
    pa_memory_barrier();
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    bool failed;
    do {
        failed = !!__kernel_cmpxchg_u((unsigned long) old_p, (unsigned long) new_p, &a->value);
    } while (failed && a->value == (unsigned long) old_p);
    return !failed;
}

#else

/* libatomic_ops based implementation */

#include <atomic_ops.h>
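
/* The _full suffix on the AO_* calls below requests a full memory barrier
 * on both sides of each operation, matching the all-full-barriers policy
 * stated at the top of this file. */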

typedef struct pa_atomic {
    volatile AO_t value;
} pa_atomic_t;

#define PA_ATOMIC_INIT(v) { .value = (AO_t) (v) }

static inline int pa_atomic_load(const pa_atomic_t *a) {
    return (int) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_store(pa_atomic_t *a, int i) {
    AO_store_full(&a->value, (AO_t) i);
}

static inline int pa_atomic_add(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) i);
}

static inline int pa_atomic_sub(pa_atomic_t *a, int i) {
    return (int) AO_fetch_and_add_full(&a->value, (AO_t) -i);
}

static inline int pa_atomic_inc(pa_atomic_t *a) {
    return (int) AO_fetch_and_add1_full(&a->value);
}

static inline int pa_atomic_dec(pa_atomic_t *a) {
    return (int) AO_fetch_and_sub1_full(&a->value);
}

static inline bool pa_atomic_cmpxchg(pa_atomic_t *a, int old_i, int new_i) {
    return AO_compare_and_swap_full(&a->value, (unsigned long) old_i, (unsigned long) new_i);
}

typedef struct pa_atomic_ptr {
    volatile AO_t value;
} pa_atomic_ptr_t;

#define PA_ATOMIC_PTR_INIT(v) { .value = (AO_t) (v) }

static inline void* pa_atomic_ptr_load(const pa_atomic_ptr_t *a) {
    return (void*) AO_load_full((AO_t*) &a->value);
}

static inline void pa_atomic_ptr_store(pa_atomic_ptr_t *a, void *p) {
    AO_store_full(&a->value, (AO_t) p);
}

static inline bool pa_atomic_ptr_cmpxchg(pa_atomic_ptr_t *a, void *old_p, void *new_p) {
    return AO_compare_and_swap_full(&a->value, (AO_t) old_p, (AO_t) new_p);
}

#endif

#endif