/**
 * Portable atomic-operation macros (p_atomic_*).
 *
 * Many similar implementations exist. See for example libwsbm
 * or the linux kernel include/atomic.h
 *
 * No copyright claimed on this file.
 *
 */

#include "no_extern_c.h"

#ifndef U_ATOMIC_H
#define U_ATOMIC_H

#include <stdbool.h>
#include <stdint.h>

/* Favor OS-provided implementations.
 *
 * Where no OS-provided implementation is available, fall back to
 * locally coded assembly, compiler intrinsic or ultimately a
 * mutex-based implementation.
 */
#if defined(__sun)
#define PIPE_ATOMIC_OS_SOLARIS
#elif defined(_MSC_VER)
#define PIPE_ATOMIC_MSVC_INTRINSIC
#elif defined(__GNUC__)
#define PIPE_ATOMIC_GCC_INTRINSIC
#else
#error "Unsupported platform"
#endif

/* Implementation using GCC-provided synchronization intrinsics.
 *
 * Conventions shared by all backends: the *_return macros yield the value
 * AFTER the operation; p_atomic_cmpxchg yields the value that was stored
 * BEFORE the operation.
 */
#if defined(PIPE_ATOMIC_GCC_INTRINSIC)

#define PIPE_ATOMIC "GCC Sync Intrinsics"

#if defined(USE_GCC_ATOMIC_BUILTINS)

/* The builtins with explicit memory model are available since GCC 4.7. */
#define p_atomic_set(_v, _i) __atomic_store_n((_v), (_i), __ATOMIC_RELEASE)
#define p_atomic_read(_v) __atomic_load_n((_v), __ATOMIC_ACQUIRE)
#define p_atomic_dec_zero(v) (__atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL) == 0)
#define p_atomic_inc(v) (void) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_dec(v) (void) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_add(v, i) (void) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
#define p_atomic_inc_return(v) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_dec_return(v) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_add_return(v, i) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
#define p_atomic_xchg(v, i) __atomic_exchange_n((v), (i), __ATOMIC_ACQ_REL)
/* A native exchange exists, so the CAS-loop emulation below is skipped. */
#define PIPE_NATIVE_ATOMIC_XCHG

#else

/* Legacy __sync_* builtins (each implies a full barrier).
 * NOTE(review): set/read are plain, unordered accesses on this path —
 * presumably relying on naturally aligned word-sized loads/stores being
 * single instructions; confirm callers don't need ordering here. */
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
#define p_atomic_inc(v) (void) __sync_add_and_fetch((v), 1)
#define p_atomic_dec(v) (void) __sync_sub_and_fetch((v), 1)
#define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
#define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
#define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
#define p_atomic_add_return(v, i) __sync_add_and_fetch((v), (i))

#endif

/* There is no __atomic_* compare and exchange that returns the current value.
 * Also, GCC 5.4 seems unable to optimize a compound statement expression that
 * uses an additional stack variable with __atomic_compare_exchange[_n].
 * Returns the previous value of *(v). */
#define p_atomic_cmpxchg(v, old, _new) \
   __sync_val_compare_and_swap((v), (old), (_new))

#endif

/* Unlocked version for single threaded environments, such as some
 * windows kernel modules.
 *
 * These are plain C expressions with no atomicity or ordering guarantees;
 * they only mirror the return-value conventions of the other backends
 * (*_return yields the new value, cmpxchg yields the old one).
 */
#if defined(PIPE_ATOMIC_OS_UNLOCKED)

#define PIPE_ATOMIC "Unlocked"

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_dec_zero(_v) (p_atomic_dec_return(_v) == 0)
#define p_atomic_inc(_v) ((void) p_atomic_inc_return(_v))
#define p_atomic_dec(_v) ((void) p_atomic_dec_return(_v))
#define p_atomic_add(_v, _i) ((void) p_atomic_add_return((_v), (_i)))
#define p_atomic_inc_return(_v) (++(*(_v)))
#define p_atomic_dec_return(_v) (--(*(_v)))
#define p_atomic_add_return(_v, _i) (*(_v) = *(_v) + (_i))
/* NOTE(review): _v is evaluated up to three times — don't pass expressions
 * with side effects. */
#define p_atomic_cmpxchg(_v, _old, _new) (*(_v) == (_old) ? (*(_v) = (_new), (_old)) : *(_v))

#endif

100 
#if defined(PIPE_ATOMIC_MSVC_INTRINSIC)

#define PIPE_ATOMIC "MSVC Intrinsics"

/* We use the Windows header's Interlocked*64 functions instead of the
 * _Interlocked*64 intrinsics wherever we can, as support for the latter varies
 * with target CPU, whereas Windows headers take care of all portability
 * issues: using intrinsics where available, falling back to library
 * implementations where not.
 */
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif
#include <windows.h>
#include <intrin.h>
#include <assert.h>

/* MSVC supports decltype keyword, but it's only supported on C++ and doesn't
 * quite work here; and if a C++-only solution is worthwhile, then it would be
 * better to use templates / function overloading, instead of decltype magic.
 * Therefore, we rely on implicit casting to LONGLONG for the functions that
 * return a 64-bit result.
 */

/* Plain store/load.  NOTE(review): presumably sufficient for naturally
 * aligned word-sized objects on MSVC targets — confirm ordering needs. */
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))

#define p_atomic_dec_zero(_v) \
   (p_atomic_dec_return(_v) == 0)

#define p_atomic_inc(_v) \
   ((void) p_atomic_inc_return(_v))

/* _InterlockedIncrement/Decrement return the RESULTING value, matching the
 * *_return convention of the other backends. */
#define p_atomic_inc_return(_v) (\
   sizeof *(_v) == sizeof(short)   ? _InterlockedIncrement16((short *)  (_v)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedIncrement  ((long *)   (_v)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedIncrement64 ((__int64 *)(_v)) : \
                                     (assert(!"should not get here"), 0))

#define p_atomic_dec(_v) \
   ((void) p_atomic_dec_return(_v))

#define p_atomic_dec_return(_v) (\
   sizeof *(_v) == sizeof(short)   ? _InterlockedDecrement16((short *)  (_v)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedDecrement  ((long *)   (_v)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedDecrement64 ((__int64 *)(_v)) : \
                                     (assert(!"should not get here"), 0))

#define p_atomic_add(_v, _i) \
   ((void) p_atomic_add_return((_v), (_i)))

/* BUGFIX: (_Interlocked)ExchangeAdd* return the value held BEFORE the
 * addition, whereas p_atomic_add_return must return the value AFTER it
 * (as the GCC __atomic/__sync, Solaris *_nv and unlocked backends do).
 * Add (_i) to the intrinsic's result to restore new-value semantics. */
#define p_atomic_add_return(_v, _i) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedExchangeAdd8 ((char *)   (_v), (_i)) + (_i) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedExchangeAdd16((short *)  (_v), (_i)) + (_i) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedExchangeAdd  ((long *)   (_v), (_i)) + (_i) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64((__int64 *)(_v), (_i)) + (_i) : \
                                     (assert(!"should not get here"), 0))

/* Returns the value held in *(_v) before the operation. */
#define p_atomic_cmpxchg(_v, _old, _new) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedCompareExchange8 ((char *)   (_v), (char)   (_new), (char)   (_old)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedCompareExchange16((short *)  (_v), (short)  (_new), (short)  (_old)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedCompareExchange  ((long *)   (_v), (long)   (_new), (long)   (_old)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedCompareExchange64 ((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
                                     (assert(!"should not get here"), 0))

#endif

/* Implementation using Solaris libc atomic operations (atomic_ops(3C)).
 *
 * The *_nv variants return the new (post-operation) value, which matches
 * the *_return convention of the other backends; atomic_cas_* return the
 * old value, matching p_atomic_cmpxchg.  Size dispatch is done on the
 * pointee size, with the result cast back to the caller's type.
 */
#if defined(PIPE_ATOMIC_OS_SOLARIS)

#define PIPE_ATOMIC "Solaris OS atomic functions"

#include <atomic.h>
#include <assert.h>

/* Plain store/load; no ordering implied. */
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))

#define p_atomic_dec_zero(v) (\
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8_nv ((uint8_t  *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) == 0 : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_inc(v) (void) (\
   sizeof(*v) == sizeof(uint8_t)  ? atomic_inc_8 ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_inc_return(v) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_inc_8_nv ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16_nv((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32_nv((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64_nv((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_dec(v) (void) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8 ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_dec_return(v) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8_nv ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_add(v, i) (void) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_add_8 ((uint8_t  *)(v), (i)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_add_16((uint16_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_add_32((uint32_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_add_64((uint64_t *)(v), (i)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_add_return(v, i) (__typeof(*v)) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_add_8_nv ((uint8_t  *)(v), (i)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_add_16_nv((uint16_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_add_32_nv((uint32_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_add_64_nv((uint64_t *)(v), (i)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_cmpxchg(v, old, _new) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_cas_8 ((uint8_t  *)(v), (uint8_t )(old), (uint8_t )(_new)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_cas_16((uint16_t *)(v), (uint16_t)(old), (uint16_t)(_new)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)) : \
                                    (assert(!"should not get here"), 0))

#endif

/* Exactly one backend above must have defined PIPE_ATOMIC. */
#ifndef PIPE_ATOMIC
#error "No pipe_atomic implementation selected"
#endif

/* If the selected backend provides no native atomic exchange, emulate
 * p_atomic_xchg with a compare-and-swap retry loop. */
#ifndef PIPE_NATIVE_ATOMIC_XCHG
/* Emulated 32-bit atomic exchange: retry a CAS until the value we
 * observed is the value actually replaced.  Returns the previous
 * contents of *v. */
static inline uint32_t p_atomic_xchg_32(uint32_t *v, uint32_t i)
{
   uint32_t prev = p_atomic_read(v);
   uint32_t seen;

   do {
      seen = prev;
      prev = p_atomic_cmpxchg(v, seen, i);
   } while (prev != seen);

   return prev;
}

/* Emulated 64-bit atomic exchange: retry a CAS until the value we
 * observed is the value actually replaced.  Returns the previous
 * contents of *v. */
static inline uint64_t p_atomic_xchg_64(uint64_t *v, uint64_t i)
{
   uint64_t prev = p_atomic_read(v);
   uint64_t seen;

   do {
      seen = prev;
      prev = p_atomic_cmpxchg(v, seen, i);
   } while (prev != seen);

   return prev;
}

/* Generic exchange: dispatch on operand size to the 32- or 64-bit CAS-loop
 * helper, casting the result back to the pointee type.  Returns the value
 * held in *(v) before the exchange. */
#define p_atomic_xchg(v, i) (__typeof(*(v)))( \
   sizeof(*(v)) == sizeof(uint32_t) ? p_atomic_xchg_32((uint32_t *)(v), (uint32_t)(i)) : \
   sizeof(*(v)) == sizeof(uint64_t) ? p_atomic_xchg_64((uint64_t *)(v), (uint64_t)(i)) : \
                                      (assert(!"should not get here"), 0))
#endif

#endif /* U_ATOMIC_H */