/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_CUTILS_ATOMIC_H
#define ANDROID_CUTILS_ATOMIC_H

#include <stdint.h>
#include <sys/types.h>
#include <stdatomic.h>

#ifndef ANDROID_ATOMIC_INLINE
#define ANDROID_ATOMIC_INLINE static inline
#endif
/*
 * A handful of basic atomic operations.
 * THESE ARE HERE FOR LEGACY REASONS ONLY.  AVOID.
 *
 * PREFERRED ALTERNATIVES:
 * - Use C++/C/pthread locks/mutexes whenever there is not a
 *   convincing reason to do otherwise.  Note that very clever and
 *   complicated, but correct, lock-free code is often slower than
 *   using locks, especially where nontrivial data structures
 *   are involved.
 * - C11 stdatomic.h.
 * - Where supported, C++11 std::atomic<T>.
 *
 * PLEASE STOP READING HERE UNLESS YOU ARE TRYING TO UNDERSTAND
 * OR UPDATE OLD CODE.
 *
 * The "acquire" and "release" terms can be defined intuitively in terms
 * of the placement of memory barriers in a simple lock implementation:
 *   - wait until compare-and-swap(lock-is-free --> lock-is-held) succeeds
 *   - barrier
 *   - [do work]
 *   - barrier
 *   - store(lock-is-free)
 * In very crude terms, the initial (acquire) barrier prevents any of the
 * "work" from happening before the lock is held, and the later (release)
 * barrier ensures that all of the work happens before the lock is released.
 * (Think of cached writes, cache read-ahead, and instruction reordering
 * around the CAS and store instructions.)
 *
 * The barriers must apply to both the compiler and the CPU.  Note it is
 * legal for instructions that occur before an "acquire" barrier to be
 * moved down below it, and for instructions that occur after a "release"
 * barrier to be moved up above it.
 *
 * The ARM-driven implementation we use here is short on subtlety,
 * and actually requests a full barrier from the compiler and the CPU.
 * The only difference between acquire and release is in whether they
 * are issued before or after the atomic operation with which they
 * are associated.  To ease the transition to C/C++ atomic intrinsics,
 * you should not rely on this, and instead assume that only the minimal
 * acquire/release protection is provided.
 *
 * NOTE: all int32_t* values are expected to be aligned on 32-bit boundaries.
 * If they are not, atomicity is not guaranteed.
 */

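/*
 * Purely illustrative sketch, not part of this header's API: a minimal
 * spinlock written directly against <stdatomic.h> (already included above),
 * showing where the "acquire" and "release" orderings described in the
 * comment above are placed.  The example_* names are hypothetical.
 */
ANDROID_ATOMIC_INLINE
void example_spin_lock(volatile atomic_int_least32_t* lock)
{
    int_least32_t expected = 0;
    /* Acquire ordering on success: work done while holding the lock
     * cannot be reordered before the lock is observed as held. */
    while (!atomic_compare_exchange_weak_explicit(lock, &expected, 1,
                                                  memory_order_acquire,
                                                  memory_order_relaxed)) {
        expected = 0;  /* the failed CAS wrote the observed value; reset and retry */
    }
}

ANDROID_ATOMIC_INLINE
void example_spin_unlock(volatile atomic_int_least32_t* lock)
{
    /* Release ordering: all work done while holding the lock becomes
     * visible before the lock is observed as free. */
    atomic_store_explicit(lock, 0, memory_order_release);
}
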
ANDROID_ATOMIC_INLINE
volatile atomic_int_least32_t* to_atomic_int_least32_t(volatile const int32_t* addr) {
#ifdef __cplusplus
    return reinterpret_cast<volatile atomic_int_least32_t*>(const_cast<volatile int32_t*>(addr));
#else
    return (volatile atomic_int_least32_t*)addr;
#endif
}

/*
 * Basic arithmetic and bitwise operations.  These all provide a
 * barrier with "release" ordering, and return the previous value.
 *
 * These have the same characteristics (e.g. what happens on overflow)
 * as the equivalent non-atomic C operations.
 */
ANDROID_ATOMIC_INLINE
int32_t android_atomic_inc(volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    /* int32_t, if it exists, is the same as int_least32_t. */
    return atomic_fetch_add_explicit(a, 1, memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_dec(volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    return atomic_fetch_sub_explicit(a, 1, memory_order_release);
}

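/*
 * Illustrative sketch, not part of this API: the classic use of a
 * release-ordered decrement is dropping a reference count.  The release
 * ordering makes this thread's writes to the object visible before the
 * count drops; the acquire fence before destruction makes the other
 * droppers' writes visible here.  example_unref() and its parameters
 * are hypothetical.
 */
ANDROID_ATOMIC_INLINE
void example_unref(volatile int32_t* refcount, void (*destroy)(void))
{
    /* android_atomic_dec returns the previous value, so 1 means this
     * caller dropped the last reference. */
    if (android_atomic_dec(refcount) == 1) {
        atomic_thread_fence(memory_order_acquire);
        destroy();
    }
}
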
ANDROID_ATOMIC_INLINE
int32_t android_atomic_add(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    return atomic_fetch_add_explicit(a, value, memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_and(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    return atomic_fetch_and_explicit(a, value, memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_or(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    return atomic_fetch_or_explicit(a, value, memory_order_release);
}

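/*
 * Illustrative sketch, not part of this API: because these operations
 * return the previous value, a caller can tell whether it was the one
 * that performed a particular transition, e.g. the first to set a flag
 * bit.  example_set_flag_first() and its parameters are hypothetical.
 */
ANDROID_ATOMIC_INLINE
int example_set_flag_first(int32_t flag_bit, volatile int32_t* state)
{
    /* Non-zero only for the caller that actually flipped the bit from 0. */
    return (android_atomic_or(flag_bit, state) & flag_bit) == 0;
}
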
/*
 * Perform an atomic load with "acquire" or "release" ordering.
 *
 * Note that the notion of a "release" ordering for a load does not
 * really fit into the C11 or C++11 memory model.  The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races.  In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a leading
 * atomic_thread_fence (typically with memory_order_acquire,
 * not memory_order_release!) instead.  If you do not understand
 * this comment, you are in the vast majority, and should not be
 * using release loads or replacing them with anything other than
 * locks or default sequentially consistent atomics.
 */
ANDROID_ATOMIC_INLINE
int32_t android_atomic_acquire_load(volatile const int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    return atomic_load_explicit(a, memory_order_acquire);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_release_load(volatile const int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    atomic_thread_fence(memory_order_seq_cst);
    /* Any reasonable clients of this interface would probably prefer   */
    /* something weaker.  But some remaining clients seem to be         */
    /* abusing this API in strange ways, e.g. by using it as a fence.   */
    /* Thus we are conservative until we can get rid of remaining       */
    /* clients (and this function).                                     */
    return atomic_load_explicit(a, memory_order_relaxed);
}

/*
 * Perform an atomic store with "acquire" or "release" ordering.
 *
 * Note that the notion of an "acquire" ordering for a store does not
 * really fit into the C11 or C++11 memory model.  The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races.  In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a trailing
 * atomic_thread_fence (typically with memory_order_release,
 * not memory_order_acquire!) instead.
 */
ANDROID_ATOMIC_INLINE
void android_atomic_acquire_store(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    atomic_store_explicit(a, value, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    /* Again overly conservative to accommodate weird clients.   */
}

ANDROID_ATOMIC_INLINE
void android_atomic_release_store(int32_t value, volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    atomic_store_explicit(a, value, memory_order_release);
}

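/*
 * Illustrative sketch, not part of this API: the intended pairing of a
 * release store with an acquire load is data publication.  A producer
 * writes its payload, then sets a flag with release ordering; a consumer
 * that observes the flag with acquire ordering is guaranteed to also see
 * the payload.  The example_* names and the payload value are hypothetical.
 */
ANDROID_ATOMIC_INLINE
void example_publish(int32_t* data, volatile int32_t* ready)
{
    *data = 42;                               /* plain write of the payload */
    android_atomic_release_store(1, ready);   /* then publish the flag */
}

ANDROID_ATOMIC_INLINE
int32_t example_consume(const int32_t* data, volatile int32_t* ready)
{
    while (android_atomic_acquire_load(ready) == 0) {
        /* spin until the flag is observed as set */
    }
    return *data;                             /* guaranteed to see the payload */
}
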
/*
 * Compare-and-set operation with "acquire" or "release" ordering.
 *
 * This returns zero if the new value was successfully stored, which will
 * only happen when *addr == oldvalue.
 *
 * (The return value is inverted from implementations on other platforms,
 * but matches the ARM ldrex/strex result.)
 *
 * Implementations that use the release CAS in a loop may be less efficient
 * than possible, because we re-issue the memory barrier on each iteration.
 */
ANDROID_ATOMIC_INLINE
int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    return !atomic_compare_exchange_strong_explicit(
                                          a, &oldvalue, newvalue,
                                          memory_order_acquire,
                                          memory_order_acquire);
}

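/*
 * Illustrative sketch, not part of this API, of the inverted return
 * convention: zero means the swap happened.  example_try_lock() is a
 * hypothetical try-lock built on the acquire CAS.
 */
ANDROID_ATOMIC_INLINE
int example_try_lock(volatile int32_t* lock)
{
    /* A zero result means *lock was 0 and is now 1, i.e. the lock was
     * acquired; the acquire ordering keeps the critical section from
     * being reordered above this point. */
    return android_atomic_acquire_cas(0, 1, lock) == 0;
}
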
ANDROID_ATOMIC_INLINE
int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr)
{
    volatile atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
    return !atomic_compare_exchange_strong_explicit(
                                          a, &oldvalue, newvalue,
                                          memory_order_release,
                                          memory_order_relaxed);
}

/*
 * Fence primitives.
 */
ANDROID_ATOMIC_INLINE
void android_compiler_barrier(void)
{
    __asm__ __volatile__ ("" : : : "memory");
    /* Could probably also be:                          */
    /* atomic_signal_fence(memory_order_seq_cst);       */
}

ANDROID_ATOMIC_INLINE
void android_memory_barrier(void)
{
    atomic_thread_fence(memory_order_seq_cst);
}

/*
 * Aliases for code using an older version of this header.  These are now
 * deprecated and should not be used.  The definitions will be removed
 * in a future release.
 */
#define android_atomic_write android_atomic_release_store
#define android_atomic_cmpxchg android_atomic_release_cas

#endif // ANDROID_CUTILS_ATOMIC_H