/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_CUTILS_ATOMIC_H
#define ANDROID_CUTILS_ATOMIC_H

#include <stdint.h>
#include <sys/types.h>

#include <atomic>

#ifndef ANDROID_ATOMIC_INLINE
#define ANDROID_ATOMIC_INLINE static inline
#endif

/*
 * A handful of basic atomic operations.
 * THESE ARE HERE FOR LEGACY REASONS ONLY.  AVOID.
 *
 * PREFERRED ALTERNATIVES:
 * - Use C++/C/pthread locks/mutexes whenever there is not a
 *   convincing reason to do otherwise.  Note that very clever and
 *   complicated, but correct, lock-free code is often slower than
 *   using locks, especially where nontrivial data structures
 *   are involved.
 * - C11 stdatomic.h.
 * - Where supported, C++11 std::atomic<T> .
 *
 * PLEASE STOP READING HERE UNLESS YOU ARE TRYING TO UNDERSTAND
 * OR UPDATE OLD CODE.
 *
 * The "acquire" and "release" terms can be defined intuitively in terms
 * of the placement of memory barriers in a simple lock implementation:
 *   - wait until compare-and-swap(lock-is-free --> lock-is-held) succeeds
 *   - barrier
 *   - [do work]
 *   - barrier
 *   - store(lock-is-free)
 * In very crude terms, the initial (acquire) barrier prevents any of the
 * "work" from happening before the lock is held, and the later (release)
 * barrier ensures that all of the work happens before the lock is released.
 * (Think of cached writes, cache read-ahead, and instruction reordering
 * around the CAS and store instructions.)
 *
 * The barriers must apply to both the compiler and the CPU.  Note it is
 * legal for instructions that occur before an "acquire" barrier to be
 * moved down below it, and for instructions that occur after a "release"
 * barrier to be moved up above it.
 *
 * The ARM-driven implementation we use here is short on subtlety,
 * and actually requests a full barrier from the compiler and the CPU.
 * The only difference between acquire and release is in whether they
 * are issued before or after the atomic operation with which they
 * are associated.  To ease the transition to C/C++ atomic intrinsics,
 * you should not rely on this, and instead assume that only the minimal
 * acquire/release protection is provided.
 *
 * NOTE: all int32_t* values are expected to be aligned on 32-bit boundaries.
 * If they are not, atomicity is not guaranteed.
 */

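/*
 * Illustrative sketch only, not part of this header's API: the kind of code
 * the PREFERRED ALTERNATIVES above point to.  The function name is
 * hypothetical; it simply shows a std::atomic<T> counter standing in for
 * the legacy helpers defined below.
 */
ANDROID_ATOMIC_INLINE
int32_t example_modern_fetch_add(std::atomic<int32_t>* counter, int32_t value) {
  /* Like android_atomic_add() below, fetch_add returns the previous value,
   * but with an explicitly chosen memory order (here the default-strength
   * sequentially consistent one). */
  return counter->fetch_add(value, std::memory_order_seq_cst);
}
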
ANDROID_ATOMIC_INLINE
volatile std::atomic_int_least32_t* to_atomic_int_least32_t(
    volatile const int32_t* addr) {
#ifdef __cplusplus
  return reinterpret_cast<volatile std::atomic_int_least32_t*>(
      const_cast<volatile int32_t*>(addr));
#else
  return (volatile std::atomic_int_least32_t*)addr;
#endif
}

/*
 * Basic arithmetic and bitwise operations.  These all provide a
 * barrier with "release" ordering, and return the previous value.
 *
 * These have the same characteristics (e.g. what happens on overflow)
 * as the equivalent non-atomic C operations.
 */
ANDROID_ATOMIC_INLINE
int32_t android_atomic_inc(volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  /* Int32_t, if it exists, is the same as int_least32_t. */
  return atomic_fetch_add_explicit(a, 1, std::memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_dec(volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  return atomic_fetch_sub_explicit(a, 1, std::memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_add(int32_t value, volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  return atomic_fetch_add_explicit(a, value, std::memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_and(int32_t value, volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  return atomic_fetch_and_explicit(a, value, std::memory_order_release);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_or(int32_t value, volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  return atomic_fetch_or_explicit(a, value, std::memory_order_release);
}

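/*
 * Illustrative sketch only (hypothetical function, not part of this API):
 * the classic use of a release fetch-and-decrement such as
 * android_atomic_dec().  Because the operations above return the previous
 * value, a caller that observes 1 knows it just dropped the last reference,
 * and pairs the release decrement with an acquire fence before reclaiming
 * the object.
 */
ANDROID_ATOMIC_INLINE
int example_unref(volatile int32_t* refcount)
{
  if (android_atomic_dec(refcount) == 1) {
    /* Last reference released; order other threads' prior updates before
     * any teardown the caller performs next. */
    atomic_thread_fence(std::memory_order_acquire);
    return 1;  /* caller may reclaim the object */
  }
  return 0;
}
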
/*
 * Perform an atomic load with "acquire" or "release" ordering.
 *
 * Note that the notion of a "release" ordering for a load does not
 * really fit into the C11 or C++11 memory model.  The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races.  In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a leading
 * atomic_thread_fence (typically with memory_order_acquire,
 * not memory_order_release!) instead.  If you do not understand
 * this comment, you are in the vast majority, and should not be
 * using release loads or replacing them with anything other than
 * locks or default sequentially consistent atomics.
 */
ANDROID_ATOMIC_INLINE
int32_t android_atomic_acquire_load(volatile const int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  return atomic_load_explicit(a, std::memory_order_acquire);
}

ANDROID_ATOMIC_INLINE
int32_t android_atomic_release_load(volatile const int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  atomic_thread_fence(std::memory_order_seq_cst);
  /* Any reasonable clients of this interface would probably prefer   */
  /* something weaker.  But some remaining clients seem to be         */
  /* abusing this API in strange ways, e.g. by using it as a fence.   */
  /* Thus we are conservative until we can get rid of remaining       */
  /* clients (and this function).                                     */
  return atomic_load_explicit(a, std::memory_order_relaxed);
}

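/*
 * Illustrative sketch only (hypothetical function, not part of this API):
 * the replacement pattern recommended in the comment above for the rare
 * code that genuinely wants "release load" ordering -- a leading acquire
 * fence followed by a relaxed load, rather than the stronger seq_cst fence
 * that android_atomic_release_load() issues to stay conservative.
 */
ANDROID_ATOMIC_INLINE
int32_t example_fence_then_relaxed_load(volatile const int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  atomic_thread_fence(std::memory_order_acquire);
  return atomic_load_explicit(a, std::memory_order_relaxed);
}
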
/*
 * Perform an atomic store with "acquire" or "release" ordering.
 *
 * Note that the notion of an "acquire" ordering for a store does not
 * really fit into the C11 or C++11 memory model.  The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races.  In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a trailing
 * atomic_thread_fence (typically with memory_order_release,
 * not memory_order_acquire!) instead.
 */
ANDROID_ATOMIC_INLINE
void android_atomic_acquire_store(int32_t value, volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  atomic_store_explicit(a, value, std::memory_order_relaxed);
  atomic_thread_fence(std::memory_order_seq_cst);
  /* Again overly conservative to accommodate weird clients.   */
}

ANDROID_ATOMIC_INLINE
void android_atomic_release_store(int32_t value, volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  atomic_store_explicit(a, value, std::memory_order_release);
}

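/*
 * Illustrative sketch only (hypothetical function, not part of this API):
 * the replacement pattern recommended above for code that wants "acquire
 * store" ordering -- a relaxed store followed by a trailing release fence,
 * rather than the stronger seq_cst fence used by
 * android_atomic_acquire_store().
 */
ANDROID_ATOMIC_INLINE
void example_relaxed_store_then_fence(int32_t value, volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  atomic_store_explicit(a, value, std::memory_order_relaxed);
  atomic_thread_fence(std::memory_order_release);
}
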
/*
 * Compare-and-set operation with "acquire" or "release" ordering.
 *
 * This returns zero if the new value was successfully stored, which will
 * only happen when *addr == oldvalue.
 *
 * (The return value is inverted from implementations on other platforms,
 * but matches the ARM ldrex/strex result.)
 *
 * Implementations that use the release CAS in a loop may be less efficient
 * than possible, because we re-issue the memory barrier on each iteration.
 */
ANDROID_ATOMIC_INLINE
int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  return !atomic_compare_exchange_strong_explicit(a, &oldvalue, newvalue,
                                                  std::memory_order_acquire,
                                                  std::memory_order_acquire);
}

ANDROID_ATOMIC_INLINE
int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr)
{
  volatile std::atomic_int_least32_t* a = to_atomic_int_least32_t(addr);
  return !atomic_compare_exchange_strong_explicit(a, &oldvalue, newvalue,
                                                  std::memory_order_release,
                                                  std::memory_order_relaxed);
}

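/*
 * Illustrative sketch only (hypothetical functions, not part of this API):
 * a minimal spin lock built from the legacy primitives above, matching the
 * acquire/release lock model described at the top of this file.  Note the
 * inverted return convention: android_atomic_acquire_cas() returns 0 on
 * success.
 */
ANDROID_ATOMIC_INLINE
void example_spin_lock(volatile int32_t* lock)
{
  /* 0 == lock-is-free, 1 == lock-is-held. */
  while (android_atomic_acquire_cas(0, 1, lock) != 0) {
    /* CAS failed (returned nonzero); spin and retry. */
  }
}

ANDROID_ATOMIC_INLINE
void example_spin_unlock(volatile int32_t* lock)
{
  /* The release store ensures the work done under the lock is visible
   * before the lock is observed as free. */
  android_atomic_release_store(0, lock);
}
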
/*
 * Fence primitives.
 */
ANDROID_ATOMIC_INLINE
void android_compiler_barrier(void)
{
    __asm__ __volatile__ ("" : : : "memory");
    /* Could probably also be:                             */
    /* atomic_signal_fence(std::memory_order_seq_cst);     */
}

ANDROID_ATOMIC_INLINE
void android_memory_barrier(void)
{
  atomic_thread_fence(std::memory_order_seq_cst);
}

/*
 * Aliases for code using an older version of this header.  These are now
 * deprecated and should not be used.  The definitions will be removed
 * in a future release.
 */
#define android_atomic_write android_atomic_release_store
#define android_atomic_cmpxchg android_atomic_release_cas

#endif // ANDROID_CUTILS_ATOMIC_H