/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_CUTILS_ATOMIC_H
#define ANDROID_CUTILS_ATOMIC_H

#include <stdint.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * A handful of basic atomic operations.
 * THESE ARE HERE FOR LEGACY REASONS ONLY. AVOID.
 *
 * PREFERRED ALTERNATIVES:
 * - Use C++/C/pthread locks/mutexes whenever there is not a
 *   convincing reason to do otherwise. Note that very clever and
 *   complicated, but correct, lock-free code is often slower than
 *   using locks, especially where nontrivial data structures
 *   are involved.
 * - C11 stdatomic.h.
 * - Where supported, C++11 std::atomic<T>.
 *
 * PLEASE STOP READING HERE UNLESS YOU ARE TRYING TO UNDERSTAND
 * OR UPDATE OLD CODE.
 *
 * The "acquire" and "release" terms can be defined intuitively in terms
 * of the placement of memory barriers in a simple lock implementation:
 * - wait until compare-and-swap(lock-is-free --> lock-is-held) succeeds
 * - barrier
 * - [do work]
 * - barrier
 * - store(lock-is-free)
 * In very crude terms, the initial (acquire) barrier prevents any of the
 * "work" from happening before the lock is held, and the later (release)
 * barrier ensures that all of the work happens before the lock is released.
 * (Think of cached writes, cache read-ahead, and instruction reordering
 * around the CAS and store instructions.)
 *
 * The barriers must apply to both the compiler and the CPU. Note it is
 * legal for instructions that occur before an "acquire" barrier to be
 * moved down below it, and for instructions that occur after a "release"
 * barrier to be moved up above it.
 *
 * The ARM-driven implementation we use here is short on subtlety,
 * and actually requests a full barrier from the compiler and the CPU.
 * The only difference between acquire and release is in whether they
 * are issued before or after the atomic operation with which they
 * are associated. To ease the transition to C/C++ atomic intrinsics,
 * you should not rely on this, and instead assume that only the minimal
 * acquire/release protection is provided.
 *
 * NOTE: all int32_t* values are expected to be aligned on 32-bit boundaries.
 * If they are not, atomicity is not guaranteed.
 */
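/*
 * To make the lock sketch above concrete, here is a minimal,
 * illustrative spin lock built from the operations declared below.
 * It is not part of this header's API; the names spin_lock and
 * spin_unlock are hypothetical.
 *
 *   #define LOCK_FREE 0
 *   #define LOCK_HELD 1
 *
 *   static void spin_lock(volatile int32_t* lock) {
 *       // acquire_cas returns 0 on success; its acquire barrier keeps
 *       // the critical-section work from being reordered above it.
 *       while (android_atomic_acquire_cas(LOCK_FREE, LOCK_HELD, lock) != 0) {
 *           // Spin. A real implementation would yield or back off.
 *       }
 *   }
 *
 *   static void spin_unlock(volatile int32_t* lock) {
 *       // The release barrier keeps the critical-section work from
 *       // being reordered below the store that frees the lock.
 *       android_atomic_release_store(LOCK_FREE, lock);
 *   }
 */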
/*
 * Basic arithmetic and bitwise operations. These all provide a
 * barrier with "release" ordering, and return the previous value.
 *
 * These have the same characteristics (e.g. what happens on overflow)
 * as the equivalent non-atomic C operations.
 */
int32_t android_atomic_inc(volatile int32_t* addr);
int32_t android_atomic_dec(volatile int32_t* addr);
int32_t android_atomic_add(int32_t value, volatile int32_t* addr);
int32_t android_atomic_and(int32_t value, volatile int32_t* addr);
int32_t android_atomic_or(int32_t value, volatile int32_t* addr);

/*
 * Perform an atomic load with "acquire" or "release" ordering.
 *
 * Note that the notion of a "release" ordering for a load does not
 * really fit into the C11 or C++11 memory model. The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races. In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a leading
 * atomic_thread_fence (typically with memory_order_acquire,
 * not memory_order_release!) instead. If you do not understand
 * this comment, you are in the vast majority, and should not be
 * using release loads or replacing them with anything other than
 * locks or default sequentially consistent atomics.
 *
 * This is only necessary if you need the memory barrier. A 32-bit read
 * from a 32-bit aligned address is atomic on all supported platforms.
 */
int32_t android_atomic_acquire_load(volatile const int32_t* addr);
int32_t android_atomic_release_load(volatile const int32_t* addr);

/*
 * Perform an atomic store with "acquire" or "release" ordering.
 *
 * Note that the notion of an "acquire" ordering for a store does not
 * really fit into the C11 or C++11 memory model. The extra ordering
 * is normally observable only by code using memory_order_relaxed
 * atomics, or data races. In the rare cases in which such ordering
 * is called for, use memory_order_relaxed atomics and a trailing
 * atomic_thread_fence (typically with memory_order_release,
 * not memory_order_acquire!) instead.
 *
 * This is only necessary if you need the memory barrier. A 32-bit write
 * to a 32-bit aligned address is atomic on all supported platforms.
 */
void android_atomic_acquire_store(int32_t value, volatile int32_t* addr);
void android_atomic_release_store(int32_t value, volatile int32_t* addr);

/*
 * Compare-and-set operation with "acquire" or "release" ordering.
 *
 * This returns zero if the new value was successfully stored, which will
 * only happen when *addr == oldvalue.
 *
 * (The return value is inverted from implementations on other platforms,
 * but matches the ARM ldrex/strex result.)
 *
 * Implementations that use the release CAS in a loop may be less efficient
 * than possible, because we re-issue the memory barrier on each iteration.
 */
int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr);
int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
                               volatile int32_t* addr);

/*
 * Aliases for code using an older version of this header. These are now
 * deprecated and should not be used. The definitions will be removed
 * in a future release.
 */
#define android_atomic_write android_atomic_release_store
#define android_atomic_cmpxchg android_atomic_release_cas

#ifdef __cplusplus
} // extern "C"
#endif

#endif // ANDROID_CUTILS_ATOMIC_H
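
/*
 * For reference when migrating old code: an illustrative (not
 * normative) sketch of the C11 <stdatomic.h> equivalents recommended
 * above, assuming the shared variable is redeclared as an
 * atomic_int_least32_t. The names v, n, expected, and desired are
 * hypothetical.
 *
 *   #include <stdatomic.h>
 *   #include <stdbool.h>
 *
 *   atomic_int_least32_t v;
 *
 *   // android_atomic_inc(&x): returns the previous value, with
 *   // "release" ordering like the arithmetic operations above.
 *   int32_t old = atomic_fetch_add_explicit(&v, 1, memory_order_release);
 *
 *   // android_atomic_acquire_load(&x):
 *   int32_t cur = atomic_load_explicit(&v, memory_order_acquire);
 *
 *   // android_atomic_release_store(n, &x):
 *   atomic_store_explicit(&v, n, memory_order_release);
 *
 *   // android_atomic_release_cas(expected, desired, &x). Note the
 *   // inverted result: the C11 call returns true on success, while
 *   // the CAS above returns zero on success.
 *   bool ok = atomic_compare_exchange_strong_explicit(
 *       &v, &expected, desired,
 *       memory_order_release, memory_order_relaxed);
 */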