/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

// This file is not part of the public Skia API.
#include "SkTypes.h"
#include <atomic>

// ~~~~~~~~ APIs ~~~~~~~~~

// These values mirror std::memory_order; the implementations below cast them directly.
enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};

template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
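
// Illustrative sketch only (hypothetical names, not part of this header): a
// release-store / acquire-load pair that publishes a plain value through the
// free functions declared above.
//
//     struct Published {
//         int     fValue = 0;
//         int32_t fReady = 0;
//
//         void publish(int v) {
//             fValue = v;                                           // plain write...
//             sk_atomic_store(&fReady, 1, sk_memory_order_release); // ...made visible here
//         }
//
//         bool tryRead(int* out) const {
//             if (sk_atomic_load(&fReady, sk_memory_order_acquire)) { // pairs with the release
//                 *out = fValue;
//                 return true;
//             }
//             return false;
//         }
//     };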

// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically. This is our stand-in for std::atomic<T>.
// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
class SkAtomic : SkNoncopyable {
public:
    SkAtomic() {}
    explicit SkAtomic(const T& val) : fVal(val) {}

    // It is essential we return by value rather than by const&. fVal may change at any time.
    T load(sk_memory_order mo = default_memory_order) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = default_memory_order) {
        sk_atomic_store(&fVal, val, mo);
    }

    // Alias for .load(default_memory_order).
    operator T() const {
        return this->load();
    }

    // Alias for .store(v, default_memory_order).
    T operator=(const T& v) {
        this->store(v);
        return v;
    }

    T fetch_add(const T& val, sk_memory_order mo = default_memory_order) {
        return sk_atomic_fetch_add(&fVal, val, mo);
    }

    T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) {
        return sk_atomic_fetch_sub(&fVal, val, mo);
    }

    bool compare_exchange(T* expected, const T& desired,
                          sk_memory_order success = default_memory_order,
                          sk_memory_order failure = default_memory_order) {
        return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
    }
private:
    T fVal;
};
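
// Usage sketch for SkAtomic (hypothetical Counter type; illustrative only):
//
//     struct Counter {
//         SkAtomic<int32_t> fCount{0};                                  // seq_cst by default
//
//         void    up()             { fCount.fetch_add(1); }
//         bool    down()           { return 1 == fCount.fetch_sub(1); } // true when we hit zero
//         int32_t snapshot() const { return fCount.load(); }
//     };
//
// Since operator T() and operator=() alias load() and store(), plain reads and
// assignments (e.g. int32_t v = fCount; fCount = 42;) also use the default memory order.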

// ~~~~~~~~ Implementations ~~~~~~~~~
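
// Note: the helpers below reinterpret a plain T* as a std::atomic<T>*. That
// assumes std::atomic<T> has the same size and representation as T, which is
// the practical expectation for the small builtin types this header targets.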

template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    SkASSERT(mo == sk_memory_order_relaxed ||
             mo == sk_memory_order_seq_cst ||
             mo == sk_memory_order_acquire ||
             mo == sk_memory_order_consume);
    const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
    return std::atomic_load_explicit(ap, (std::memory_order)mo);
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    SkASSERT(mo == sk_memory_order_relaxed ||
             mo == sk_memory_order_seq_cst ||
             mo == sk_memory_order_release);
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_store_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
                                sk_memory_order success,
                                sk_memory_order failure) {
    // All values of success are valid.
    SkASSERT(failure == sk_memory_order_relaxed ||
             failure == sk_memory_order_seq_cst ||
             failure == sk_memory_order_acquire ||
             failure == sk_memory_order_consume);
    SkASSERT(failure <= success);
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    // On failure, *expected is updated to hold the value actually found in *ptr.
    return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
                                                        (std::memory_order)success,
                                                        (std::memory_order)failure);
}

template <typename T>
T sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_exchange_explicit(ap, val, (std::memory_order)mo);
}

// ~~~~~~~~ Legacy APIs ~~~~~~~~~

// From here down we have shims for our old atomics API, to be weaned off of.
// We use the default sequentially-consistent memory order to make things simple
// and to match the practical reality of our old _sync and _win implementations.

inline int32_t sk_atomic_inc(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, -1); }
inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr,  v); }

inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }

inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
    return sk_atomic_compare_exchange(ptr, &expected, desired);
}

// Returns the value *ptr held before the call; the swap took place iff that equals expected.
inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
    (void)sk_atomic_compare_exchange(ptr, &expected, desired);
    return expected;
}

// Increments *ptr only if it was non-zero; returns the value *ptr held before the call.
inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
    int32_t prev = sk_atomic_load(ptr);
    do {
        if (0 == prev) {
            break;
        }
    } while(!sk_atomic_compare_exchange(ptr, &prev, prev+1));
    return prev;
}
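
// Usage sketch (hypothetical weak-reference helper; illustrative only): take a
// strong reference only while the count is still non-zero.
//
//     bool tryRef(int32_t* refCnt) {
//         return sk_atomic_conditional_inc(refCnt) != 0;   // 0 => already dead, no ref taken
//     }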

template <typename T>
T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }

template <typename T>
void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }

// These are now no-ops, kept only so legacy call sites keep compiling.
inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}

#endif//SkAtomics_DEFINED