1 // Copyright 2016 Amanieu d'Antras
2 //
3 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5 // http://opensource.org/licenses/MIT>, at your option. This file may not be
6 // copied, modified, or distributed except according to those terms.
7
8 use bytemuck::NoUninit;
9
10 #[cfg(feature = "fallback")]
11 use crate::fallback;
12 use core::cmp;
13 use core::mem;
14 use core::num::Wrapping;
15 use core::ops;
16 use core::sync::atomic::Ordering;
17
/// Expands `$impl` with `$atomic` bound to the native atomic *unsigned*
/// integer type whose size matches `$type`, provided `$type` is also at
/// least as aligned as that atomic type requires.
///
/// When no suitable native atomic exists, expands `$fallback_impl` (an
/// implementation from the `fallback` module) if the `fallback` feature is
/// enabled, and otherwise panics at runtime.
macro_rules! match_atomic {
    ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {
        // `size_of`/`align_of` are compile-time constants, so every arm but
        // one is dead code and the match optimizes down to the selected
        // implementation.
        match mem::size_of::<$type>() {
            #[cfg(target_has_atomic = "8")]
            1 if mem::align_of::<$type>() >= 1 => {
                type $atomic = core::sync::atomic::AtomicU8;

                $impl
            }
            #[cfg(target_has_atomic = "16")]
            2 if mem::align_of::<$type>() >= 2 => {
                type $atomic = core::sync::atomic::AtomicU16;

                $impl
            }
            #[cfg(target_has_atomic = "32")]
            4 if mem::align_of::<$type>() >= 4 => {
                type $atomic = core::sync::atomic::AtomicU32;

                $impl
            }
            #[cfg(target_has_atomic = "64")]
            8 if mem::align_of::<$type>() >= 8 => {
                type $atomic = core::sync::atomic::AtomicU64;

                $impl
            }
            // 128-bit atomics require the `nightly` feature of this crate.
            #[cfg(all(feature = "nightly", target_has_atomic = "128"))]
            16 if mem::align_of::<$type>() >= 16 => {
                type $atomic = core::sync::atomic::AtomicU128;

                $impl
            }
            #[cfg(feature = "fallback")]
            _ => $fallback_impl,
            #[cfg(not(feature = "fallback"))]
            _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()),
        }
    };
}
58
/// Same dispatch as [`match_atomic`], but binds `$atomic` to the native
/// atomic *signed* integer type of matching size.
///
/// Used by operations whose result depends on signedness (`fetch_min`/
/// `fetch_max` compare as signed on `AtomicI*` types).
macro_rules! match_signed_atomic {
    ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {
        // `size_of`/`align_of` are compile-time constants; only the matching
        // arm survives optimization.
        match mem::size_of::<$type>() {
            #[cfg(target_has_atomic = "8")]
            1 if mem::align_of::<$type>() >= 1 => {
                type $atomic = core::sync::atomic::AtomicI8;

                $impl
            }
            #[cfg(target_has_atomic = "16")]
            2 if mem::align_of::<$type>() >= 2 => {
                type $atomic = core::sync::atomic::AtomicI16;

                $impl
            }
            #[cfg(target_has_atomic = "32")]
            4 if mem::align_of::<$type>() >= 4 => {
                type $atomic = core::sync::atomic::AtomicI32;

                $impl
            }
            #[cfg(target_has_atomic = "64")]
            8 if mem::align_of::<$type>() >= 8 => {
                type $atomic = core::sync::atomic::AtomicI64;

                $impl
            }
            // 128-bit atomics require the `nightly` feature of this crate.
            #[cfg(all(feature = "nightly", target_has_atomic = "128"))]
            16 if mem::align_of::<$type>() >= 16 => {
                type $atomic = core::sync::atomic::AtomicI128;

                $impl
            }
            #[cfg(feature = "fallback")]
            _ => $fallback_impl,
            #[cfg(not(feature = "fallback"))]
            _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()),
        }
    };
}
99
/// Returns `true` if operations on a value of type `T` can be mapped onto a
/// native atomic integer type, i.e. `T` has the exact size and at least the
/// alignment of one of the platform's lock-free atomics.
#[inline]
pub const fn atomic_is_lock_free<T>() -> bool {
    let size = mem::size_of::<T>();
    let align = mem::align_of::<T>();

    // Non-short-circuiting `&`/`|` keep this valid in a `const fn` on older
    // compilers; nothing here has side effects, so laziness buys nothing.
    let lock_free_8 = cfg!(target_has_atomic = "8") & (size == 1) & (align >= 1);
    let lock_free_16 = cfg!(target_has_atomic = "16") & (size == 2) & (align >= 2);
    let lock_free_32 = cfg!(target_has_atomic = "32") & (size == 4) & (align >= 4);
    let lock_free_64 = cfg!(target_has_atomic = "64") & (size == 8) & (align >= 8);
    // 128-bit atomics additionally require this crate's `nightly` feature.
    let lock_free_128 = cfg!(feature = "nightly")
        & cfg!(target_has_atomic = "128")
        & (size == 16)
        & (align >= 16);

    lock_free_8 | lock_free_16 | lock_free_32 | lock_free_64 | lock_free_128
}
114
115 #[inline]
atomic_load<T: NoUninit>(dst: *mut T, order: Ordering) -> T116 pub unsafe fn atomic_load<T: NoUninit>(dst: *mut T, order: Ordering) -> T {
117 match_atomic!(
118 T,
119 A,
120 mem::transmute_copy(&(*(dst as *const A)).load(order)),
121 fallback::atomic_load(dst)
122 )
123 }
124
125 #[inline]
atomic_store<T: NoUninit>(dst: *mut T, val: T, order: Ordering)126 pub unsafe fn atomic_store<T: NoUninit>(dst: *mut T, val: T, order: Ordering) {
127 match_atomic!(
128 T,
129 A,
130 (*(dst as *const A)).store(mem::transmute_copy(&val), order),
131 fallback::atomic_store(dst, val)
132 )
133 }
134
135 #[inline]
atomic_swap<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T136 pub unsafe fn atomic_swap<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T {
137 match_atomic!(
138 T,
139 A,
140 mem::transmute_copy(&(*(dst as *const A)).swap(mem::transmute_copy(&val), order)),
141 fallback::atomic_swap(dst, val)
142 )
143 }
144
/// Transmutes both the `Ok` and `Err` payloads of `r` from `T` to `U`.
///
/// # Safety
///
/// The bits of the payload must form a valid `U`, and `U` must not be larger
/// than `T` (callers use this for same-size integer <-> `T` reinterpretation).
#[inline]
unsafe fn map_result<T, U>(r: Result<T, T>) -> Result<U, U> {
    r.map(|ok| mem::transmute_copy(&ok))
        .map_err(|err| mem::transmute_copy(&err))
}
152
153 #[inline]
atomic_compare_exchange<T: NoUninit>( dst: *mut T, current: T, new: T, success: Ordering, failure: Ordering, ) -> Result<T, T>154 pub unsafe fn atomic_compare_exchange<T: NoUninit>(
155 dst: *mut T,
156 current: T,
157 new: T,
158 success: Ordering,
159 failure: Ordering,
160 ) -> Result<T, T> {
161 match_atomic!(
162 T,
163 A,
164 map_result((*(dst as *const A)).compare_exchange(
165 mem::transmute_copy(¤t),
166 mem::transmute_copy(&new),
167 success,
168 failure,
169 )),
170 fallback::atomic_compare_exchange(dst, current, new)
171 )
172 }
173
174 #[inline]
atomic_compare_exchange_weak<T: NoUninit>( dst: *mut T, current: T, new: T, success: Ordering, failure: Ordering, ) -> Result<T, T>175 pub unsafe fn atomic_compare_exchange_weak<T: NoUninit>(
176 dst: *mut T,
177 current: T,
178 new: T,
179 success: Ordering,
180 failure: Ordering,
181 ) -> Result<T, T> {
182 match_atomic!(
183 T,
184 A,
185 map_result((*(dst as *const A)).compare_exchange_weak(
186 mem::transmute_copy(¤t),
187 mem::transmute_copy(&new),
188 success,
189 failure,
190 )),
191 fallback::atomic_compare_exchange(dst, current, new)
192 )
193 }
194
195 #[inline]
atomic_add<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T where Wrapping<T>: ops::Add<Output = Wrapping<T>>,196 pub unsafe fn atomic_add<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T
197 where
198 Wrapping<T>: ops::Add<Output = Wrapping<T>>,
199 {
200 match_atomic!(
201 T,
202 A,
203 mem::transmute_copy(&(*(dst as *const A)).fetch_add(mem::transmute_copy(&val), order),),
204 fallback::atomic_add(dst, val)
205 )
206 }
207
208 #[inline]
atomic_sub<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T where Wrapping<T>: ops::Sub<Output = Wrapping<T>>,209 pub unsafe fn atomic_sub<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T
210 where
211 Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
212 {
213 match_atomic!(
214 T,
215 A,
216 mem::transmute_copy(&(*(dst as *const A)).fetch_sub(mem::transmute_copy(&val), order),),
217 fallback::atomic_sub(dst, val)
218 )
219 }
220
221 #[inline]
atomic_and<T: NoUninit + ops::BitAnd<Output = T>>( dst: *mut T, val: T, order: Ordering, ) -> T222 pub unsafe fn atomic_and<T: NoUninit + ops::BitAnd<Output = T>>(
223 dst: *mut T,
224 val: T,
225 order: Ordering,
226 ) -> T {
227 match_atomic!(
228 T,
229 A,
230 mem::transmute_copy(&(*(dst as *const A)).fetch_and(mem::transmute_copy(&val), order),),
231 fallback::atomic_and(dst, val)
232 )
233 }
234
235 #[inline]
atomic_or<T: NoUninit + ops::BitOr<Output = T>>( dst: *mut T, val: T, order: Ordering, ) -> T236 pub unsafe fn atomic_or<T: NoUninit + ops::BitOr<Output = T>>(
237 dst: *mut T,
238 val: T,
239 order: Ordering,
240 ) -> T {
241 match_atomic!(
242 T,
243 A,
244 mem::transmute_copy(&(*(dst as *const A)).fetch_or(mem::transmute_copy(&val), order),),
245 fallback::atomic_or(dst, val)
246 )
247 }
248
249 #[inline]
atomic_xor<T: NoUninit + ops::BitXor<Output = T>>( dst: *mut T, val: T, order: Ordering, ) -> T250 pub unsafe fn atomic_xor<T: NoUninit + ops::BitXor<Output = T>>(
251 dst: *mut T,
252 val: T,
253 order: Ordering,
254 ) -> T {
255 match_atomic!(
256 T,
257 A,
258 mem::transmute_copy(&(*(dst as *const A)).fetch_xor(mem::transmute_copy(&val), order),),
259 fallback::atomic_xor(dst, val)
260 )
261 }
262
263 #[inline]
atomic_min<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T264 pub unsafe fn atomic_min<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
265 match_signed_atomic!(
266 T,
267 A,
268 mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),),
269 fallback::atomic_min(dst, val)
270 )
271 }
272
273 #[inline]
atomic_max<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T274 pub unsafe fn atomic_max<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
275 match_signed_atomic!(
276 T,
277 A,
278 mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),),
279 fallback::atomic_max(dst, val)
280 )
281 }
282
283 #[inline]
atomic_umin<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T284 pub unsafe fn atomic_umin<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
285 match_atomic!(
286 T,
287 A,
288 mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),),
289 fallback::atomic_min(dst, val)
290 )
291 }
292
293 #[inline]
atomic_umax<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T294 pub unsafe fn atomic_umax<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
295 match_atomic!(
296 T,
297 A,
298 mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),),
299 fallback::atomic_max(dst, val)
300 )
301 }
302