// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

#[cfg(feature = "fallback")]
use crate::fallback;
use core::cmp;
use core::mem;
use core::num::Wrapping;
use core::ops;
use core::sync::atomic::Ordering;

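// Dispatches `$impl` with `$atomic` bound to the native unsigned atomic type
// whose size and alignment match `$type`. When no native type fits, the
// expression falls back to `$fallback_impl`, or panics at runtime if the
// `fallback` feature is disabled.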
macro_rules! match_atomic {
    ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {
        match mem::size_of::<$type>() {
            #[cfg(has_atomic_u8)]
            1 if mem::align_of::<$type>() >= 1 => {
                type $atomic = core::sync::atomic::AtomicU8;

                $impl
            }
            #[cfg(has_atomic_u16)]
            2 if mem::align_of::<$type>() >= 2 => {
                type $atomic = core::sync::atomic::AtomicU16;

                $impl
            }
            #[cfg(has_atomic_u32)]
            4 if mem::align_of::<$type>() >= 4 => {
                type $atomic = core::sync::atomic::AtomicU32;

                $impl
            }
            #[cfg(has_atomic_u64)]
            8 if mem::align_of::<$type>() >= 8 => {
                type $atomic = core::sync::atomic::AtomicU64;

                $impl
            }
            #[cfg(has_atomic_u128)]
            16 if mem::align_of::<$type>() >= 16 => {
                type $atomic = core::sync::atomic::AtomicU128;

                $impl
            }
            #[cfg(feature = "fallback")]
            _ => $fallback_impl,
            #[cfg(not(feature = "fallback"))]
            _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()),
        }
    };
}

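// Signed counterpart of `match_atomic!`, selecting the `AtomicI*` types
// instead. The distinction matters for `fetch_min`/`fetch_max`, where the
// result depends on whether the comparison is signed or unsigned.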
macro_rules! match_signed_atomic {
    ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {
        match mem::size_of::<$type>() {
            #[cfg(has_atomic_i8)]
            1 if mem::align_of::<$type>() >= 1 => {
                type $atomic = core::sync::atomic::AtomicI8;

                $impl
            }
            #[cfg(has_atomic_i16)]
            2 if mem::align_of::<$type>() >= 2 => {
                type $atomic = core::sync::atomic::AtomicI16;

                $impl
            }
            #[cfg(has_atomic_i32)]
            4 if mem::align_of::<$type>() >= 4 => {
                type $atomic = core::sync::atomic::AtomicI32;

                $impl
            }
            #[cfg(has_atomic_i64)]
            8 if mem::align_of::<$type>() >= 8 => {
                type $atomic = core::sync::atomic::AtomicI64;

                $impl
            }
            #[cfg(has_atomic_i128)]
            16 if mem::align_of::<$type>() >= 16 => {
                type $atomic = core::sync::atomic::AtomicI128;

                $impl
            }
            #[cfg(feature = "fallback")]
            _ => $fallback_impl,
            #[cfg(not(feature = "fallback"))]
            _ => panic!("Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.", core::any::type_name::<$type>()),
        }
    };
}

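/// Returns `true` if atomic operations on a value of type `T` are lock-free,
/// i.e. if `T`'s size and alignment match one of the native atomic types.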
#[inline]
pub const fn atomic_is_lock_free<T>() -> bool {
    let size = mem::size_of::<T>();
    let align = mem::align_of::<T>();

    (cfg!(has_atomic_u8) & (size == 1) & (align >= 1))
        | (cfg!(has_atomic_u16) & (size == 2) & (align >= 2))
        | (cfg!(has_atomic_u32) & (size == 4) & (align >= 4))
        | (cfg!(has_atomic_u64) & (size == 8) & (align >= 8))
        | (cfg!(has_atomic_u128) & (size == 16) & (align >= 16))
}

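/// Atomically loads the value at `dst`.
///
/// # Safety
///
/// `dst` must point to a valid, properly aligned `T`.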
#[inline]
pub unsafe fn atomic_load<T>(dst: *mut T, order: Ordering) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).load(order)),
        fallback::atomic_load(dst)
    )
}

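/// Atomically stores `val` into `dst`.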
#[inline]
pub unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match_atomic!(
        T,
        A,
        (*(dst as *const A)).store(mem::transmute_copy(&val), order),
        fallback::atomic_store(dst, val)
    )
}

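/// Atomically replaces the value at `dst` with `val`, returning the previous
/// value.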
#[inline]
pub unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).swap(mem::transmute_copy(&val), order)),
        fallback::atomic_swap(dst, val)
    )
}

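/// Transmutes both arms of a `compare_exchange` result from the raw atomic
/// integer type `T` back to the user-facing type `U`.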
#[inline]
unsafe fn map_result<T, U>(r: Result<T, T>) -> Result<U, U> {
    match r {
        Ok(x) => Ok(mem::transmute_copy(&x)),
        Err(x) => Err(mem::transmute_copy(&x)),
    }
}

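/// Atomically stores `new` into `dst` if the value there equals `current`.
/// Returns `Ok` with the previous value on success, or `Err` with the actual
/// value on failure.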
#[inline]
pub unsafe fn atomic_compare_exchange<T>(
    dst: *mut T,
    current: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    match_atomic!(
        T,
        A,
        map_result((*(dst as *const A)).compare_exchange(
            mem::transmute_copy(&current),
            mem::transmute_copy(&new),
            success,
            failure,
        )),
        fallback::atomic_compare_exchange(dst, current, new)
    )
}

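/// Like [`atomic_compare_exchange`], but may fail spuriously even when the
/// comparison succeeds, which can be more efficient inside retry loops on
/// some platforms.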
#[inline]
pub unsafe fn atomic_compare_exchange_weak<T>(
    dst: *mut T,
    current: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    match_atomic!(
        T,
        A,
        map_result((*(dst as *const A)).compare_exchange_weak(
            mem::transmute_copy(&current),
            mem::transmute_copy(&new),
            success,
            failure,
        )),
        fallback::atomic_compare_exchange(dst, current, new)
    )
}

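/// Atomically adds `val` to the value at `dst`, wrapping on overflow, and
/// returns the previous value. The `Wrapping` bound restricts `T` to
/// primitive integer types.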
#[inline]
pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
where
    Wrapping<T>: ops::Add<Output = Wrapping<T>>,
{
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_add(mem::transmute_copy(&val), order)),
        fallback::atomic_add(dst, val)
    )
}

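/// Atomically subtracts `val` from the value at `dst`, wrapping on overflow,
/// and returns the previous value.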
#[inline]
pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
where
    Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
{
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_sub(mem::transmute_copy(&val), order)),
        fallback::atomic_sub(dst, val)
    )
}

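/// Atomically bitwise-ANDs `val` with the value at `dst` and returns the
/// previous value.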
#[inline]
pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(
    dst: *mut T,
    val: T,
    order: Ordering,
) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_and(mem::transmute_copy(&val), order)),
        fallback::atomic_and(dst, val)
    )
}

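/// Atomically bitwise-ORs `val` with the value at `dst` and returns the
/// previous value.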
#[inline]
pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(
    dst: *mut T,
    val: T,
    order: Ordering,
) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_or(mem::transmute_copy(&val), order)),
        fallback::atomic_or(dst, val)
    )
}

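/// Atomically bitwise-XORs `val` with the value at `dst` and returns the
/// previous value.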
#[inline]
pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(
    dst: *mut T,
    val: T,
    order: Ordering,
) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_xor(mem::transmute_copy(&val), order)),
        fallback::atomic_xor(dst, val)
    )
}

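/// Atomically stores the minimum of `val` and the value at `dst`, using a
/// signed comparison, and returns the previous value.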
#[inline]
pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
    match_signed_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order)),
        fallback::atomic_min(dst, val)
    )
}

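/// Atomically stores the maximum of `val` and the value at `dst`, using a
/// signed comparison, and returns the previous value.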
#[inline]
pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
    match_signed_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order)),
        fallback::atomic_max(dst, val)
    )
}

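/// Unsigned counterpart of [`atomic_min`]: dispatches through
/// `match_atomic!` so the comparison uses the unsigned `AtomicU*` types.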
#[inline]
pub unsafe fn atomic_umin<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order)),
        fallback::atomic_min(dst, val)
    )
}

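/// Unsigned counterpart of [`atomic_max`]: dispatches through
/// `match_atomic!` so the comparison uses the unsigned `AtomicU*` types.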
#[inline]
pub unsafe fn atomic_umax<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
    match_atomic!(
        T,
        A,
        mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order)),
        fallback::atomic_max(dst, val)
    )
}