// SPDX-License-Identifier: Apache-2.0 OR MIT

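// Defines a 64-bit atomic integer type `$atomic_type` backed by an
// `UnsafeCell<$int_type>`. Every operation is forwarded to the
// module-level `atomic_*` functions, with `$atomic_max`/`$atomic_min`
// selecting the signed or unsigned max/min implementations for the
// instantiated type.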
macro_rules! atomic64 {
    ($atomic_type:ident, $int_type:ident, $atomic_max:ident, $atomic_min:ident) => {
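        // The underlying 64-bit atomic operations assume an 8-byte-aligned
        // address, and a plain u64/i64 only guarantees its ABI alignment
        // (which may be lower on some 32-bit targets), hence the explicit
        // `align(8)`.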
        #[repr(C, align(8))]
        pub(crate) struct $atomic_type {
            v: core::cell::UnsafeCell<$int_type>,
        }

        // Send is implicitly implemented.
        // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock.
        unsafe impl Sync for $atomic_type {}

        impl_default_no_fetch_ops!($atomic_type, $int_type);
        impl_default_bit_opts!($atomic_type, $int_type);
        impl $atomic_type {
            #[inline]
            pub(crate) const fn new(v: $int_type) -> Self {
                Self { v: core::cell::UnsafeCell::new(v) }
            }

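            // These forward to the module-level `is_lock_free` function and
            // `IS_ALWAYS_LOCK_FREE` constant, which reflect whether the
            // `atomic_*` functions are backed by atomic instructions, the
            // kernel user helper, or the lock.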
            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                is_lock_free()
            }
            pub(crate) const IS_ALWAYS_LOCK_FREE: bool = IS_ALWAYS_LOCK_FREE;

            #[inline]
            pub(crate) fn get_mut(&mut self) -> &mut $int_type {
                // SAFETY: the mutable reference guarantees unique ownership.
                // (UnsafeCell::get_mut requires Rust 1.50)
                unsafe { &mut *self.v.get() }
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                crate::utils::assert_load_ordering(order);
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_load(self.v.get().cast::<u64>(), order) as $int_type
                }
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                crate::utils::assert_store_ordering(order);
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_store(self.v.get().cast::<u64>(), val as u64, order)
                }
            }

            #[inline]
            pub(crate) fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_swap(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    match atomic_compare_exchange(
                        self.v.get().cast::<u64>(),
                        current as u64,
                        new as u64,
                        success,
                        failure,
                    ) {
                        Ok(v) => Ok(v as $int_type),
                        Err(v) => Err(v as $int_type),
                    }
                }
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    match atomic_compare_exchange_weak(
                        self.v.get().cast::<u64>(),
                        current as u64,
                        new as u64,
                        success,
                        failure,
                    ) {
                        Ok(v) => Ok(v as $int_type),
                        Err(v) => Err(v as $int_type),
                    }
                }
            }

            #[inline]
            pub(crate) fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_add(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            pub(crate) fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_sub(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_and(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            pub(crate) fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_nand(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_or(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_xor(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    $atomic_max(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    $atomic_min(self.v.get().cast::<u64>(), val as u64, order) as $int_type
                }
            }

            #[inline]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_not(self.v.get().cast::<u64>(), order) as $int_type
                }
            }
            #[inline]
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }

            #[inline]
            pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                // SAFETY: any data races are prevented by atomic intrinsics, the kernel user helper, or the lock
                // and the raw pointer passed in is valid because we got it from a reference.
                unsafe {
                    atomic_neg(self.v.get().cast::<u64>(), order) as $int_type
                }
            }
            #[inline]
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }

            #[inline]
            pub(crate) const fn as_ptr(&self) -> *mut $int_type {
                self.v.get()
            }
        }
    };
}
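// A sketch of how this macro is expected to be instantiated (the exact
// helper names passed for the max/min parameters are assumptions; the
// real invocation sites live elsewhere in this file):
//
//     atomic64!(AtomicI64, i64, atomic_max, atomic_min);
//     atomic64!(AtomicU64, u64, atomic_umax, atomic_umin);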