// SPDX-License-Identifier: Apache-2.0 OR MIT

/*
Atomic load/store implementation on RISC-V.

This is for RISC-V targets without atomic CAS. (rustc doesn't provide atomics
at all on such targets; see https://github.com/rust-lang/rust/pull/114499.)

It also optionally provides an RMW implementation when force-amo or the Zaamo
target feature is enabled.

Refs:
- RISC-V Instruction Set Manual
  https://github.com/riscv/riscv-isa-manual/tree/riscv-isa-release-8b9dc50-2024-08-30
  "Zaamo" Extension for Atomic Memory Operations
  https://github.com/riscv/riscv-isa-manual/blob/riscv-isa-release-8b9dc50-2024-08-30/src/a-st-ext.adoc#zaamo-extension-for-atomic-memory-operations
  "Zabha" Extension for Byte and Halfword Atomic Memory Operations
  https://github.com/riscv/riscv-isa-manual/blob/riscv-isa-release-8b9dc50-2024-08-30/src/zabha.adoc
- RISC-V Atomics ABI Specification
  https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/draft-20240829-13bfa9f54634cb60d86b9b333e109f077805b4b3/riscv-atomic.adoc
- atomic-maybe-uninit https://github.com/taiki-e/atomic-maybe-uninit

Generated asm:
- riscv64gc https://godbolt.org/z/q4fhcPEv4
- riscv64gc (+zabha) https://godbolt.org/z/hde3ao7hx
- riscv32imac https://godbolt.org/z/7PKMx5KK3
- riscv32imac (+zabha) https://godbolt.org/z/E1aTff9f7
*/
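// A minimal usage sketch of the types defined below (hypothetical caller code,
// not part of this module; `swap` and the other RMW ops are only available in
// the AMO-capable configurations described above):
//
//     let v = AtomicU32::new(1);
//     v.store(2, Ordering::Release);
//     assert_eq!(v.load(Ordering::Acquire), 2);
//     #[cfg(any(target_feature = "zaamo", portable_atomic_target_feature = "zaamo"))]
//     assert_eq!(v.swap(3, Ordering::AcqRel), 2);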

// TODO: Zacas extension

#[cfg(not(portable_atomic_no_asm))]
use core::arch::asm;
use core::{cell::UnsafeCell, sync::atomic::Ordering};

#[cfg(any(
    test,
    portable_atomic_force_amo,
    target_feature = "zaamo",
    portable_atomic_target_feature = "zaamo",
))]
#[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
#[cfg(target_arch = "riscv32")]
macro_rules! w {
    () => {
        ""
    };
}
#[cfg(any(
    test,
    portable_atomic_force_amo,
    target_feature = "zaamo",
    portable_atomic_target_feature = "zaamo",
))]
#[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
#[cfg(target_arch = "riscv64")]
macro_rules! w {
    () => {
        "w"
    };
}
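// `w!` selects the shift instruction suffix used by sllw/srlw below: on riscv64,
// `sllw`/`srlw` operate on the low 32 bits and use only the low 5 bits of the
// shift amount, matching the behavior of plain `sll`/`srl` on riscv32.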

#[cfg(any(
    test,
    portable_atomic_force_amo,
    target_feature = "zaamo",
    portable_atomic_target_feature = "zaamo",
))]
macro_rules! atomic_rmw_amo_ext {
    ("w") => {
        "+a"
    };
    ("d") => {
        "+a"
    };
    ("b") => {
        "+a,+zabha"
    };
    ("h") => {
        "+a,+zabha"
    };
}
#[cfg(any(
    test,
    portable_atomic_force_amo,
    target_feature = "zaamo",
    portable_atomic_target_feature = "zaamo",
))]
macro_rules! atomic_rmw_amo {
    ($op:ident, $dst:ident, $val:ident, $order:ident, $asm_suffix:tt) => {{
        let out;
        macro_rules! op {
            ($asm_order:tt) => {
                // SAFETY: The user guaranteed that the AMO instruction is available in this
                // system by setting the portable_atomic_force_amo/target_feature and
                // portable_atomic_unsafe_assume_single_core.
                // The caller of this macro must guarantee the validity of the pointer.
                asm!(
                    ".option push",
                    // https://github.com/riscv-non-isa/riscv-asm-manual/blob/ad0de8c004e29c9a7ac33cfd054f4d4f9392f2fb/src/asm-manual.adoc#arch
                    // LLVM supports the `.option arch` directive only on LLVM 17+, so the
                    // .insn directive is used instead on old LLVM.
                    // https://github.com/llvm/llvm-project/commit/9e8ed3403c191ab9c4903e8eeb8f732ff8a43cb4
                    // Note that the `.insn <value>` directive requires LLVM 19.
                    // https://github.com/llvm/llvm-project/commit/2a086dce691e3cc34a2fc27f4fb255bb2cbbfac9
                    concat!(".option arch, ", atomic_rmw_amo_ext!($asm_suffix)),
                    concat!("amo", stringify!($op), ".", $asm_suffix, $asm_order, " {out}, {val}, 0({dst})"),
                    ".option pop",
                    dst = in(reg) ptr_reg!($dst),
                    val = in(reg) $val,
                    out = lateout(reg) out,
                    options(nostack, preserves_flags),
                )
            };
        }
        match $order {
            Ordering::Relaxed => op!(""),
            Ordering::Acquire => op!(".aq"),
            Ordering::Release => op!(".rl"),
            // AcqRel and SeqCst RMWs are equivalent.
            Ordering::AcqRel | Ordering::SeqCst => op!(".aqrl"),
            _ => unreachable!(),
        }
        out
    }};
}
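// For reference, `atomic_rmw_amo!(swap, dst, val, order, "w")` with a `SeqCst`
// order expands to roughly the following assembly (a sketch; actual register
// allocation is left to the compiler):
//
//     .option push
//     .option arch, +a
//     amoswap.w.aqrl {out}, {val}, 0({dst})
//     .option pop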
// 32-bit val.wrapping_shl(shift), but without the extra `& (u32::BITS - 1)` masking.
#[cfg(any(
    test,
    portable_atomic_force_amo,
    target_feature = "zaamo",
    portable_atomic_target_feature = "zaamo",
))]
#[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
#[inline(always)]
fn sllw(val: u32, shift: u32) -> u32 {
    // SAFETY: Calling sll{,w} is safe.
    unsafe {
        let out;
        asm!(
            concat!("sll", w!(), " {out}, {val}, {shift}"),
            out = lateout(reg) out,
            val = in(reg) val,
            shift = in(reg) shift,
            options(pure, nomem, nostack, preserves_flags),
        );
        out
    }
}
// 32-bit val.wrapping_shr(shift), but without the extra `& (u32::BITS - 1)` masking.
#[cfg(any(
    test,
    portable_atomic_force_amo,
    target_feature = "zaamo",
    portable_atomic_target_feature = "zaamo",
))]
#[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
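// (Presumably a macro rather than a function so that the type of `out` can be
// inferred from the call site; callers return it as the sub-word value type.)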
macro_rules! srlw {
    ($val:expr, $shift:expr) => {
        // SAFETY: Calling srl{,w} is safe.
        unsafe {
            let val: u32 = $val;
            let shift: u32 = $shift;
            let out;
            asm!(
                concat!("srl", w!(), " {out}, {val}, {shift}"),
                out = lateout(reg) out,
                val = in(reg) val,
                shift = in(reg) shift,
                options(pure, nomem, nostack, preserves_flags),
            );
            out
        }
    };
}

macro_rules! atomic_load_store {
    ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => {
        #[repr(transparent)]
        pub(crate) struct $atomic_type $(<$($generics)*>)? {
            v: UnsafeCell<$value_type>,
        }

        // Send is implicitly implemented for atomic integers, but not for atomic pointers.
        // SAFETY: any data races are prevented by atomic operations.
        unsafe impl $(<$($generics)*>)? Send for $atomic_type $(<$($generics)*>)? {}
        // SAFETY: any data races are prevented by atomic operations.
        unsafe impl $(<$($generics)*>)? Sync for $atomic_type $(<$($generics)*>)? {}

        #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
        impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? {
            #[inline]
            pub(crate) const fn new(v: $value_type) -> Self {
                Self { v: UnsafeCell::new(v) }
            }

            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::IS_ALWAYS_LOCK_FREE
            }
            pub(crate) const IS_ALWAYS_LOCK_FREE: bool = true;

            #[inline]
            pub(crate) fn get_mut(&mut self) -> &mut $value_type {
                // SAFETY: the mutable reference guarantees unique ownership.
                // (UnsafeCell::get_mut requires Rust 1.50)
                unsafe { &mut *self.v.get() }
            }

            #[inline]
            pub(crate) const fn as_ptr(&self) -> *mut $value_type {
                self.v.get()
            }
        }
        impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? {
            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn load(&self, order: Ordering) -> $value_type {
                crate::utils::assert_load_ordering(order);
                let src = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    let out;
                    macro_rules! atomic_load {
                        ($acquire:tt, $release:tt) => {
                            asm!(
                                $release,
                                concat!("l", $asm_suffix, " {out}, 0({src})"),
                                $acquire,
                                src = in(reg) ptr_reg!(src),
                                out = lateout(reg) out,
                                options(nostack, preserves_flags),
                            )
                        };
                    }
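                    // The fence placements below follow the mappings in the
                    // RISC-V Atomics ABI specification linked in the module
                    // docs (plain load plus leading/trailing fences).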
                    match order {
                        Ordering::Relaxed => atomic_load!("", ""),
                        Ordering::Acquire => atomic_load!("fence r, rw", ""),
                        Ordering::SeqCst => atomic_load!("fence r, rw", "fence rw, rw"),
                        _ => unreachable!(),
                    }
                    out
                }
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn store(&self, val: $value_type, order: Ordering) {
                crate::utils::assert_store_ordering(order);
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe {
                    macro_rules! atomic_store {
                        ($acquire:tt, $release:tt) => {
                            asm!(
                                $release,
                                concat!("s", $asm_suffix, " {val}, 0({dst})"),
                                $acquire,
                                dst = in(reg) ptr_reg!(dst),
                                val = in(reg) val,
                                options(nostack, preserves_flags),
                            )
                        };
                    }
                    match order {
                        Ordering::Relaxed => atomic_store!("", ""),
                        Ordering::Release => atomic_store!("", "fence rw, w"),
                        // https://github.com/llvm/llvm-project/commit/3ea8f2526541884e03d5bd4f4e46f4eb190990b6
                        Ordering::SeqCst => atomic_store!("fence rw, rw", "fence rw, w"),
                        _ => unreachable!(),
                    }
                }
            }
        }
    };
}

macro_rules! atomic_ptr {
    ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => {
        atomic_load_store!($([$($generics)*])? $atomic_type, $value_type, $asm_suffix);
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? {
            #[inline]
            pub(crate) fn swap(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(swap, dst, val, order, $asm_suffix) }
            }
        }
    };
}

macro_rules! atomic {
    ($atomic_type:ident, $value_type:ty, $asm_suffix:tt, $max:tt, $min:tt) => {
        atomic_load_store!($atomic_type, $value_type, $asm_suffix);
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(portable_atomic_unsafe_assume_single_core, feature = "critical-section")))]
        impl_default_no_fetch_ops!($atomic_type, $value_type);
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(portable_atomic_unsafe_assume_single_core, feature = "critical-section")))]
        impl_default_bit_opts!($atomic_type, $value_type);
        // There are no amo{sub,nand,neg} instructions.
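        // (fetch_sub is therefore implemented as fetch_add of the negated value,
        // and fetch_not as fetch_xor with !0; see below.)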
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        impl $atomic_type {
            #[inline]
            pub(crate) fn swap(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(swap, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_add(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(add, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_sub(&self, val: $value_type, order: Ordering) -> $value_type {
                self.fetch_add(val.wrapping_neg(), order)
            }

            #[inline]
            pub(crate) fn fetch_and(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(and, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_or(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(or, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_xor(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!(xor, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $value_type {
                self.fetch_xor(!0, order)
            }
            #[cfg(not(any(
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
            )))]
            #[inline]
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }

            #[inline]
            pub(crate) fn fetch_max(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!($max, dst, val, order, $asm_suffix) }
            }

            #[inline]
            pub(crate) fn fetch_min(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                unsafe { atomic_rmw_amo!($min, dst, val, order, $asm_suffix) }
            }
        }
    };
}

#[cfg(any(
    test,
    portable_atomic_force_amo,
    target_feature = "zaamo",
    portable_atomic_target_feature = "zaamo",
))]
#[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
trait ZeroExtend: Copy {
    /// Zero-extends `self` to `u32` if it is smaller than 32 bits.
    fn zero_extend(self) -> u32;
}
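// Note: zero extension is required here; the sign extension performed by a plain
// `i8 as u32` cast would set bits outside the target sub-word and corrupt the
// neighboring bytes in the shifted AMO operand.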
macro_rules! zero_extend {
    ($int:ident, $uint:ident) => {
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        impl ZeroExtend for $uint {
            #[inline(always)]
            fn zero_extend(self) -> u32 {
                self as u32
            }
        }
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        impl ZeroExtend for $int {
            #[allow(clippy::cast_sign_loss)]
            #[inline(always)]
            fn zero_extend(self) -> u32 {
                self as $uint as u32
            }
        }
    };
}
zero_extend!(i8, u8);
zero_extend!(i16, u16);

macro_rules! atomic_sub_word {
    ($atomic_type:ident, $value_type:ty, $asm_suffix:tt, $max:tt, $min:tt) => {
        #[cfg(any(target_feature = "zabha", portable_atomic_target_feature = "zabha"))]
        atomic!($atomic_type, $value_type, $asm_suffix, $max, $min);
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        atomic_load_store!($atomic_type, $value_type, $asm_suffix);
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        #[cfg(not(any(portable_atomic_unsafe_assume_single_core, feature = "critical-section")))]
        impl_default_bit_opts!($atomic_type, $value_type);
        #[cfg(any(
            test,
            portable_atomic_force_amo,
            target_feature = "zaamo",
            portable_atomic_target_feature = "zaamo",
        ))]
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        impl $atomic_type {
            #[inline]
            pub(crate) fn fetch_and(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                let (dst, shift, mut mask) = crate::utils::create_sub_word_mask_values(dst);
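                // For `and`, every bit outside the target sub-word must be 1 so
                // that the word-sized AMO leaves neighboring bytes unchanged.
                // e.g. for a u8 at byte offset 1 (shift == 8, sub-word mask
                // 0xFF), the mask below becomes !0xFF00 == 0xFFFF_00FF and val
                // becomes 0xFFFF_xxFF, with the operand in byte 1.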
                mask = !sllw(mask, shift);
                let mut val = sllw(ZeroExtend::zero_extend(val), shift);
                val |= mask;
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                let out: u32 = unsafe { atomic_rmw_amo!(and, dst, val, order, "w") };
                srlw!(out, shift)
            }
            #[cfg(not(any(
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
            )))]
            #[inline]
            pub(crate) fn and(&self, val: $value_type, order: Ordering) {
                self.fetch_and(val, order);
            }

            #[inline]
            pub(crate) fn fetch_or(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                let (dst, shift, _mask) = crate::utils::create_sub_word_mask_values(dst);
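                // For `or` (and `xor` below), zero bits outside the target
                // sub-word already preserve the neighboring bytes, so only the
                // shift is needed and the mask is unused.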
                let val = sllw(ZeroExtend::zero_extend(val), shift);
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                let out: u32 = unsafe { atomic_rmw_amo!(or, dst, val, order, "w") };
                srlw!(out, shift)
            }
            #[cfg(not(any(
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
            )))]
            #[inline]
            pub(crate) fn or(&self, val: $value_type, order: Ordering) {
                self.fetch_or(val, order);
            }

            #[inline]
            pub(crate) fn fetch_xor(&self, val: $value_type, order: Ordering) -> $value_type {
                let dst = self.v.get();
                let (dst, shift, _mask) = crate::utils::create_sub_word_mask_values(dst);
                let val = sllw(ZeroExtend::zero_extend(val), shift);
                // SAFETY: any data races are prevented by atomic intrinsics and the raw
                // pointer passed in is valid because we got it from a reference.
                let out: u32 = unsafe { atomic_rmw_amo!(xor, dst, val, order, "w") };
                srlw!(out, shift)
            }
            #[cfg(not(any(
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
            )))]
            #[inline]
            pub(crate) fn xor(&self, val: $value_type, order: Ordering) {
                self.fetch_xor(val, order);
            }

            #[inline]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $value_type {
                self.fetch_xor(!0, order)
            }
            #[cfg(not(any(
                portable_atomic_unsafe_assume_single_core,
                feature = "critical-section",
            )))]
            #[inline]
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }
        }
    };
}

atomic_sub_word!(AtomicI8, i8, "b", max, min);
atomic_sub_word!(AtomicU8, u8, "b", maxu, minu);
atomic_sub_word!(AtomicI16, i16, "h", max, min);
atomic_sub_word!(AtomicU16, u16, "h", maxu, minu);
atomic!(AtomicI32, i32, "w", max, min);
atomic!(AtomicU32, u32, "w", maxu, minu);
#[cfg(target_arch = "riscv64")]
atomic!(AtomicI64, i64, "d", max, min);
#[cfg(target_arch = "riscv64")]
atomic!(AtomicU64, u64, "d", maxu, minu);
#[cfg(target_pointer_width = "32")]
atomic!(AtomicIsize, isize, "w", max, min);
#[cfg(target_pointer_width = "32")]
atomic!(AtomicUsize, usize, "w", maxu, minu);
#[cfg(target_pointer_width = "32")]
atomic_ptr!([T] AtomicPtr, *mut T, "w");
#[cfg(target_pointer_width = "64")]
atomic!(AtomicIsize, isize, "d", max, min);
#[cfg(target_pointer_width = "64")]
atomic!(AtomicUsize, usize, "d", maxu, minu);
#[cfg(target_pointer_width = "64")]
atomic_ptr!([T] AtomicPtr, *mut T, "d");

#[cfg(test)]
mod tests {
    use super::*;

    test_atomic_ptr_load_store!();
    test_atomic_int_load_store!(i8);
    test_atomic_int_load_store!(u8);
    test_atomic_int_load_store!(i16);
    test_atomic_int_load_store!(u16);
    test_atomic_int_load_store!(i32);
    test_atomic_int_load_store!(u32);
    #[cfg(target_arch = "riscv64")]
    test_atomic_int_load_store!(i64);
    #[cfg(target_arch = "riscv64")]
    test_atomic_int_load_store!(u64);
    test_atomic_int_load_store!(isize);
    test_atomic_int_load_store!(usize);

    macro_rules! test_atomic_ptr_amo {
        () => {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod test_atomic_ptr_amo {
                use super::*;
                test_atomic_ptr_amo!(AtomicPtr<u8>);
            }
        };
        ($atomic_type:ty) => {
            ::quickcheck::quickcheck! {
                fn quickcheck_swap(x: usize, y: usize) -> bool {
                    let x = sptr::invalid_mut(x);
                    let y = sptr::invalid_mut(y);
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.swap(y, order), x);
                        assert_eq!(a.swap(x, order), y);
                    }
                    true
                }
            }
        };
    }
    macro_rules! test_atomic_int_amo {
        ($int_type:ident) => {
            paste::paste! {
                #[allow(
                    clippy::alloc_instead_of_core,
                    clippy::std_instead_of_alloc,
                    clippy::std_instead_of_core,
                    clippy::undocumented_unsafe_blocks
                )]
                mod [<test_atomic_ $int_type _amo>] {
                    use super::*;
                    test_atomic_int_amo!([<Atomic $int_type:camel>], $int_type);
                }
            }
        };
        ($atomic_type:ty, $int_type:ident) => {
            ::quickcheck::quickcheck! {
                fn quickcheck_swap(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.swap(y, order), x);
                        assert_eq!(a.swap(x, order), y);
                    }
                    true
                }
                fn quickcheck_fetch_add(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.fetch_add(y, order), x);
                        assert_eq!(a.load(Ordering::Relaxed), x.wrapping_add(y));
                        let a = <$atomic_type>::new(y);
                        assert_eq!(a.fetch_add(x, order), y);
                        assert_eq!(a.load(Ordering::Relaxed), y.wrapping_add(x));
                    }
                    true
                }
                fn quickcheck_fetch_sub(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.fetch_sub(y, order), x);
                        assert_eq!(a.load(Ordering::Relaxed), x.wrapping_sub(y));
                        let a = <$atomic_type>::new(y);
                        assert_eq!(a.fetch_sub(x, order), y);
                        assert_eq!(a.load(Ordering::Relaxed), y.wrapping_sub(x));
                    }
                    true
                }
                fn quickcheck_fetch_and(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.fetch_and(y, order), x);
                        assert_eq!(a.load(Ordering::Relaxed), x & y);
                        let a = <$atomic_type>::new(y);
                        assert_eq!(a.fetch_and(x, order), y);
                        assert_eq!(a.load(Ordering::Relaxed), y & x);
                    }
                    true
                }
                fn quickcheck_fetch_or(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.fetch_or(y, order), x);
                        assert_eq!(a.load(Ordering::Relaxed), x | y);
                        let a = <$atomic_type>::new(y);
                        assert_eq!(a.fetch_or(x, order), y);
                        assert_eq!(a.load(Ordering::Relaxed), y | x);
                    }
                    true
                }
                fn quickcheck_fetch_xor(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.fetch_xor(y, order), x);
                        assert_eq!(a.load(Ordering::Relaxed), x ^ y);
                        let a = <$atomic_type>::new(y);
                        assert_eq!(a.fetch_xor(x, order), y);
                        assert_eq!(a.load(Ordering::Relaxed), y ^ x);
                    }
                    true
                }
                fn quickcheck_fetch_max(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.fetch_max(y, order), x);
                        assert_eq!(a.load(Ordering::Relaxed), core::cmp::max(x, y));
                        let a = <$atomic_type>::new(y);
                        assert_eq!(a.fetch_max(x, order), y);
                        assert_eq!(a.load(Ordering::Relaxed), core::cmp::max(y, x));
                    }
                    true
                }
                fn quickcheck_fetch_min(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.fetch_min(y, order), x);
                        assert_eq!(a.load(Ordering::Relaxed), core::cmp::min(x, y));
                        let a = <$atomic_type>::new(y);
                        assert_eq!(a.fetch_min(x, order), y);
                        assert_eq!(a.load(Ordering::Relaxed), core::cmp::min(y, x));
                    }
                    true
                }
                fn quickcheck_fetch_not(x: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        let a = <$atomic_type>::new(x);
                        assert_eq!(a.fetch_not(order), x);
                        assert_eq!(a.load(Ordering::Relaxed), !x);
                        assert_eq!(a.fetch_not(order), !x);
                        assert_eq!(a.load(Ordering::Relaxed), x);
                    }
                    true
                }
            }
        };
    }
    macro_rules! test_atomic_int_amo_sub_word {
        ($int_type:ident) => {
            paste::paste! {
                #[allow(
                    clippy::alloc_instead_of_core,
                    clippy::std_instead_of_alloc,
                    clippy::std_instead_of_core,
                    clippy::undocumented_unsafe_blocks
                )]
                mod [<test_atomic_ $int_type _amo>] {
                    use super::*;
                    #[cfg(any(target_feature = "zabha", portable_atomic_target_feature = "zabha"))]
                    test_atomic_int_amo!([<Atomic $int_type:camel>], $int_type);
                    #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
                    test_atomic_int_amo_sub_word!([<Atomic $int_type:camel>], $int_type);
                }
            }
        };
        ($atomic_type:ty, $int_type:ident) => {
            use crate::tests::helper::*;
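            // Strategy: place the target atomic at a random index inside an
            // aligned array of neighbors initialized to `base` (all zeros or
            // all ones), then check after each sub-word RMW that the
            // word-sized AMO did not write to any neighboring element.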
            ::quickcheck::quickcheck! {
                fn quickcheck_fetch_and(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        for base in [0, !0] {
                            let mut arr = Align16([
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                            ]);
                            let a_idx = fastrand::usize(3..=6);
                            arr.0[a_idx] = <$atomic_type>::new(x);
                            let a = &arr.0[a_idx];
                            assert_eq!(a.fetch_and(y, order), x);
                            assert_eq!(a.load(Ordering::Relaxed), x & y);
                            for i in 0..a_idx {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            for i in a_idx + 1..arr.0.len() {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            arr.0[a_idx] = <$atomic_type>::new(y);
                            let a = &arr.0[a_idx];
                            assert_eq!(a.fetch_and(x, order), y);
                            assert_eq!(a.load(Ordering::Relaxed), y & x);
                            for i in 0..a_idx {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            for i in a_idx + 1..arr.0.len() {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                        }
                    }
                    true
                }
                fn quickcheck_fetch_or(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        for base in [0, !0] {
                            let mut arr = Align16([
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                            ]);
                            let a_idx = fastrand::usize(3..=6);
                            arr.0[a_idx] = <$atomic_type>::new(x);
                            let a = &arr.0[a_idx];
                            assert_eq!(a.fetch_or(y, order), x);
                            assert_eq!(a.load(Ordering::Relaxed), x | y);
                            for i in 0..a_idx {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            for i in a_idx + 1..arr.0.len() {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            arr.0[a_idx] = <$atomic_type>::new(y);
                            let a = &arr.0[a_idx];
                            assert_eq!(a.fetch_or(x, order), y);
                            assert_eq!(a.load(Ordering::Relaxed), y | x);
                            for i in 0..a_idx {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            for i in a_idx + 1..arr.0.len() {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                        }
                    }
                    true
                }
                fn quickcheck_fetch_xor(x: $int_type, y: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        for base in [0, !0] {
                            let mut arr = Align16([
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                            ]);
                            let a_idx = fastrand::usize(3..=6);
                            arr.0[a_idx] = <$atomic_type>::new(x);
                            let a = &arr.0[a_idx];
                            assert_eq!(a.fetch_xor(y, order), x);
                            assert_eq!(a.load(Ordering::Relaxed), x ^ y);
                            for i in 0..a_idx {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            for i in a_idx + 1..arr.0.len() {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            arr.0[a_idx] = <$atomic_type>::new(y);
                            let a = &arr.0[a_idx];
                            assert_eq!(a.fetch_xor(x, order), y);
                            assert_eq!(a.load(Ordering::Relaxed), y ^ x);
                            for i in 0..a_idx {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            for i in a_idx + 1..arr.0.len() {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                        }
                    }
                    true
                }
                fn quickcheck_fetch_not(x: $int_type) -> bool {
                    for &order in &test_helper::SWAP_ORDERINGS {
                        for base in [0, !0] {
                            let mut arr = Align16([
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                                <$atomic_type>::new(base),
                            ]);
                            let a_idx = fastrand::usize(3..=6);
                            arr.0[a_idx] = <$atomic_type>::new(x);
                            let a = &arr.0[a_idx];
                            assert_eq!(a.fetch_not(order), x);
                            assert_eq!(a.load(Ordering::Relaxed), !x);
                            assert_eq!(a.fetch_not(order), !x);
                            assert_eq!(a.load(Ordering::Relaxed), x);
                            for i in 0..a_idx {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                            for i in a_idx + 1..arr.0.len() {
                                assert_eq!(arr.0[i].load(Ordering::Relaxed), base, "invalid value written");
                            }
                        }
                    }
                    true
                }
            }
        };
    }
    test_atomic_ptr_amo!();
    test_atomic_int_amo_sub_word!(i8);
    test_atomic_int_amo_sub_word!(u8);
    test_atomic_int_amo_sub_word!(i16);
    test_atomic_int_amo_sub_word!(u16);
    test_atomic_int_amo!(i32);
    test_atomic_int_amo!(u32);
    #[cfg(target_arch = "riscv64")]
    test_atomic_int_amo!(i64);
    #[cfg(target_arch = "riscv64")]
    test_atomic_int_amo!(u64);
    test_atomic_int_amo!(isize);
    test_atomic_int_amo!(usize);
}