1 // SPDX-License-Identifier: Apache-2.0 OR MIT
2
3 #![allow(unused_macros, clippy::undocumented_unsafe_blocks)]
4
5 use core::sync::atomic::Ordering;
6
// Generates tests shared by every atomic type: auto-trait impls, memory
// layout, and lock-freedom consistency. `$atomic_type` is the atomic wrapper,
// `$value_type` the primitive value type it stores.
macro_rules! __test_atomic_common {
    ($atomic_type:ty, $value_type:ty) => {
        #[test]
        fn assert_auto_traits() {
            // Compile-time check: the atomic type must be usable across
            // threads and through catch_unwind.
            fn _requires_auto_traits<T>()
            where
                T: Send + Sync + Unpin + std::panic::UnwindSafe,
            {
            }
            _requires_auto_traits::<$atomic_type>();
        }
        #[test]
        fn alignment() {
            // https://github.com/rust-lang/rust/blob/1.80.0/library/core/tests/atomic.rs#L250
            let align = core::mem::align_of::<$atomic_type>();
            let size = core::mem::size_of::<$atomic_type>();
            // Atomics must be naturally aligned (alignment == size) ...
            assert_eq!(align, size);
            // ... and have the same size as the underlying value type.
            assert_eq!(size, core::mem::size_of::<$value_type>());
        }
        #[test]
        fn is_lock_free() {
            const IS_ALWAYS_LOCK_FREE: bool = <$atomic_type>::IS_ALWAYS_LOCK_FREE;
            // The associated constant must be usable in const context and
            // agree with the value read at runtime.
            assert_eq!(IS_ALWAYS_LOCK_FREE, <$atomic_type>::IS_ALWAYS_LOCK_FREE);
            let lock_free_now = <$atomic_type>::is_lock_free();
            if IS_ALWAYS_LOCK_FREE {
                // Always-lock-free implies lock-free at runtime; the converse
                // need not hold, so nothing is asserted in the other direction.
                assert!(lock_free_now);
            }
        }
    };
}
// Generates tests that apply only to the public atomic API:
// the `is_always_lock_free` method and `RefUnwindSafe` conformance.
// NOTE: `$value_type` is currently unused by this arm; it is kept for
// signature symmetry with `__test_atomic_common!`.
macro_rules! __test_atomic_pub_common {
    ($atomic_type:ty, $value_type:ty) => {
        #[test]
        fn is_always_lock_free() {
            // The method form must agree with the associated constant.
            assert_eq!(<$atomic_type>::IS_ALWAYS_LOCK_FREE, <$atomic_type>::is_always_lock_free());
        }
        #[test]
        fn assert_ref_unwind_safe() {
            // RefUnwindSafe is expected except under the
            // `portable_atomic_no_core_unwind_safe` cfg with "std" disabled
            // (presumably set by the build script for toolchains lacking
            // core::panic::RefUnwindSafe — TODO confirm against build.rs).
            #[cfg(not(all(portable_atomic_no_core_unwind_safe, not(feature = "std"))))]
            static_assertions::assert_impl_all!($atomic_type: std::panic::RefUnwindSafe);
            // In that configuration the impl must be absent, and we assert so
            // to catch accidental changes in either direction.
            #[cfg(all(portable_atomic_no_core_unwind_safe, not(feature = "std")))]
            static_assertions::assert_not_impl_all!($atomic_type: std::panic::RefUnwindSafe);
        }
    };
}
47
// Generates load/store tests for an integer atomic type.
//
// The `single_thread` arm emits only single-threaded tests (usable on targets
// without std threads); the default arm additionally emits a multi-threaded
// stress test on top of the single-threaded ones.
macro_rules! __test_atomic_int_load_store {
    ($atomic_type:ty, $int_type:ident, single_thread) => {
        __test_atomic_common!($atomic_type, $int_type);
        use crate::tests::helper::*;
        #[test]
        fn accessor() {
            let mut a = <$atomic_type>::new(10);
            assert_eq!(*a.get_mut(), 10);
            *a.get_mut() = 5;
            // as_ptr must point at the value stored inline in the atomic itself.
            assert_eq!(a.as_ptr() as *const (), &a as *const _ as *const ());
            assert_eq!(*a.get_mut(), 5);
        }
        // Load-only use of a static, as a regression check for:
        // https://bugs.llvm.org/show_bug.cgi?id=37061
        #[test]
        fn static_load_only() {
            static VAR: $atomic_type = <$atomic_type>::new(10);
            for &order in &test_helper::LOAD_ORDERINGS {
                assert_eq!(VAR.load(order), 10);
            }
        }
        #[test]
        fn load_store() {
            static VAR: $atomic_type = <$atomic_type>::new(10);
            // Ordering-validation helpers from crate::tests::helper.
            test_load_ordering(|order| VAR.load(order));
            test_store_ordering(|order| VAR.store(10, order));
            for (&load_order, &store_order) in
                test_helper::LOAD_ORDERINGS.iter().zip(&test_helper::STORE_ORDERINGS)
            {
                // Static atomic: round-trip 10 -> 5 -> 10 under every
                // (load, store) ordering pair.
                assert_eq!(VAR.load(load_order), 10);
                VAR.store(5, store_order);
                assert_eq!(VAR.load(load_order), 5);
                VAR.store(10, store_order);
                // Local (stack) atomic: round-trip 1 -> 2.
                let a = <$atomic_type>::new(1);
                assert_eq!(a.load(load_order), 1);
                a.store(2, store_order);
                assert_eq!(a.load(load_order), 2);
            }
        }
    };
    ($atomic_type:ty, $int_type:ident) => {
        __test_atomic_int_load_store!($atomic_type, $int_type, single_thread);
        use crossbeam_utils::thread;
        use std::{collections::BTreeSet, vec, vec::Vec};
        // Concurrent random stores and loads: every loaded value must be one
        // of the values some thread stored (or the initial value, which is
        // drawn from the same data set).
        #[test]
        fn stress_load_store() {
            let (iterations, threads) = stress_test_config();
            let data1 = (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>();
            let set = data1.iter().copied().collect::<BTreeSet<_>>();
            let a = <$atomic_type>::new(data1[fastrand::usize(0..iterations)]);
            let now = &std::time::Instant::now();
            thread::scope(|s| {
                for _ in 0..threads {
                    s.spawn(|_| {
                        let now = *now;
                        for i in 0..iterations {
                            a.store(data1[i], rand_store_ordering());
                        }
                        std::eprintln!("store end={:?}", now.elapsed());
                    });
                    s.spawn(|_| {
                        let now = *now;
                        let mut v = vec![0; iterations];
                        for i in 0..iterations {
                            v[i] = a.load(rand_load_ordering());
                        }
                        std::eprintln!("load end={:?}", now.elapsed());
                        // Validate after the timed loop so the checking cost
                        // does not reduce contention during the race window.
                        for v in v {
                            assert!(set.contains(&v), "v={}", v);
                        }
                    });
                }
            })
            .unwrap();
        }
    };
}
// Generates load/store-only tests for a float atomic type.
// The `single_thread` arm is usable on targets without std threads; the
// default arm currently just forwards to it (multi-thread tests are TODO).
macro_rules! __test_atomic_float_load_store {
    ($atomic_type:ty, $float_type:ident, single_thread) => {
        __test_atomic_common!($atomic_type, $float_type);
        use crate::tests::helper::*;
        #[test]
        fn accessor() {
            // get_mut/as_ptr must expose the value stored inline in the atomic.
            let mut atomic = <$atomic_type>::new(10.0);
            assert_eq!(*atomic.get_mut(), 10.0);
            *atomic.get_mut() = 5.0;
            assert_eq!(atomic.as_ptr() as *const (), &atomic as *const _ as *const ());
            assert_eq!(*atomic.get_mut(), 5.0);
        }
        // https://bugs.llvm.org/show_bug.cgi?id=37061
        #[test]
        fn static_load_only() {
            static VAR: $atomic_type = <$atomic_type>::new(10.0);
            for &order in test_helper::LOAD_ORDERINGS.iter() {
                assert_eq!(VAR.load(order), 10.0);
            }
        }
        #[test]
        fn load_store() {
            static VAR: $atomic_type = <$atomic_type>::new(10.0);
            test_load_ordering(|order| VAR.load(order));
            test_store_ordering(|order| VAR.store(10.0, order));
            let ordering_pairs =
                test_helper::LOAD_ORDERINGS.iter().zip(test_helper::STORE_ORDERINGS.iter());
            for (&load_order, &store_order) in ordering_pairs {
                // Static atomic: round-trip 10 -> 5 -> 10.
                assert_eq!(VAR.load(load_order), 10.0);
                VAR.store(5.0, store_order);
                assert_eq!(VAR.load(load_order), 5.0);
                VAR.store(10.0, store_order);
                // Local (stack) atomic: round-trip 1 -> 2.
                let local = <$atomic_type>::new(1.0);
                assert_eq!(local.load(load_order), 1.0);
                local.store(2.0, store_order);
                assert_eq!(local.load(load_order), 2.0);
            }
        }
    };
    ($atomic_type:ty, $float_type:ident) => {
        __test_atomic_float_load_store!($atomic_type, $float_type, single_thread);
        // TODO: multi thread
    };
}
// Generates load/store-only tests for a bool atomic type.
// The `single_thread` arm is usable on targets without std threads; the
// default arm currently just forwards to it (multi-thread tests are TODO).
macro_rules! __test_atomic_bool_load_store {
    ($atomic_type:ty, single_thread) => {
        __test_atomic_common!($atomic_type, bool);
        use crate::tests::helper::*;
        #[test]
        fn accessor() {
            // get_mut/as_ptr must expose the value stored inline in the atomic.
            let mut atomic = <$atomic_type>::new(false);
            assert_eq!(*atomic.get_mut(), false);
            *atomic.get_mut() = true;
            assert_eq!(atomic.as_ptr() as *const (), &atomic as *const _ as *const ());
            assert_eq!(*atomic.get_mut(), true);
        }
        // https://bugs.llvm.org/show_bug.cgi?id=37061
        #[test]
        fn static_load_only() {
            static VAR: $atomic_type = <$atomic_type>::new(false);
            for &order in test_helper::LOAD_ORDERINGS.iter() {
                assert_eq!(VAR.load(order), false);
            }
        }
        #[test]
        fn load_store() {
            static VAR: $atomic_type = <$atomic_type>::new(false);
            test_load_ordering(|order| VAR.load(order));
            test_store_ordering(|order| VAR.store(false, order));
            let ordering_pairs =
                test_helper::LOAD_ORDERINGS.iter().zip(test_helper::STORE_ORDERINGS.iter());
            for (&load_order, &store_order) in ordering_pairs {
                // Static atomic: false -> true -> false.
                assert_eq!(VAR.load(load_order), false);
                VAR.store(true, store_order);
                assert_eq!(VAR.load(load_order), true);
                VAR.store(false, store_order);
                // Local (stack) atomic: true -> false.
                let local = <$atomic_type>::new(true);
                assert_eq!(local.load(load_order), true);
                local.store(false, store_order);
                assert_eq!(local.load(load_order), false);
            }
        }
    };
    ($atomic_type:ty) => {
        __test_atomic_bool_load_store!($atomic_type, single_thread);
        // TODO: multi thread
    };
}
// Generates load/store-only tests for a pointer atomic type (value type
// `*mut u8`). The `single_thread` arm is usable on targets without std
// threads; the default arm currently just forwards to it.
macro_rules! __test_atomic_ptr_load_store {
    ($atomic_type:ty, single_thread) => {
        __test_atomic_common!($atomic_type, *mut u8);
        use crate::tests::helper::*;
        use std::ptr;
        #[test]
        fn accessor() {
            let mut v = 1;
            let mut a = <$atomic_type>::new(ptr::null_mut());
            assert!(a.get_mut().is_null());
            *a.get_mut() = &mut v;
            // as_ptr must point at the value stored inline in the atomic itself.
            assert_eq!(a.as_ptr() as *const (), &a as *const _ as *const ());
            assert!(!a.get_mut().is_null());
        }
        // Load-only use of a static, as a regression check for:
        // https://bugs.llvm.org/show_bug.cgi?id=37061
        #[test]
        fn static_load_only() {
            static VAR: $atomic_type = <$atomic_type>::new(ptr::null_mut());
            for &order in &test_helper::LOAD_ORDERINGS {
                assert_eq!(VAR.load(order), ptr::null_mut());
            }
        }
        #[test]
        fn load_store() {
            static VAR: $atomic_type = <$atomic_type>::new(ptr::null_mut());
            // Ordering-validation helpers from crate::tests::helper.
            test_load_ordering(|order| VAR.load(order));
            test_store_ordering(|order| VAR.store(ptr::null_mut(), order));
            let mut v = 1_u8;
            let p = &mut v as *mut u8;
            for (&load_order, &store_order) in
                test_helper::LOAD_ORDERINGS.iter().zip(&test_helper::STORE_ORDERINGS)
            {
                // Static atomic: null -> p -> null under every ordering pair.
                assert_eq!(VAR.load(load_order), ptr::null_mut());
                VAR.store(p, store_order);
                assert_eq!(VAR.load(load_order), p);
                VAR.store(ptr::null_mut(), store_order);
                // Local (stack) atomic: p -> null.
                let a = <$atomic_type>::new(p);
                assert_eq!(a.load(load_order), p);
                a.store(ptr::null_mut(), store_order);
                assert_eq!(a.load(load_order), ptr::null_mut());
            }
        }
    };
    ($atomic_type:ty) => {
        __test_atomic_ptr_load_store!($atomic_type, single_thread);
        // TODO: multi thread
    };
}
260
// Generates read-modify-write (RMW) tests for an integer atomic type.
//
// The `single_thread` arm emits ordering/boundary tests and quickcheck
// property tests that run on one thread; the default arm additionally emits
// multi-threaded stress tests for swap and compare_exchange. Relies on the
// load/store macros having already brought `crate::tests::helper::*` (and,
// for the default arm, `thread`, `BTreeSet`, `vec`, `Vec`) into scope.
macro_rules! __test_atomic_int {
    ($atomic_type:ty, $int_type:ident, single_thread) => {
        #[test]
        fn swap() {
            let a = <$atomic_type>::new(5);
            test_swap_ordering(|order| a.swap(5, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                // swap returns the previous value.
                assert_eq!(a.swap(10, order), 5);
                assert_eq!(a.swap(5, order), 10);
            }
        }
        #[test]
        fn compare_exchange() {
            let a = <$atomic_type>::new(5);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange(5, 5, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(5);
                // Success: returns Ok(previous) and stores the new value.
                assert_eq!(a.compare_exchange(5, 10, success, failure), Ok(5));
                assert_eq!(a.load(Ordering::Relaxed), 10);
                // Failure: returns Err(current) and leaves the value unchanged.
                assert_eq!(a.compare_exchange(6, 12, success, failure), Err(10));
                assert_eq!(a.load(Ordering::Relaxed), 10);
            }
        }
        #[test]
        fn compare_exchange_weak() {
            let a = <$atomic_type>::new(4);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange_weak(4, 4, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(4);
                assert_eq!(a.compare_exchange_weak(6, 8, success, failure), Err(4));
                let mut old = a.load(Ordering::Relaxed);
                // compare_exchange_weak may fail spuriously, so use the
                // canonical retry-loop pattern until it succeeds.
                loop {
                    let new = old * 2;
                    match a.compare_exchange_weak(old, new, success, failure) {
                        Ok(_) => break,
                        Err(x) => old = x,
                    }
                }
                assert_eq!(a.load(Ordering::Relaxed), 8);
            }
        }
        #[test]
        fn fetch_add() {
            let a = <$atomic_type>::new(0);
            test_swap_ordering(|order| a.fetch_add(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0);
                assert_eq!(a.fetch_add(10, order), 0);
                assert_eq!(a.load(Ordering::Relaxed), 10);
                // Overflow must wrap.
                let a = <$atomic_type>::new($int_type::MAX);
                assert_eq!(a.fetch_add(1, order), $int_type::MAX);
                assert_eq!(a.load(Ordering::Relaxed), $int_type::MAX.wrapping_add(1));
            }
        }
        // `add` is the non-fetch variant: same operation, no return value.
        #[test]
        fn add() {
            let a = <$atomic_type>::new(0);
            test_swap_ordering(|order| a.add(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0);
                a.add(10, order);
                assert_eq!(a.load(Ordering::Relaxed), 10);
                let a = <$atomic_type>::new($int_type::MAX);
                a.add(1, order);
                assert_eq!(a.load(Ordering::Relaxed), $int_type::MAX.wrapping_add(1));
            }
        }
        #[test]
        fn fetch_sub() {
            let a = <$atomic_type>::new(20);
            test_swap_ordering(|order| a.fetch_sub(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(20);
                assert_eq!(a.fetch_sub(10, order), 20);
                assert_eq!(a.load(Ordering::Relaxed), 10);
                // Underflow must wrap.
                let a = <$atomic_type>::new($int_type::MIN);
                assert_eq!(a.fetch_sub(1, order), $int_type::MIN);
                assert_eq!(a.load(Ordering::Relaxed), $int_type::MIN.wrapping_sub(1));
            }
        }
        #[test]
        fn sub() {
            let a = <$atomic_type>::new(20);
            test_swap_ordering(|order| a.sub(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(20);
                a.sub(10, order);
                assert_eq!(a.load(Ordering::Relaxed), 10);
                let a = <$atomic_type>::new($int_type::MIN);
                a.sub(1, order);
                assert_eq!(a.load(Ordering::Relaxed), $int_type::MIN.wrapping_sub(1));
            }
        }
        #[test]
        fn fetch_and() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.fetch_and(0b101101, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                assert_eq!(a.fetch_and(0b110011, order), 0b101101);
                assert_eq!(a.load(Ordering::Relaxed), 0b100001);
            }
        }
        #[test]
        fn and() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.and(0b101101, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                a.and(0b110011, order);
                assert_eq!(a.load(Ordering::Relaxed), 0b100001);
            }
        }
        #[test]
        fn fetch_nand() {
            let a = <$atomic_type>::new(0x13);
            test_swap_ordering(|order| a.fetch_nand(0x31, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0x13);
                assert_eq!(a.fetch_nand(0x31, order), 0x13);
                // nand(x, y) == !(x & y)
                assert_eq!(a.load(Ordering::Relaxed), !(0x13 & 0x31));
            }
        }
        #[test]
        fn fetch_or() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.fetch_or(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                assert_eq!(a.fetch_or(0b110011, order), 0b101101);
                assert_eq!(a.load(Ordering::Relaxed), 0b111111);
            }
        }
        #[test]
        fn or() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.or(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                a.or(0b110011, order);
                assert_eq!(a.load(Ordering::Relaxed), 0b111111);
            }
        }
        #[test]
        fn fetch_xor() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.fetch_xor(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                assert_eq!(a.fetch_xor(0b110011, order), 0b101101);
                assert_eq!(a.load(Ordering::Relaxed), 0b011110);
            }
        }
        #[test]
        fn xor() {
            let a = <$atomic_type>::new(0b101101);
            test_swap_ordering(|order| a.xor(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b101101);
                a.xor(0b110011, order);
                assert_eq!(a.load(Ordering::Relaxed), 0b011110);
            }
        }
        #[test]
        fn fetch_max() {
            let a = <$atomic_type>::new(23);
            test_swap_ordering(|order| a.fetch_max(23, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(23);
                assert_eq!(a.fetch_max(22, order), 23);
                assert_eq!(a.load(Ordering::Relaxed), 23);
                assert_eq!(a.fetch_max(24, order), 23);
                assert_eq!(a.load(Ordering::Relaxed), 24);
                let a = <$atomic_type>::new(0);
                assert_eq!(a.fetch_max(1, order), 0);
                assert_eq!(a.load(Ordering::Relaxed), 1);
                assert_eq!(a.fetch_max(0, order), 1);
                assert_eq!(a.load(Ordering::Relaxed), 1);
                // !0 vs 0 distinguishes signed (-1 < 0) from unsigned
                // (MAX > 0) comparison; cmp::max computes the expected value
                // for whichever $int_type is.
                let a = <$atomic_type>::new(!0);
                assert_eq!(a.fetch_max(0, order), !0);
                assert_eq!(a.load(Ordering::Relaxed), core::cmp::max(!0, 0));
            }
        }
        #[test]
        fn fetch_min() {
            let a = <$atomic_type>::new(23);
            test_swap_ordering(|order| a.fetch_min(23, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(23);
                assert_eq!(a.fetch_min(24, order), 23);
                assert_eq!(a.load(Ordering::Relaxed), 23);
                assert_eq!(a.fetch_min(22, order), 23);
                assert_eq!(a.load(Ordering::Relaxed), 22);
                let a = <$atomic_type>::new(1);
                assert_eq!(a.fetch_min(0, order), 1);
                assert_eq!(a.load(Ordering::Relaxed), 0);
                assert_eq!(a.fetch_min(1, order), 0);
                assert_eq!(a.load(Ordering::Relaxed), 0);
                // See fetch_max above: !0 vs 0 checks signedness handling.
                let a = <$atomic_type>::new(!0);
                assert_eq!(a.fetch_min(0, order), !0);
                assert_eq!(a.load(Ordering::Relaxed), core::cmp::min(!0, 0));
            }
        }
        #[test]
        fn fetch_not() {
            let a = <$atomic_type>::new(1);
            test_swap_ordering(|order| a.fetch_not(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(1);
                assert_eq!(a.fetch_not(order), 1);
                assert_eq!(a.load(Ordering::Relaxed), !1);
            }
        }
        #[test]
        fn not() {
            let a = <$atomic_type>::new(1);
            test_swap_ordering(|order| a.not(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(1);
                a.not(order);
                assert_eq!(a.load(Ordering::Relaxed), !1);
            }
        }
        #[test]
        fn fetch_neg() {
            let a = <$atomic_type>::new(5);
            test_swap_ordering(|order| a.fetch_neg(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                // Negation is an involution: applying it twice restores
                // the original value.
                let a = <$atomic_type>::new(5);
                assert_eq!(a.fetch_neg(order), 5);
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::wrapping_neg(5));
                assert_eq!(a.fetch_neg(order), <$int_type>::wrapping_neg(5));
                assert_eq!(a.load(Ordering::Relaxed), 5);
                // MIN is its own wrapping negation for signed types.
                let a = <$atomic_type>::new(<$int_type>::MIN);
                assert_eq!(a.fetch_neg(order), <$int_type>::MIN);
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::MIN.wrapping_neg());
                assert_eq!(a.fetch_neg(order), <$int_type>::MIN.wrapping_neg());
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::MIN);
            }
        }
        #[test]
        fn neg() {
            let a = <$atomic_type>::new(5);
            test_swap_ordering(|order| a.neg(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(5);
                a.neg(order);
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::wrapping_neg(5));
                a.neg(order);
                assert_eq!(a.load(Ordering::Relaxed), 5);
                let a = <$atomic_type>::new(<$int_type>::MIN);
                a.neg(order);
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::MIN.wrapping_neg());
                a.neg(order);
                assert_eq!(a.load(Ordering::Relaxed), <$int_type>::MIN);
            }
        }
        #[test]
        fn bit_set() {
            let a = <$atomic_type>::new(0b0001);
            test_swap_ordering(|order| assert!(a.bit_set(0, order)));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b0000);
                // Returns the previous state of the bit.
                assert!(!a.bit_set(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0001);
                assert!(a.bit_set(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0001);
            }
        }
        #[test]
        fn bit_clear() {
            let a = <$atomic_type>::new(0b0000);
            test_swap_ordering(|order| assert!(!a.bit_clear(0, order)));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b0001);
                // Returns the previous state of the bit.
                assert!(a.bit_clear(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0000);
                assert!(!a.bit_clear(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0000);
            }
        }
        #[test]
        fn bit_toggle() {
            let a = <$atomic_type>::new(0b0000);
            test_swap_ordering(|order| a.bit_toggle(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0b0000);
                // Returns the previous state of the bit.
                assert!(!a.bit_toggle(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0001);
                assert!(a.bit_toggle(0, order));
                assert_eq!(a.load(Ordering::Relaxed), 0b0000);
            }
        }
        // Property tests: each RMW operation is checked against the plain
        // integer operation on random inputs, under every swap ordering.
        ::quickcheck::quickcheck! {
            fn quickcheck_swap(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.swap(y, order), x);
                    assert_eq!(a.swap(x, order), y);
                }
                true
            }
            fn quickcheck_compare_exchange(x: $int_type, y: $int_type) -> bool {
                #[cfg(all(
                    target_arch = "arm",
                    not(any(target_feature = "v6", portable_atomic_target_feature = "v6")),
                ))]
                {
                    // TODO: LLVM bug:
                    // https://github.com/llvm/llvm-project/issues/61880
                    // https://github.com/taiki-e/portable-atomic/issues/2
                    if core::mem::size_of::<$int_type>() <= 2 {
                        return true;
                    }
                }
                // Pick a value guaranteed to differ from y so the failure
                // case below cannot accidentally succeed.
                let z = loop {
                    let z = fastrand::$int_type(..);
                    if z != y {
                        break z;
                    }
                };
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.compare_exchange(x, y, success, failure).unwrap(), x);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                    assert_eq!(a.compare_exchange(z, x, success, failure).unwrap_err(), y);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                }
                true
            }
            fn quickcheck_fetch_add(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_add(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_add(y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_add(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y.wrapping_add(x));
                }
                true
            }
            fn quickcheck_add(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.add(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_add(y));
                    let a = <$atomic_type>::new(y);
                    a.add(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y.wrapping_add(x));
                }
                true
            }
            fn quickcheck_fetch_sub(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_sub(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_sub(y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_sub(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y.wrapping_sub(x));
                }
                true
            }
            fn quickcheck_sub(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.sub(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_sub(y));
                    let a = <$atomic_type>::new(y);
                    a.sub(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y.wrapping_sub(x));
                }
                true
            }
            fn quickcheck_fetch_and(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_and(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x & y);
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_and(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y & x);
                }
                true
            }
            fn quickcheck_and(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.and(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x & y);
                    let a = <$atomic_type>::new(y);
                    a.and(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y & x);
                }
                true
            }
            fn quickcheck_fetch_nand(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_nand(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), !(x & y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_nand(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), !(y & x));
                }
                true
            }
            fn quickcheck_fetch_or(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_or(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x | y);
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_or(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y | x);
                }
                true
            }
            fn quickcheck_or(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.or(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x | y);
                    let a = <$atomic_type>::new(y);
                    a.or(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y | x);
                }
                true
            }
            fn quickcheck_fetch_xor(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_xor(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x ^ y);
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_xor(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), y ^ x);
                }
                true
            }
            fn quickcheck_xor(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.xor(y, order);
                    assert_eq!(a.load(Ordering::Relaxed), x ^ y);
                    let a = <$atomic_type>::new(y);
                    a.xor(x, order);
                    assert_eq!(a.load(Ordering::Relaxed), y ^ x);
                }
                true
            }
            fn quickcheck_fetch_max(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_max(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), core::cmp::max(x, y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_max(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), core::cmp::max(y, x));
                }
                true
            }
            fn quickcheck_fetch_min(x: $int_type, y: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_min(y, order), x);
                    assert_eq!(a.load(Ordering::Relaxed), core::cmp::min(x, y));
                    let a = <$atomic_type>::new(y);
                    assert_eq!(a.fetch_min(x, order), y);
                    assert_eq!(a.load(Ordering::Relaxed), core::cmp::min(y, x));
                }
                true
            }
            fn quickcheck_fetch_not(x: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_not(order), x);
                    assert_eq!(a.load(Ordering::Relaxed), !x);
                    assert_eq!(a.fetch_not(order), !x);
                    assert_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_not(x: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.not(order);
                    assert_eq!(a.load(Ordering::Relaxed), !x);
                    a.not(order);
                    assert_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_fetch_neg(x: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.fetch_neg(order), x);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_neg());
                    assert_eq!(a.fetch_neg(order), x.wrapping_neg());
                    assert_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_neg(x: $int_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    a.neg(order);
                    assert_eq!(a.load(Ordering::Relaxed), x.wrapping_neg());
                    a.neg(order);
                    assert_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            // `bit` may exceed the bit width; wrapping_shl matches the
            // expected wrapping behavior of the bit operations.
            fn quickcheck_bit_set(x: $int_type, bit: u32) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    let b = a.bit_set(bit, order);
                    let mask = <$int_type>::wrapping_shl(1, bit);
                    assert_eq!(a.load(Ordering::Relaxed), x | mask);
                    assert_eq!(b, x & mask != 0);
                }
                true
            }
            fn quickcheck_bit_clear(x: $int_type, bit: u32) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    let b = a.bit_clear(bit, order);
                    let mask = <$int_type>::wrapping_shl(1, bit);
                    assert_eq!(a.load(Ordering::Relaxed), x & !mask);
                    assert_eq!(b, x & mask != 0);
                }
                true
            }
            fn quickcheck_bit_toggle(x: $int_type, bit: u32) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    let b = a.bit_toggle(bit, order);
                    let mask = <$int_type>::wrapping_shl(1, bit);
                    assert_eq!(a.load(Ordering::Relaxed), x ^ mask);
                    assert_eq!(b, x & mask != 0);
                }
                true
            }
        }
    };
    ($atomic_type:ty, $int_type:ident) => {
        __test_atomic_int!($atomic_type, $int_type, single_thread);

        // Concurrent store/load/swap threads with random orderings: every
        // observed value must be one of the values any thread could have
        // written (the union of data1, data2, and the initial value, which is
        // drawn from data2).
        #[test]
        fn stress_swap() {
            let (iterations, threads) = stress_test_config();
            let data1 = &(0..threads)
                .map(|_| (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>())
                .collect::<Vec<_>>();
            let data2 = &(0..threads)
                .map(|_| (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>())
                .collect::<Vec<_>>();
            let set = &data1
                .iter()
                .flat_map(|v| v.iter().copied())
                .chain(data2.iter().flat_map(|v| v.iter().copied()))
                .collect::<BTreeSet<_>>();
            let a = &<$atomic_type>::new(data2[0][fastrand::usize(0..iterations)]);
            let now = &std::time::Instant::now();
            thread::scope(|s| {
                // Alternate store/load threads; every iteration additionally
                // spawns a swap thread, so swaps race against both.
                for thread in 0..threads {
                    if thread % 2 == 0 {
                        s.spawn(move |_| {
                            let now = *now;
                            for i in 0..iterations {
                                a.store(data1[thread][i], rand_store_ordering());
                            }
                            std::eprintln!("store end={:?}", now.elapsed());
                        });
                    } else {
                        s.spawn(|_| {
                            let now = *now;
                            let mut v = vec![0; iterations];
                            for i in 0..iterations {
                                v[i] = a.load(rand_load_ordering());
                            }
                            std::eprintln!("load end={:?}", now.elapsed());
                            // Validate after the timed loop to keep contention
                            // high during the race window.
                            for v in v {
                                assert!(set.contains(&v), "v={}", v);
                            }
                        });
                    }
                    s.spawn(move |_| {
                        let now = *now;
                        let mut v = vec![0; iterations];
                        for i in 0..iterations {
                            v[i] = a.swap(data2[thread][i], rand_swap_ordering());
                        }
                        std::eprintln!("swap end={:?}", now.elapsed());
                        for v in v {
                            assert!(set.contains(&v), "v={}", v);
                        }
                    });
                }
            })
            .unwrap();
        }
        // Concurrent store/load/compare_exchange threads; CAS results are
        // checked for consistency with the expected value, and all observed
        // values must come from the known data set.
        #[test]
        fn stress_compare_exchange() {
            let (iterations, threads) = stress_test_config();
            let data1 = &(0..threads)
                .map(|_| (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>())
                .collect::<Vec<_>>();
            let data2 = &(0..threads)
                .map(|_| (0..iterations).map(|_| fastrand::$int_type(..)).collect::<Vec<_>>())
                .collect::<Vec<_>>();
            let set = &data1
                .iter()
                .flat_map(|v| v.iter().copied())
                .chain(data2.iter().flat_map(|v| v.iter().copied()))
                .collect::<BTreeSet<_>>();
            let a = &<$atomic_type>::new(data2[0][fastrand::usize(0..iterations)]);
            let now = &std::time::Instant::now();
            thread::scope(|s| {
                for thread in 0..threads {
                    s.spawn(move |_| {
                        let now = *now;
                        for i in 0..iterations {
                            a.store(data1[thread][i], rand_store_ordering());
                        }
                        std::eprintln!("store end={:?}", now.elapsed());
                    });
                    s.spawn(|_| {
                        let now = *now;
                        // Seed with a value known to be in `set`, since slots
                        // may not be overwritten below.
                        let mut v = vec![data2[0][0]; iterations];
                        for i in 0..iterations {
                            v[i] = a.load(rand_load_ordering());
                        }
                        std::eprintln!("load end={:?}", now.elapsed());
                        for v in v {
                            assert!(set.contains(&v), "v={}", v);
                        }
                    });
                    s.spawn(move |_| {
                        let now = *now;
                        // Seeded with an in-set value: only CAS failures
                        // overwrite a slot (with the observed current value).
                        let mut v = vec![data2[0][0]; iterations];
                        for i in 0..iterations {
                            // Alternate between a random expected value
                            // (usually failing) and the freshly loaded current
                            // value (usually succeeding).
                            let old = if i % 2 == 0 {
                                fastrand::$int_type(..)
                            } else {
                                a.load(Ordering::Relaxed)
                            };
                            let new = data2[thread][i];
                            let o = rand_compare_exchange_ordering();
                            match a.compare_exchange(old, new, o.0, o.1) {
                                Ok(r) => assert_eq!(old, r),
                                Err(r) => v[i] = r,
                            }
                        }
                        std::eprintln!("compare_exchange end={:?}", now.elapsed());
                        for v in v {
                            assert!(set.contains(&v), "v={}", v);
                        }
                    });
                }
            })
            .unwrap();
        }
    };
}
// Tests for atomic float types: swap, compare-exchange, and the fetch_* RMW
// operations, plus quickcheck-based randomized checks.
//
// The `single_thread` arm contains the tests that do not spawn threads; the
// default (two-argument) arm currently just forwards to it. Helpers such as
// `test_swap_ordering`, `test_compare_exchange_ordering`,
// `test_helper::SWAP_ORDERINGS`, `test_helper::COMPARE_EXCHANGE_ORDERINGS`,
// and `assert_float_op_eq!` are defined elsewhere in the test harness.
macro_rules! __test_atomic_float {
    ($atomic_type:ty, $float_type:ident, single_thread) => {
        #[test]
        fn swap() {
            let a = <$atomic_type>::new(5.);
            // Exercise `swap` with every RMW ordering via the shared helper.
            test_swap_ordering(|order| a.swap(5., order));
            for &order in &test_helper::SWAP_ORDERINGS {
                // Swap in a new value and verify the previous one is returned.
                assert_eq!(a.swap(10., order), 5.);
                assert_eq!(a.swap(5., order), 10.);
            }
        }
        #[test]
        fn compare_exchange() {
            let a = <$atomic_type>::new(5.);
            // Exercise `compare_exchange` with every (success, failure) ordering pair.
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange(5., 5., success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(5.);
                // Matching `current` succeeds and returns the old value.
                assert_eq!(a.compare_exchange(5., 10., success, failure), Ok(5.));
                assert_eq!(a.load(Ordering::Relaxed), 10.);
                // Non-matching `current` fails and returns the actual value.
                assert_eq!(a.compare_exchange(6., 12., success, failure), Err(10.));
                assert_eq!(a.load(Ordering::Relaxed), 10.);
            }
        }
        #[test]
        fn compare_exchange_weak() {
            let a = <$atomic_type>::new(4.);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange_weak(4., 4., success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(4.);
                // A mismatched `current` must fail even for the weak variant.
                assert_eq!(a.compare_exchange_weak(6., 8., success, failure), Err(4.));
                let mut old = a.load(Ordering::Relaxed);
                // Standard CAS retry loop: weak CAS may fail spuriously, so loop
                // until the doubled value is stored.
                loop {
                    let new = old * 2.;
                    match a.compare_exchange_weak(old, new, success, failure) {
                        Ok(_) => break,
                        Err(x) => old = x,
                    }
                }
                assert_eq!(a.load(Ordering::Relaxed), 8.);
            }
        }
        #[test]
        fn fetch_add() {
            let a = <$atomic_type>::new(0.);
            test_swap_ordering(|order| a.fetch_add(0., order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(0.);
                assert_eq!(a.fetch_add(10., order), 0.);
                assert_eq!(a.load(Ordering::Relaxed), 10.);
                // The expected value is computed with the same float expression,
                // so this holds regardless of rounding behavior near MAX.
                let a = <$atomic_type>::new($float_type::MAX);
                assert_eq!(a.fetch_add(1., order), $float_type::MAX);
                assert_eq!(a.load(Ordering::Relaxed), $float_type::MAX + 1.);
            }
        }
        #[test]
        fn fetch_sub() {
            let a = <$atomic_type>::new(20.);
            test_swap_ordering(|order| a.fetch_sub(0., order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(20.);
                assert_eq!(a.fetch_sub(10., order), 20.);
                assert_eq!(a.load(Ordering::Relaxed), 10.);
                // Same self-consistent check at the other extreme (MIN).
                let a = <$atomic_type>::new($float_type::MIN);
                assert_eq!(a.fetch_sub(1., order), $float_type::MIN);
                assert_eq!(a.load(Ordering::Relaxed), $float_type::MIN - 1.);
            }
        }
        #[test]
        fn fetch_max() {
            let a = <$atomic_type>::new(23.);
            test_swap_ordering(|order| a.fetch_max(23., order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(23.);
                // Smaller argument leaves the stored value unchanged.
                assert_eq!(a.fetch_max(22., order), 23.);
                assert_eq!(a.load(Ordering::Relaxed), 23.);
                // Larger argument replaces the stored value.
                assert_eq!(a.fetch_max(24., order), 23.);
                assert_eq!(a.load(Ordering::Relaxed), 24.);
            }
        }
        #[test]
        fn fetch_min() {
            let a = <$atomic_type>::new(23.);
            test_swap_ordering(|order| a.fetch_min(23., order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(23.);
                // Larger argument leaves the stored value unchanged.
                assert_eq!(a.fetch_min(24., order), 23.);
                assert_eq!(a.load(Ordering::Relaxed), 23.);
                // Smaller argument replaces the stored value.
                assert_eq!(a.fetch_min(22., order), 23.);
                assert_eq!(a.load(Ordering::Relaxed), 22.);
            }
        }
        #[test]
        fn fetch_neg() {
            let a = <$atomic_type>::new(5.);
            test_swap_ordering(|order| a.fetch_neg(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(5.);
                // Negating twice returns to the original value.
                assert_eq!(a.fetch_neg(order), 5.);
                assert_eq!(a.load(Ordering::Relaxed), -5.);
                assert_eq!(a.fetch_neg(order), -5.);
                assert_eq!(a.load(Ordering::Relaxed), 5.);
            }
        }
        #[test]
        fn fetch_abs() {
            let a = <$atomic_type>::new(23.);
            test_swap_ordering(|order| a.fetch_abs(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(-23.);
                // First call flips the sign; the second is a no-op on a
                // value that is already non-negative.
                assert_eq!(a.fetch_abs(order), -23.);
                assert_eq!(a.load(Ordering::Relaxed), 23.);
                assert_eq!(a.fetch_abs(order), 23.);
                assert_eq!(a.load(Ordering::Relaxed), 23.);
            }
        }
        // Randomized property tests; `assert_float_op_eq!` is the harness's
        // float comparison macro (defined elsewhere).
        ::quickcheck::quickcheck! {
            fn quickcheck_swap(x: $float_type, y: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.swap(y, order), x);
                    assert_float_op_eq!(a.swap(x, order), y);
                }
                true
            }
            fn quickcheck_compare_exchange(x: $float_type, y: $float_type) -> bool {
                // Pick a random z guaranteed to differ from y so the failing
                // compare_exchange below cannot accidentally succeed.
                let z = loop {
                    let z = fastrand::$float_type();
                    if z != y {
                        break z;
                    }
                };
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.compare_exchange(x, y, success, failure).unwrap(), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y);
                    assert_float_op_eq!(
                        a.compare_exchange(z, x, success, failure).unwrap_err(),
                        y,
                    );
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y);
                }
                true
            }
            fn quickcheck_fetch_add(x: $float_type, y: $float_type) -> bool {
                // Skipped on x86 without SSE2 in release mode due to known
                // rustc float-codegen bugs (see linked issues).
                if cfg!(all(not(debug_assertions), target_arch = "x86", not(target_feature = "sse2"))) {
                    // TODO: rustc bug:
                    // https://github.com/rust-lang/rust/issues/72327
                    // https://github.com/rust-lang/rust/issues/73288
                    return true;
                }
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_add(y, order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x + y);
                    let a = <$atomic_type>::new(y);
                    assert_float_op_eq!(a.fetch_add(x, order), y);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y + x);
                }
                true
            }
            fn quickcheck_fetch_sub(x: $float_type, y: $float_type) -> bool {
                // Same x86-without-SSE2 skip as quickcheck_fetch_add.
                if cfg!(all(not(debug_assertions), target_arch = "x86", not(target_feature = "sse2"))) {
                    // TODO: rustc bug:
                    // https://github.com/rust-lang/rust/issues/72327
                    // https://github.com/rust-lang/rust/issues/73288
                    return true;
                }
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_sub(y, order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x - y);
                    let a = <$atomic_type>::new(y);
                    assert_float_op_eq!(a.fetch_sub(x, order), y);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y - x);
                }
                true
            }
            fn quickcheck_fetch_max(x: $float_type, y: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    // Result must agree with the scalar `max` in both argument orders.
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_max(y, order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x.max(y));
                    let a = <$atomic_type>::new(y);
                    assert_float_op_eq!(a.fetch_max(x, order), y);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y.max(x));
                }
                true
            }
            fn quickcheck_fetch_min(x: $float_type, y: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    // Result must agree with the scalar `min` in both argument orders.
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_min(y, order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x.min(y));
                    let a = <$atomic_type>::new(y);
                    assert_float_op_eq!(a.fetch_min(x, order), y);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), y.min(x));
                }
                true
            }
            fn quickcheck_fetch_neg(x: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    // Double negation round-trips back to x.
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_neg(order), x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), -x);
                    assert_float_op_eq!(a.fetch_neg(order), -x);
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x);
                }
                true
            }
            fn quickcheck_fetch_abs(x: $float_type) -> bool {
                for &order in &test_helper::SWAP_ORDERINGS {
                    // fetch_abs is idempotent: the second call returns x.abs().
                    let a = <$atomic_type>::new(x);
                    assert_float_op_eq!(a.fetch_abs(order), x);
                    assert_float_op_eq!(a.fetch_abs(order), x.abs());
                    assert_float_op_eq!(a.load(Ordering::Relaxed), x.abs());
                }
                true
            }
        }
    };
    ($atomic_type:ty, $float_type:ident) => {
        // The multi-thread arm currently reuses the single-thread tests only.
        __test_atomic_float!($atomic_type, $float_type, single_thread);
        // TODO: multi thread
    };
}
// Tests for atomic bool types: swap, compare-exchange, and the full truth
// table of each logical RMW operation (`fetch_and`/`and`, `fetch_or`/`or`,
// `fetch_xor`/`xor`), plus a quickcheck-based compare_exchange test.
// The `single_thread` arm holds the non-threaded tests; the default arm
// forwards to it.
macro_rules! __test_atomic_bool {
    ($atomic_type:ty, single_thread) => {
        #[test]
        fn swap() {
            let a = <$atomic_type>::new(true);
            // Exercise `swap` with every RMW ordering via the shared helper.
            test_swap_ordering(|order| a.swap(true, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                // Each swap returns the previous value.
                assert_eq!(a.swap(true, order), true);
                assert_eq!(a.swap(false, order), true);
                assert_eq!(a.swap(false, order), false);
                assert_eq!(a.swap(true, order), false);
            }
        }
        #[test]
        fn compare_exchange() {
            let a = <$atomic_type>::new(true);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange(true, true, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(true);
                // Matching `current` succeeds; mismatched `current` fails and
                // returns the actual stored value.
                assert_eq!(a.compare_exchange(true, false, success, failure), Ok(true));
                assert_eq!(a.load(Ordering::Relaxed), false);
                assert_eq!(a.compare_exchange(true, true, success, failure), Err(false));
                assert_eq!(a.load(Ordering::Relaxed), false);
            }
        }
        #[test]
        fn compare_exchange_weak() {
            let a = <$atomic_type>::new(false);
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange_weak(false, false, success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(false);
                assert_eq!(a.compare_exchange_weak(true, true, success, failure), Err(false));
                let mut old = a.load(Ordering::Relaxed);
                let new = true;
                // Weak CAS may fail spuriously; retry until it succeeds.
                loop {
                    match a.compare_exchange_weak(old, new, success, failure) {
                        Ok(_) => break,
                        Err(x) => old = x,
                    }
                }
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn fetch_and() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| assert_eq!(a.fetch_and(true, order), true));
            // Full AND truth table: old value returned, AND result stored.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_and(false, order), true);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_and(true, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_and(false, order), false);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_and(true, order), false);
                assert_eq!(a.load(Ordering::Relaxed), false);
            }
        }
        #[test]
        fn and() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.and(true, order));
            // Same truth table as fetch_and, but `and` returns nothing, so
            // only the stored result is checked.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                a.and(false, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(true);
                a.and(true, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                a.and(false, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.and(true, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
            }
        }
        #[test]
        fn fetch_or() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| assert_eq!(a.fetch_or(false, order), true));
            // Full OR truth table: old value returned, OR result stored.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_or(false, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_or(true, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_or(false, order), false);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_or(true, order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn or() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.or(false, order));
            // OR truth table for the non-returning variant.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                a.or(false, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                a.or(true, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                a.or(false, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.or(true, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn fetch_xor() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| assert_eq!(a.fetch_xor(false, order), true));
            // Full XOR truth table: old value returned, XOR result stored.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_xor(false, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_xor(true, order), true);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_xor(false, order), false);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_xor(true, order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn xor() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.xor(false, order));
            // XOR truth table for the non-returning variant.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                a.xor(false, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                a.xor(true, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.xor(false, order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.xor(true, order);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        ::quickcheck::quickcheck! {
            fn quickcheck_compare_exchange(x: bool, y: bool) -> bool {
                // z is guaranteed != y, so the second compare_exchange must fail.
                let z = !y;
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.compare_exchange(x, y, success, failure).unwrap(), x);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                    assert_eq!(a.compare_exchange(z, x, success, failure).unwrap_err(), y);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                }
                true
            }
        }
    };
    ($atomic_type:ty) => {
        // The multi-thread arm currently reuses the single-thread tests only.
        __test_atomic_bool!($atomic_type, single_thread);
        // TODO: multi thread
    };
}
// Tests for atomic pointer types: swap and compare-exchange, plus
// quickcheck-based randomized tests that build pointers from arbitrary
// addresses via `sptr::invalid_mut` (address-only pointers, never dereferenced).
macro_rules! __test_atomic_ptr {
    ($atomic_type:ty, single_thread) => {
        #[test]
        fn swap() {
            let a = <$atomic_type>::new(ptr::null_mut());
            // Exercise `swap` with every RMW ordering via the shared helper.
            test_swap_ordering(|order| a.swap(ptr::null_mut(), order));
            let x = &mut 1;
            for &order in &test_helper::SWAP_ORDERINGS {
                // Swap between null and a real pointer; old value is returned.
                assert_eq!(a.swap(x, order), ptr::null_mut());
                assert_eq!(a.swap(ptr::null_mut(), order), x as *mut _);
            }
        }
        #[test]
        fn compare_exchange() {
            let a = <$atomic_type>::new(ptr::null_mut());
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange(ptr::null_mut(), ptr::null_mut(), success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(ptr::null_mut());
                let x = &mut 1;
                // Matching `current` (null) succeeds and installs x.
                assert_eq!(
                    a.compare_exchange(ptr::null_mut(), x, success, failure),
                    Ok(ptr::null_mut()),
                );
                assert_eq!(a.load(Ordering::Relaxed), x as *mut _);
                // `current` is now stale (null != x), so this must fail.
                assert_eq!(
                    a.compare_exchange(ptr::null_mut(), ptr::null_mut(), success, failure),
                    Err(x as *mut _),
                );
                assert_eq!(a.load(Ordering::Relaxed), x as *mut _);
            }
        }
        #[test]
        fn compare_exchange_weak() {
            let a = <$atomic_type>::new(ptr::null_mut());
            test_compare_exchange_ordering(|success, failure| {
                a.compare_exchange_weak(ptr::null_mut(), ptr::null_mut(), success, failure)
            });
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(ptr::null_mut());
                let x = &mut 1;
                assert_eq!(a.compare_exchange_weak(x, x, success, failure), Err(ptr::null_mut()));
                let mut old = a.load(Ordering::Relaxed);
                // Weak CAS may fail spuriously; retry until x is installed.
                loop {
                    match a.compare_exchange_weak(old, x, success, failure) {
                        Ok(_) => break,
                        Err(x) => old = x,
                    }
                }
                assert_eq!(a.load(Ordering::Relaxed), x as *mut _);
            }
        }
        ::quickcheck::quickcheck! {
            fn quickcheck_swap(x: usize, y: usize) -> bool {
                // Turn arbitrary addresses into (invalid, never-dereferenced) pointers.
                let x = sptr::invalid_mut(x);
                let y = sptr::invalid_mut(y);
                for &order in &test_helper::SWAP_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.swap(y, order), x);
                    assert_eq!(a.swap(x, order), y);
                }
                true
            }
            fn quickcheck_compare_exchange(x: usize, y: usize) -> bool {
                // Pick a random address z guaranteed != y so the failing
                // compare_exchange below cannot accidentally succeed.
                let z = loop {
                    let z = fastrand::usize(..);
                    if z != y {
                        break z;
                    }
                };
                let x = sptr::invalid_mut(x);
                let y = sptr::invalid_mut(y);
                let z = sptr::invalid_mut(z);
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(a.compare_exchange(x, y, success, failure).unwrap(), x);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                    assert_eq!(a.compare_exchange(z, x, success, failure).unwrap_err(), y);
                    assert_eq!(a.load(Ordering::Relaxed), y);
                }
                true
            }
        }
    };
    ($atomic_type:ty) => {
        // The multi-thread arm currently reuses the single-thread tests only.
        __test_atomic_ptr!($atomic_type, single_thread);
        // TODO: multi thread
    };
}
1429
// Public-API tests for atomic integer types: `fetch_update`, trait impls
// (`Default`, `From`, `Debug`), const `into_inner`, and `from_ptr`/`as_ptr`
// round-trips through a heap allocation.
macro_rules! __test_atomic_int_pub {
    ($atomic_type:ty, $int_type:ident) => {
        __test_atomic_pub_common!($atomic_type, $int_type);
        use std::{boxed::Box, mem};
        #[test]
        fn fetch_update() {
            let a = <$atomic_type>::new(7);
            // Exercise fetch_update with every (success, failure) ordering pair.
            test_compare_exchange_ordering(|set, fetch| a.fetch_update(set, fetch, |x| Some(x)));
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(7);
                // A closure returning None aborts the update and yields Err(current).
                assert_eq!(a.fetch_update(success, failure, |_| None), Err(7));
                // Successful updates return the previous value.
                assert_eq!(a.fetch_update(success, failure, |x| Some(x + 1)), Ok(7));
                assert_eq!(a.fetch_update(success, failure, |x| Some(x + 1)), Ok(8));
                assert_eq!(a.load(Ordering::SeqCst), 9);
            }
        }
        #[test]
        fn impls() {
            // into_inner must be usable in const context (when supported).
            #[cfg(not(portable_atomic_no_const_transmute))]
            const _: $int_type = {
                let a = <$atomic_type>::new(10);
                a.into_inner()
            };
            // Default and From(0) must agree; Debug must format like the inner value.
            let a = <$atomic_type>::default();
            let b = <$atomic_type>::from(0);
            assert_eq!(a.load(Ordering::SeqCst), b.load(Ordering::SeqCst));
            assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));
            assert_eq!(a.into_inner(), 0);
            assert_eq!(b.into_inner(), 0);

            // from_ptr/as_ptr round-trip: writes through as_ptr must be
            // visible through the original (sufficiently aligned) allocation.
            unsafe {
                let ptr: *mut Align16<$int_type> = Box::into_raw(Box::new(Align16(0)));
                assert!(ptr as usize % mem::align_of::<$atomic_type>() == 0);
                {
                    let a = <$atomic_type>::from_ptr(ptr.cast::<$int_type>());
                    *a.as_ptr() = 1;
                }
                assert_eq!((*ptr).0, 1);
                drop(Box::from_raw(ptr));
            }
        }
        ::quickcheck::quickcheck! {
            fn quickcheck_fetch_update(x: $int_type, y: $int_type) -> bool {
                // Pick a random z guaranteed != y so the final rejecting
                // closure below is certain to return None.
                let z = loop {
                    let z = fastrand::$int_type(..);
                    if z != y {
                        break z;
                    }
                };
                for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                    let a = <$atomic_type>::new(x);
                    assert_eq!(
                        a.fetch_update(success, failure, |_| Some(y))
                            .unwrap(),
                        x
                    );
                    assert_eq!(
                        a.fetch_update(success, failure, |_| Some(z))
                            .unwrap(),
                        y
                    );
                    assert_eq!(a.load(Ordering::Relaxed), z);
                    // Current value is z (!= y), so the closure returns None
                    // and fetch_update yields Err(z).
                    assert_eq!(
                        a.fetch_update(success, failure, |z| if z == y { Some(z) } else { None })
                            .unwrap_err(),
                        z
                    );
                    assert_eq!(a.load(Ordering::Relaxed), z);
                }
                true
            }
        }
    };
}
// Public-API tests for atomic float types: `fetch_update`, trait impls
// (`Default`, `From`, `Debug`), const `into_inner`, and `from_ptr`/`as_ptr`
// round-trips through a heap allocation.
macro_rules! __test_atomic_float_pub {
    ($atomic_type:ty, $float_type:ident) => {
        __test_atomic_pub_common!($atomic_type, $float_type);
        use std::{boxed::Box, mem};
        #[test]
        fn fetch_update() {
            let a = <$atomic_type>::new(7.);
            // Exercise fetch_update with every (success, failure) ordering pair.
            test_compare_exchange_ordering(|set, fetch| a.fetch_update(set, fetch, |x| Some(x)));
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(7.);
                // None aborts the update; Some applies it and returns the old value.
                assert_eq!(a.fetch_update(success, failure, |_| None), Err(7.));
                assert_eq!(a.fetch_update(success, failure, |x| Some(x + 1.)), Ok(7.));
                assert_eq!(a.fetch_update(success, failure, |x| Some(x + 1.)), Ok(8.));
                assert_eq!(a.load(Ordering::SeqCst), 9.);
            }
        }
        #[test]
        fn impls() {
            // into_inner must be usable in const context (when supported).
            #[cfg(not(portable_atomic_no_const_transmute))]
            const _: $float_type = {
                let a = <$atomic_type>::new(10.);
                a.into_inner()
            };
            // Default and From(0.) must agree; Debug must format like the inner value.
            let a = <$atomic_type>::default();
            let b = <$atomic_type>::from(0.);
            assert_eq!(a.load(Ordering::SeqCst), b.load(Ordering::SeqCst));
            assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));
            assert_eq!(a.into_inner(), 0.);
            assert_eq!(b.into_inner(), 0.);

            // from_ptr/as_ptr round-trip through a sufficiently aligned heap value.
            unsafe {
                let ptr: *mut Align16<$float_type> = Box::into_raw(Box::new(Align16(0.)));
                assert!(ptr as usize % mem::align_of::<$atomic_type>() == 0);
                {
                    let a = <$atomic_type>::from_ptr(ptr.cast::<$float_type>());
                    *a.as_ptr() = 1.;
                }
                assert_eq!((*ptr).0, 1.);
                drop(Box::from_raw(ptr));
            }
        }
    };
}
// Public-API tests for atomic bool types: `fetch_nand`, `fetch_not`/`not`,
// `fetch_update`, trait impls (`Default`, `From`, `Debug`), const
// `into_inner`, and `from_ptr`/`as_ptr` round-trips.
macro_rules! __test_atomic_bool_pub {
    ($atomic_type:ty) => {
        __test_atomic_pub_common!($atomic_type, bool);
        use std::{boxed::Box, mem};
        #[test]
        fn fetch_nand() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| assert_eq!(a.fetch_nand(false, order), true));
            // Full NAND truth table: old value returned, !(old && arg) stored.
            for &order in &test_helper::SWAP_ORDERINGS {
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_nand(false, order), true);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_nand(true, order), true);
                // Also check the bit pattern via the integer cast.
                assert_eq!(a.load(Ordering::Relaxed) as usize, 0);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_nand(false, order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_nand(true, order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn fetch_not() {
            let a = <$atomic_type>::new(true);
            test_swap_ordering(|order| a.fetch_not(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                // fetch_not returns the previous value and stores its negation.
                let a = <$atomic_type>::new(true);
                assert_eq!(a.fetch_not(order), true);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                assert_eq!(a.fetch_not(order), false);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn not() {
            let a = <$atomic_type>::new(true);
            // Fixed: exercise `not` itself with all orderings (previously this
            // called `fetch_not`, so `not`'s ordering handling was untested;
            // the sibling `and`/`or`/`xor` tests all pass their own method here).
            test_swap_ordering(|order| a.not(order));
            for &order in &test_helper::SWAP_ORDERINGS {
                // `not` returns nothing; only the stored negation is checked.
                let a = <$atomic_type>::new(true);
                a.not(order);
                assert_eq!(a.load(Ordering::Relaxed), false);
                let a = <$atomic_type>::new(false);
                a.not(order);
                assert_eq!(a.load(Ordering::Relaxed), true);
            }
        }
        #[test]
        fn fetch_update() {
            let a = <$atomic_type>::new(false);
            test_compare_exchange_ordering(|set, fetch| a.fetch_update(set, fetch, |x| Some(x)));
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(false);
                // None aborts; Some(!x) toggles and returns the old value.
                assert_eq!(a.fetch_update(success, failure, |_| None), Err(false));
                assert_eq!(a.fetch_update(success, failure, |x| Some(!x)), Ok(false));
                assert_eq!(a.fetch_update(success, failure, |x| Some(!x)), Ok(true));
                assert_eq!(a.load(Ordering::SeqCst), false);
            }
        }
        #[test]
        fn impls() {
            // into_inner must be usable in const context (when supported).
            #[cfg(not(portable_atomic_no_const_transmute))]
            const _: bool = {
                let a = <$atomic_type>::new(true);
                a.into_inner()
            };
            // Default and From(false) must agree; Debug must format like the inner value.
            let a = <$atomic_type>::default();
            let b = <$atomic_type>::from(false);
            assert_eq!(a.load(Ordering::SeqCst), b.load(Ordering::SeqCst));
            assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));
            assert_eq!(a.into_inner(), false);
            assert_eq!(b.into_inner(), false);

            // from_ptr/as_ptr round-trip through a heap-allocated bool.
            unsafe {
                let ptr: *mut bool = Box::into_raw(Box::new(false));
                assert!(ptr as usize % mem::align_of::<$atomic_type>() == 0);
                {
                    let a = <$atomic_type>::from_ptr(ptr);
                    *a.as_ptr() = true;
                }
                assert_eq!((*ptr), true);
                drop(Box::from_raw(ptr));
            }
        }
    };
}
// Public-API tests for atomic pointer types: `fetch_update`, trait impls,
// pointer-arithmetic RMW ops (`fetch_ptr_add`/`fetch_byte_add`/…), bitwise
// pointer-tagging ops, and `bit_set`/`bit_clear`/`bit_toggle`. Parts are
// adapted from the std AtomicPtr test suite (link below); the `sptr::Strict`
// import supplies `addr`/`map_addr` on stable toolchains.
macro_rules! __test_atomic_ptr_pub {
    ($atomic_type:ty) => {
        __test_atomic_pub_common!($atomic_type, *mut u8);
        use sptr::Strict;
        use std::{boxed::Box, mem};
        #[test]
        fn fetch_update() {
            let a = <$atomic_type>::new(ptr::null_mut());
            test_compare_exchange_ordering(|set, fetch| a.fetch_update(set, fetch, |x| Some(x)));
            for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
                let a = <$atomic_type>::new(ptr::null_mut());
                // None aborts the update; Some installs the new pointer.
                assert_eq!(a.fetch_update(success, failure, |_| None), Err(ptr::null_mut()));
                assert_eq!(
                    a.fetch_update(success, failure, |_| Some(&a as *const _ as *mut _)),
                    Ok(ptr::null_mut())
                );
                assert_eq!(a.load(Ordering::SeqCst), &a as *const _ as *mut _);
            }
        }
        #[test]
        fn impls() {
            // into_inner must be usable in const context (when supported).
            #[cfg(not(portable_atomic_no_const_transmute))]
            const _: *mut u8 = {
                let a = <$atomic_type>::new(ptr::null_mut());
                a.into_inner()
            };
            // Default and From(null) must agree; Debug and Pointer formatting
            // must match the inner pointer's formatting.
            let a = <$atomic_type>::default();
            let b = <$atomic_type>::from(ptr::null_mut());
            assert_eq!(a.load(Ordering::SeqCst), b.load(Ordering::SeqCst));
            assert_eq!(std::format!("{:?}", a), std::format!("{:?}", a.load(Ordering::SeqCst)));
            assert_eq!(std::format!("{:p}", a), std::format!("{:p}", a.load(Ordering::SeqCst)));
            assert_eq!(a.into_inner(), ptr::null_mut());
            assert_eq!(b.into_inner(), ptr::null_mut());

            // from_ptr/as_ptr round-trip through an aligned heap allocation.
            unsafe {
                let ptr: *mut Align16<*mut u8> = Box::into_raw(Box::new(Align16(ptr::null_mut())));
                assert!(ptr as usize % mem::align_of::<$atomic_type>() == 0);
                {
                    let a = <$atomic_type>::from_ptr(ptr.cast::<*mut u8>());
                    *a.as_ptr() = ptr::null_mut::<u8>().wrapping_add(1);
                }
                assert_eq!((*ptr).0, ptr::null_mut::<u8>().wrapping_add(1));
                drop(Box::from_raw(ptr));
            }
        }
        // https://github.com/rust-lang/rust/blob/1.80.0/library/core/tests/atomic.rs#L130-L213
        #[test]
        fn ptr_add_null() {
            // fetch_ptr_add counts in units of the pointee (i64 = 8 bytes);
            // fetch_byte_add counts single bytes.
            let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
            assert_eq!(atom.fetch_ptr_add(1, Ordering::SeqCst).addr(), 0);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 8);

            assert_eq!(atom.fetch_byte_add(1, Ordering::SeqCst).addr(), 8);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 9);

            assert_eq!(atom.fetch_ptr_sub(1, Ordering::SeqCst).addr(), 9);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 1);

            assert_eq!(atom.fetch_byte_sub(1, Ordering::SeqCst).addr(), 1);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0);
        }
        #[test]
        fn ptr_add_data() {
            // Same arithmetic as ptr_add_null but anchored at a real object,
            // compared against `wrapping_add` on the raw pointer.
            let num = 0i64;
            let n = &num as *const i64 as *mut _;
            let atom = AtomicPtr::<i64>::new(n);
            assert_eq!(atom.fetch_ptr_add(1, Ordering::SeqCst), n);
            assert_eq!(atom.load(Ordering::SeqCst), n.wrapping_add(1));

            assert_eq!(atom.fetch_ptr_sub(1, Ordering::SeqCst), n.wrapping_add(1));
            assert_eq!(atom.load(Ordering::SeqCst), n);
            #[allow(clippy::cast_ptr_alignment)]
            let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();

            assert_eq!(atom.fetch_byte_add(1, Ordering::SeqCst), n);
            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(1));

            assert_eq!(atom.fetch_byte_add(5, Ordering::SeqCst), bytes_from_n(1));
            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(6));

            assert_eq!(atom.fetch_byte_sub(1, Ordering::SeqCst), bytes_from_n(6));
            assert_eq!(atom.load(Ordering::SeqCst), bytes_from_n(5));

            assert_eq!(atom.fetch_byte_sub(5, Ordering::SeqCst), bytes_from_n(5));
            assert_eq!(atom.load(Ordering::SeqCst), n);
        }
        #[test]
        fn ptr_bitops() {
            // Bitwise ops on a null pointer operate purely on the address bits.
            let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
            assert_eq!(atom.fetch_or(0b0111, Ordering::SeqCst).addr(), 0);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b0111);

            assert_eq!(atom.fetch_and(0b1101, Ordering::SeqCst).addr(), 0b0111);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b0101);

            assert_eq!(atom.fetch_xor(0b1111, Ordering::SeqCst).addr(), 0b0101);
            assert_eq!(atom.load(Ordering::SeqCst).addr(), 0b1010);
        }
        #[test]
        fn ptr_bitops_tagging() {
            // Pointer-tagging scenario: a 16-byte-aligned object leaves the
            // low 4 address bits free for tags.
            const MASK_TAG: usize = 0b1111;
            const MASK_PTR: usize = !MASK_TAG;

            #[repr(align(16))]
            struct Tagme(#[allow(dead_code)] u128);

            let tagme = Tagme(1000);
            let ptr = &tagme as *const Tagme as *mut Tagme;
            let atom: AtomicPtr<Tagme> = AtomicPtr::new(ptr);

            // Alignment guarantees the tag bits start out clear.
            assert_eq!(ptr.addr() & MASK_TAG, 0);

            assert_eq!(atom.fetch_or(0b0111, Ordering::SeqCst), ptr);
            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b111));

            assert_eq!(
                atom.fetch_and(MASK_PTR | 0b0010, Ordering::SeqCst),
                ptr.map_addr(|a| a | 0b111)
            );
            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b0010));

            assert_eq!(atom.fetch_xor(0b1011, Ordering::SeqCst), ptr.map_addr(|a| a | 0b0010));
            assert_eq!(atom.load(Ordering::SeqCst), ptr.map_addr(|a| a | 0b1001));

            // Clearing the tag bits restores the original pointer.
            assert_eq!(atom.fetch_and(MASK_PTR, Ordering::SeqCst), ptr.map_addr(|a| a | 0b1001));
            assert_eq!(atom.load(Ordering::SeqCst), ptr);
        }
        #[test]
        fn bit_set() {
            // Initial value has bit 0 already set, so bit_set returns true here.
            let a = <$atomic_type>::new(ptr::null_mut::<u64>().cast::<u8>().map_addr(|a| a | 1));
            test_swap_ordering(|order| assert!(a.bit_set(0, order)));
            for &order in &test_helper::SWAP_ORDERINGS {
                let pointer = &mut 1u64 as *mut u64 as *mut u8;
                let atom = <$atomic_type>::new(pointer);
                // Tag the bottom bit of the pointer.
                assert!(!atom.bit_set(0, order));
                // Extract and untag.
                let tagged = atom.load(Ordering::Relaxed);
                assert_eq!(tagged.addr() & 1, 1);
                assert_eq!(tagged.map_addr(|p| p & !1), pointer);
            }
        }
        #[test]
        fn bit_clear() {
            // Initial value has bit 0 clear, so bit_clear returns false here.
            let a = <$atomic_type>::new(ptr::null_mut::<u64>().cast::<u8>());
            test_swap_ordering(|order| assert!(!a.bit_clear(0, order)));
            for &order in &test_helper::SWAP_ORDERINGS {
                let pointer = &mut 1u64 as *mut u64 as *mut u8;
                // A tagged pointer
                let atom = <$atomic_type>::new(pointer.map_addr(|a| a | 1));
                // Bit already set, so bit_set reports the previous state (true).
                assert!(atom.bit_set(0, order));
                // Untag
                assert!(atom.bit_clear(0, order));
            }
        }
        #[test]
        fn bit_toggle() {
            let a = <$atomic_type>::new(ptr::null_mut::<u64>().cast::<u8>());
            test_swap_ordering(|order| a.bit_toggle(0, order));
            for &order in &test_helper::SWAP_ORDERINGS {
                let pointer = &mut 1u64 as *mut u64 as *mut u8;
                let atom = <$atomic_type>::new(pointer);
                // Toggle a tag bit on the pointer.
                atom.bit_toggle(0, order);
                assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
            }
        }
    };
}
1805
// Expands the load/store-only test suite for an integer atomic into a
// `test_atomic_<int>` module (e.g. `u32` -> `test_atomic_u32` / `AtomicU32`);
// `paste!` builds the module and type names from the integer type identifier.
macro_rules! test_atomic_int_load_store {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type);
            }
        }
    };
}
// Expands the load/store-only test suite for `AtomicPtr<u8>` into a
// `test_atomic_ptr` module.
macro_rules! test_atomic_ptr_load_store {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_ptr {
            use super::*;
            __test_atomic_ptr_load_store!(AtomicPtr<u8>);
        }
    };
}
1836
// Expands the single-thread-only test suite (load/store plus RMW tests) for
// an integer atomic into a `test_atomic_<int>` module.
macro_rules! test_atomic_int_single_thread {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type, single_thread);
                __test_atomic_int!([<Atomic $int_type:camel>], $int_type, single_thread);
            }
        }
    };
}
// Generates a test module for `AtomicPtr<u8>` using the `single_thread`
// variants of the underlying test macros.
macro_rules! test_atomic_ptr_single_thread {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_ptr {
            use super::*;
            __test_atomic_ptr_load_store!(AtomicPtr<u8>, single_thread);
            __test_atomic_ptr!(AtomicPtr<u8>, single_thread);
        }
    };
}
1869
// Generates a test module exercising load/store and the full integer API
// (RMW operations, compare_exchange, etc.) for the given integer type.
macro_rules! test_atomic_int {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type);
                __test_atomic_int!([<Atomic $int_type:camel>], $int_type);
            }
        }
    };
}
// Generates a test module exercising load/store and the full pointer API for
// `AtomicPtr<u8>`.
macro_rules! test_atomic_ptr {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        // The generated tests use strict-provenance-style pointer methods,
        // which the sptr crate polyfills on older toolchains.
        #[allow(unstable_name_collisions)] // for sptr crate
        mod test_atomic_ptr {
            use super::*;
            __test_atomic_ptr_load_store!(AtomicPtr<u8>);
            __test_atomic_ptr!(AtomicPtr<u8>);
        }
    };
}
1903
// Generates a test module for an integer atomic including the extra tests for
// the public (crate-external) API surface.
macro_rules! test_atomic_int_pub {
    ($int_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $int_type>] {
                use super::*;
                __test_atomic_int_load_store!([<Atomic $int_type:camel>], $int_type);
                __test_atomic_int!([<Atomic $int_type:camel>], $int_type);
                __test_atomic_int_pub!([<Atomic $int_type:camel>], $int_type);
            }
        }
    };
}
// Generates a test module for a float atomic (public API surface included).
// Only compiled when the `float` cargo feature is enabled.
#[cfg(feature = "float")]
macro_rules! test_atomic_float_pub {
    ($float_type:ident) => {
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::float_arithmetic,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<test_atomic_ $float_type>] {
                use super::*;
                __test_atomic_float_load_store!([<Atomic $float_type:camel>], $float_type);
                __test_atomic_float!([<Atomic $float_type:camel>], $float_type);
                __test_atomic_float_pub!([<Atomic $float_type:camel>], $float_type);
            }
        }
    };
}
// Generates a test module for `AtomicBool` (public API surface included).
macro_rules! test_atomic_bool_pub {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        mod test_atomic_bool {
            use super::*;
            __test_atomic_bool_load_store!(AtomicBool);
            __test_atomic_bool!(AtomicBool);
            __test_atomic_bool_pub!(AtomicBool);
        }
    };
}
// Generates a test module for `AtomicPtr<u8>` (public API surface included).
macro_rules! test_atomic_ptr_pub {
    () => {
        #[allow(
            clippy::alloc_instead_of_core,
            clippy::std_instead_of_alloc,
            clippy::std_instead_of_core,
            clippy::undocumented_unsafe_blocks
        )]
        // The generated tests use strict-provenance-style pointer methods,
        // which the sptr crate polyfills on older toolchains.
        #[allow(unstable_name_collisions)] // for sptr crate
        mod test_atomic_ptr {
            use super::*;
            __test_atomic_ptr_load_store!(AtomicPtr<u8>);
            __test_atomic_ptr!(AtomicPtr<u8>);
            __test_atomic_ptr_pub!(AtomicPtr<u8>);
        }
    };
}
1975
// Asserts that `$a` and `$b` have performed equivalent operations.
#[cfg(feature = "float")]
macro_rules! assert_float_op_eq {
    ($a:expr, $b:expr $(,)?) => {{
        // See also:
        // - https://github.com/rust-lang/unsafe-code-guidelines/issues/237.
        // - https://github.com/rust-lang/portable-simd/issues/39.
        let a = $a;
        let b = $b;
        // Treat the values as equivalent when both are NaN, or when both are
        // infinities of the same sign; plain `assert_eq!` would reject
        // NaN == NaN, so those cases are accepted up front.
        if a.is_nan() && b.is_nan() // don't check sign of NaN: https://github.com/rust-lang/rust/issues/55131
            || a.is_infinite()
                && b.is_infinite()
                && a.is_sign_positive() == b.is_sign_positive()
                && a.is_sign_negative() == b.is_sign_negative()
        {
            // ok
        } else {
            assert_eq!(a, b);
        }
    }};
}
1997
// Runs `f`, asserts that it panics (by unwrapping the `Err` from
// `catch_unwind`), and returns the panic message as a `String`.
//
// Backtrace printing and the default panic-message output are suppressed for
// the duration of the call; the previous panic hook and the original
// RUST_BACKTRACE value are restored before returning.
#[allow(unused_unsafe)] // for old rustc
#[cfg_attr(not(portable_atomic_no_track_caller), track_caller)]
pub(crate) fn assert_panic<T: std::fmt::Debug>(f: impl FnOnce() -> T) -> std::string::String {
    // Save current state so it can be restored after the call.
    let backtrace = std::env::var_os("RUST_BACKTRACE");
    let hook = std::panic::take_hook();
    // set_var/remove_var is fine as we run tests with RUST_TEST_THREADS=1
    // std::panic::set_backtrace_style is better way here, but is unstable.
    unsafe { std::env::set_var("RUST_BACKTRACE", "0") } // Suppress backtrace
    std::panic::set_hook(std::boxed::Box::new(|_| {})); // Suppress panic msg
    let res = std::panic::catch_unwind(std::panic::AssertUnwindSafe(f));
    // Restore the previous hook and RUST_BACKTRACE (removing it if it was unset).
    std::panic::set_hook(hook);
    match backtrace {
        Some(v) => unsafe { std::env::set_var("RUST_BACKTRACE", v) },
        None => unsafe { std::env::remove_var("RUST_BACKTRACE") },
    }
    // Panic payloads produced by this test suite are either `String`
    // (formatted panics) or `&'static str` (literal panics).
    let msg = res.unwrap_err();
    msg.downcast_ref::<std::string::String>()
        .cloned()
        .unwrap_or_else(|| msg.downcast_ref::<&'static str>().copied().unwrap().into())
}
rand_load_ordering() -> Ordering2018 pub(crate) fn rand_load_ordering() -> Ordering {
2019 test_helper::LOAD_ORDERINGS[fastrand::usize(0..test_helper::LOAD_ORDERINGS.len())]
2020 }
test_load_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T)2021 pub(crate) fn test_load_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T) {
2022 for &order in &test_helper::LOAD_ORDERINGS {
2023 f(order);
2024 }
2025
2026 if !skip_should_panic_test() {
2027 assert_eq!(
2028 assert_panic(|| f(Ordering::Release)),
2029 "there is no such thing as a release load"
2030 );
2031 assert_eq!(
2032 assert_panic(|| f(Ordering::AcqRel)),
2033 "there is no such thing as an acquire-release load"
2034 );
2035 }
2036 }
rand_store_ordering() -> Ordering2037 pub(crate) fn rand_store_ordering() -> Ordering {
2038 test_helper::STORE_ORDERINGS[fastrand::usize(0..test_helper::STORE_ORDERINGS.len())]
2039 }
test_store_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T)2040 pub(crate) fn test_store_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T) {
2041 for &order in &test_helper::STORE_ORDERINGS {
2042 f(order);
2043 }
2044
2045 if !skip_should_panic_test() {
2046 assert_eq!(
2047 assert_panic(|| f(Ordering::Acquire)),
2048 "there is no such thing as an acquire store"
2049 );
2050 assert_eq!(
2051 assert_panic(|| f(Ordering::AcqRel)),
2052 "there is no such thing as an acquire-release store"
2053 );
2054 }
2055 }
rand_compare_exchange_ordering() -> (Ordering, Ordering)2056 pub(crate) fn rand_compare_exchange_ordering() -> (Ordering, Ordering) {
2057 test_helper::COMPARE_EXCHANGE_ORDERINGS
2058 [fastrand::usize(0..test_helper::COMPARE_EXCHANGE_ORDERINGS.len())]
2059 }
test_compare_exchange_ordering<T: std::fmt::Debug>( f: impl Fn(Ordering, Ordering) -> T, )2060 pub(crate) fn test_compare_exchange_ordering<T: std::fmt::Debug>(
2061 f: impl Fn(Ordering, Ordering) -> T,
2062 ) {
2063 for &(success, failure) in &test_helper::COMPARE_EXCHANGE_ORDERINGS {
2064 f(success, failure);
2065 }
2066
2067 if !skip_should_panic_test() {
2068 for &order in &test_helper::SWAP_ORDERINGS {
2069 let msg = assert_panic(|| f(order, Ordering::AcqRel));
2070 assert!(
2071 msg == "there is no such thing as an acquire-release failure ordering"
2072 || msg == "there is no such thing as an acquire-release load",
2073 "{}",
2074 msg
2075 );
2076 let msg = assert_panic(|| f(order, Ordering::Release));
2077 assert!(
2078 msg == "there is no such thing as a release failure ordering"
2079 || msg == "there is no such thing as a release load",
2080 "{}",
2081 msg
2082 );
2083 }
2084 }
2085 }
rand_swap_ordering() -> Ordering2086 pub(crate) fn rand_swap_ordering() -> Ordering {
2087 test_helper::SWAP_ORDERINGS[fastrand::usize(0..test_helper::SWAP_ORDERINGS.len())]
2088 }
test_swap_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T)2089 pub(crate) fn test_swap_ordering<T: std::fmt::Debug>(f: impl Fn(Ordering) -> T) {
2090 for &order in &test_helper::SWAP_ORDERINGS {
2091 f(order);
2092 }
2093 }
2094 // for stress test generated by __test_atomic_* macros
stress_test_config() -> (usize, usize)2095 pub(crate) fn stress_test_config() -> (usize, usize) {
2096 let iterations = if cfg!(miri) {
2097 50
2098 } else if cfg!(debug_assertions) {
2099 5_000
2100 } else {
2101 25_000
2102 };
2103 let threads = if cfg!(debug_assertions) { 2 } else { fastrand::usize(2..=8) };
2104 std::eprintln!("threads={}", threads);
2105 (iterations, threads)
2106 }
skip_should_panic_test() -> bool2107 fn skip_should_panic_test() -> bool {
2108 // Miri's panic handling is slow
2109 // MSAN false positive: https://gist.github.com/taiki-e/dd6269a8ffec46284fdc764a4849f884
2110 is_panic_abort()
2111 || cfg!(miri)
2112 || option_env!("CARGO_PROFILE_RELEASE_LTO").map_or(false, |v| v == "fat")
2113 && build_context::SANITIZE.contains("memory")
2114 }
2115
// For -C panic=abort -Z panic_abort_tests: https://github.com/rust-lang/rust/issues/67650
// Returns true when the build-time panic strategy is "abort", i.e. when
// unwinding panics cannot be caught with catch_unwind.
fn is_panic_abort() -> bool {
    build_context::PANIC.contains("abort")
}
2120
// Forces 16-byte alignment on the wrapped value. Used by tests elsewhere in
// this module that need over-aligned storage (usage is outside this chunk).
#[repr(C, align(16))]
pub(crate) struct Align16<T>(pub(crate) T);
2123
// Test the cases that should not fail if the memory ordering is implemented correctly.
// This is still not exhaustive and only tests a few cases.
// This currently only supports 32-bit or more integers.
macro_rules! __stress_test_acquire_release {
    // Generates a #[test] that must never panic: the orderings are strong
    // enough that the asserted invariant (a <= b) always holds.
    (should_pass, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
        paste::paste! {
            #[test]
            #[allow(clippy::cast_possible_truncation)]
            fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
                __stress_test_acquire_release!([<Atomic $int_type:camel>],
                    $int_type, $write, $load_order, $store_order);
            }
        }
    };
    // Generates a #[test] that may panic on weak memory models; the panic (if
    // any) is caught by the `can_panic` wrapper imported by the caller.
    (can_panic, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
        paste::paste! {
            // Currently, to make this test work well enough outside of Miri, tens of thousands
            // of iterations are needed, but this test is slow in some environments.
            // So, ignore on non-Miri environments by default. See also catch_unwind_on_weak_memory_arch.
            #[test]
            #[cfg_attr(not(miri), ignore)]
            #[allow(clippy::cast_possible_truncation)]
            fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
                can_panic("a=", || __stress_test_acquire_release!([<Atomic $int_type:camel>],
                    $int_type, $write, $load_order, $store_order));
            }
        }
    };
    // The actual test body: a writer thread stores `i` to `b` (Relaxed) and
    // then to `a` ($store_order); the reader loads `a` ($load_order) then `b`
    // (Relaxed) and asserts a <= b, which a release store paired with an
    // acquire load (or stronger) guarantees.
    ($atomic_type:ident, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {{
        use super::*;
        use crossbeam_utils::thread;
        use std::{
            convert::TryFrom,
            sync::atomic::{AtomicUsize, Ordering},
        };
        let mut n: usize = if cfg!(miri) { 10 } else { 50_000 };
        // This test is relatively fast because it spawns only one thread, but
        // the iterations are limited to a maximum value of integers.
        if $int_type::try_from(n).is_err() {
            n = $int_type::MAX as usize;
        }
        let a = &$atomic_type::new(0);
        let b = &AtomicUsize::new(0);
        thread::scope(|s| {
            s.spawn(|_| {
                for i in 0..n {
                    b.store(i, Ordering::Relaxed);
                    a.$write(i as $int_type, Ordering::$store_order);
                }
            });
            // Reader runs on the scope's own thread until the final value is seen.
            loop {
                let a = a.load(Ordering::$load_order);
                let b = b.load(Ordering::Relaxed);
                assert!(a as usize <= b, "a={},b={}", a, b);
                if a as usize == n - 1 {
                    break;
                }
            }
        })
        .unwrap();
    }};
}
macro_rules! __stress_test_seqcst {
    // Generates a #[test] that must never panic: with sequentially consistent
    // orderings the store-buffering outcome (both threads reading 0) is forbidden.
    (should_pass, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
        paste::paste! {
            // Currently, to make this test work well enough outside of Miri, tens of thousands
            // of iterations are needed, but this test is very slow in some environments because
            // it creates two threads for each iteration.
            // So, ignore on QEMU by default.
            #[test]
            #[cfg_attr(qemu, ignore)]
            fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
                __stress_test_seqcst!([<Atomic $int_type:camel>],
                    $write, $load_order, $store_order);
            }
        }
    };
    // Generates a #[test] that may panic with weaker orderings; the panic (if
    // any) is caught by the `can_panic` wrapper imported by the caller.
    (can_panic, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
        paste::paste! {
            // Currently, to make this test work well enough outside of Miri, tens of thousands
            // of iterations are needed, but this test is very slow in some environments because
            // it creates two threads for each iteration.
            // So, ignore on non-Miri environments by default. See also catch_unwind_on_non_seqcst_arch.
            #[test]
            #[cfg_attr(not(miri), ignore)]
            fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
                can_panic("c=2", || __stress_test_seqcst!([<Atomic $int_type:camel>],
                    $write, $load_order, $store_order));
            }
        }
    };
    // The actual test body (a store-buffering litmus test): each of two threads
    // writes 1 to its own flag ($store_order) and then reads the other thread's
    // flag ($load_order), incrementing `c` if it read 0. Sequential consistency
    // forbids both threads reading 0, so the assert allows c == 0 || c == 1.
    ($atomic_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {{
        use super::*;
        use crossbeam_utils::thread;
        use std::sync::atomic::{AtomicUsize, Ordering};
        // Iteration count, scaled down in slow environments.
        let n: usize = if cfg!(miri) {
            8
        } else if cfg!(valgrind)
            || build_context::SANITIZE.contains("address")
            || build_context::SANITIZE.contains("memory")
        {
            50
        } else if option_env!("GITHUB_ACTIONS").is_some() && cfg!(not(target_os = "linux")) {
            // GitHub Actions' macOS and Windows runners are slow.
            5_000
        } else {
            50_000
        };
        let a = &$atomic_type::new(0);
        let b = &$atomic_type::new(0);
        let c = &AtomicUsize::new(0);
        let ready = &AtomicUsize::new(0);
        thread::scope(|s| {
            for n in 0..n {
                a.store(0, Ordering::Relaxed);
                b.store(0, Ordering::Relaxed);
                c.store(0, Ordering::Relaxed);
                let h_a = s.spawn(|_| {
                    // Busy-wait so both threads enter the racy section together.
                    // NOTE(review): `ready` is never reset to 0 between loop
                    // iterations, so this barrier only takes effect on the first
                    // iteration — confirm this is intended.
                    while ready.load(Ordering::Relaxed) == 0 {}
                    a.$write(1, Ordering::$store_order);
                    if b.load(Ordering::$load_order) == 0 {
                        c.fetch_add(1, Ordering::Relaxed);
                    }
                });
                let h_b = s.spawn(|_| {
                    while ready.load(Ordering::Relaxed) == 0 {}
                    b.$write(1, Ordering::$store_order);
                    if a.load(Ordering::$load_order) == 0 {
                        c.fetch_add(1, Ordering::Relaxed);
                    }
                });
                ready.store(1, Ordering::Relaxed);
                h_a.join().unwrap();
                h_b.join().unwrap();
                let c = c.load(Ordering::Relaxed);
                assert!(c == 0 || c == 1, "c={},n={}", c, n);
            }
        })
        .unwrap();
    }};
}
// Catches unwinding panic on architectures with weak memory models.
//
// `pat` is a substring that the caught panic message must contain; `f` is the
// stress-test body that may legitimately panic on weakly ordered hardware.
#[allow(dead_code)]
pub(crate) fn catch_unwind_on_weak_memory_arch(pat: &str, f: impl Fn()) {
    // With x86 TSO, RISC-V TSO (optional, not default), SPARC TSO (optional, default),
    // and IBM-370 memory models should never be a panic here.
    // Miri emulates weak memory models regardless of target architectures.
    if cfg!(all(
        any(
            target_arch = "x86",
            target_arch = "x86_64",
            target_arch = "s390x",
            target_arch = "sparc",
            target_arch = "sparc64",
        ),
        not(any(miri)),
    )) {
        // Strong (TSO-like) target: run directly, any panic fails the test.
        f();
    } else if !is_panic_abort() {
        // This could be is_err on architectures with weak memory models.
        // However, this does not necessarily mean that it will always be panic,
        // and implementing it with stronger orderings is also okay.
        match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
            Ok(()) => {
                // panic!();
            }
            Err(msg) => {
                // Panic payloads are either `String` or `&'static str`; verify
                // the message matches the expected assertion text.
                let msg = msg
                    .downcast_ref::<std::string::String>()
                    .cloned()
                    .unwrap_or_else(|| msg.downcast_ref::<&'static str>().copied().unwrap().into());
                assert!(msg.contains(pat), "{}", msg);
            }
        }
    }
    // Note: with -C panic=abort the catch_unwind branch is skipped entirely,
    // since unwinding panics cannot be caught there.
}
2300 // Catches unwinding panic on architectures with non-sequentially consistent memory models.
2301 #[allow(dead_code)]
catch_unwind_on_non_seqcst_arch(pat: &str, f: impl Fn())2302 pub(crate) fn catch_unwind_on_non_seqcst_arch(pat: &str, f: impl Fn()) {
2303 if !is_panic_abort() {
2304 // This could be Err on architectures with non-sequentially consistent memory models.
2305 // However, this does not necessarily mean that it will always be panic,
2306 // and implementing it with stronger orderings is also okay.
2307 match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
2308 Ok(()) => {
2309 // panic!();
2310 }
2311 Err(msg) => {
2312 let msg = msg
2313 .downcast_ref::<std::string::String>()
2314 .cloned()
2315 .unwrap_or_else(|| msg.downcast_ref::<&'static str>().copied().unwrap().into());
2316 assert!(msg.contains(pat), "{}", msg);
2317 }
2318 }
2319 }
2320 }
// Instantiates the acquire-release and seqcst stress-test matrices for plain
// `store` on the given integer type's atomic. Combinations that the memory
// model guarantees use `should_pass`; all others use `can_panic`.
macro_rules! stress_test_load_store {
    ($int_type:ident) => {
        // debug mode is slow.
        #[cfg(any(not(debug_assertions), miri))]
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<stress_acquire_release_load_store_ $int_type>] {
                use crate::tests::helper::catch_unwind_on_weak_memory_arch as can_panic;
                // (load order, store order): Acquire-or-stronger load paired with
                // Release-or-stronger store is the guaranteed-passing region.
                __stress_test_acquire_release!(can_panic, $int_type, store, Relaxed, Relaxed);
                __stress_test_acquire_release!(can_panic, $int_type, store, Relaxed, Release);
                __stress_test_acquire_release!(can_panic, $int_type, store, Relaxed, SeqCst);
                __stress_test_acquire_release!(can_panic, $int_type, store, Acquire, Relaxed);
                __stress_test_acquire_release!(should_pass, $int_type, store, Acquire, Release);
                __stress_test_acquire_release!(should_pass, $int_type, store, Acquire, SeqCst);
                __stress_test_acquire_release!(can_panic, $int_type, store, SeqCst, Relaxed);
                __stress_test_acquire_release!(should_pass, $int_type, store, SeqCst, Release);
                __stress_test_acquire_release!(should_pass, $int_type, store, SeqCst, SeqCst);
            }
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<stress_seqcst_load_store_ $int_type>] {
                use crate::tests::helper::catch_unwind_on_non_seqcst_arch as can_panic;
                // Only SeqCst/SeqCst is guaranteed to pass the store-buffering test.
                __stress_test_seqcst!(can_panic, $int_type, store, Relaxed, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, store, Relaxed, Release);
                __stress_test_seqcst!(can_panic, $int_type, store, Relaxed, SeqCst);
                __stress_test_seqcst!(can_panic, $int_type, store, Acquire, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, store, Acquire, Release);
                __stress_test_seqcst!(can_panic, $int_type, store, Acquire, SeqCst);
                __stress_test_seqcst!(can_panic, $int_type, store, SeqCst, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, store, SeqCst, Release);
                __stress_test_seqcst!(should_pass, $int_type, store, SeqCst, SeqCst);
            }
        }
    };
}
// Instantiates the full stress-test suite for an integer type: the load/store
// matrix (via stress_test_load_store!) plus the same matrices with `swap` as
// the write operation (swap is an RMW, so Acquire/AcqRel store orders appear too).
macro_rules! stress_test {
    ($int_type:ident) => {
        stress_test_load_store!($int_type);
        // debug mode is slow.
        #[cfg(any(not(debug_assertions), miri))]
        paste::paste! {
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<stress_acquire_release_load_swap_ $int_type>] {
                use crate::tests::helper::catch_unwind_on_weak_memory_arch as can_panic;
                // Acquire-or-stronger load paired with Release-or-stronger write
                // is the guaranteed-passing region.
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, Relaxed);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, Acquire);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, Release);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, AcqRel);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Relaxed, SeqCst);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Acquire, Relaxed);
                __stress_test_acquire_release!(can_panic, $int_type, swap, Acquire, Acquire);
                __stress_test_acquire_release!(should_pass, $int_type, swap, Acquire, Release);
                __stress_test_acquire_release!(should_pass, $int_type, swap, Acquire, AcqRel);
                __stress_test_acquire_release!(should_pass, $int_type, swap, Acquire, SeqCst);
                __stress_test_acquire_release!(can_panic, $int_type, swap, SeqCst, Relaxed);
                __stress_test_acquire_release!(can_panic, $int_type, swap, SeqCst, Acquire);
                __stress_test_acquire_release!(should_pass, $int_type, swap, SeqCst, Release);
                __stress_test_acquire_release!(should_pass, $int_type, swap, SeqCst, AcqRel);
                __stress_test_acquire_release!(should_pass, $int_type, swap, SeqCst, SeqCst);
            }
            #[allow(
                clippy::alloc_instead_of_core,
                clippy::std_instead_of_alloc,
                clippy::std_instead_of_core,
                clippy::undocumented_unsafe_blocks
            )]
            mod [<stress_seqcst_load_swap_ $int_type>] {
                use crate::tests::helper::catch_unwind_on_non_seqcst_arch as can_panic;
                // Only SeqCst/SeqCst is guaranteed to pass the store-buffering test.
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, Acquire);
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, Release);
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, AcqRel);
                __stress_test_seqcst!(can_panic, $int_type, swap, Relaxed, SeqCst);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, Acquire);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, Release);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, AcqRel);
                __stress_test_seqcst!(can_panic, $int_type, swap, Acquire, SeqCst);
                __stress_test_seqcst!(can_panic, $int_type, swap, SeqCst, Relaxed);
                __stress_test_seqcst!(can_panic, $int_type, swap, SeqCst, Acquire);
                __stress_test_seqcst!(can_panic, $int_type, swap, SeqCst, Release);
                __stress_test_seqcst!(can_panic, $int_type, swap, SeqCst, AcqRel);
                __stress_test_seqcst!(should_pass, $int_type, swap, SeqCst, SeqCst);
            }
        }
    };
}
2422