use core::sync::atomic::Ordering::SeqCst;
use core::sync::atomic::*;

#[test]
fn bool_() {
    let a = AtomicBool::new(false);
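    // `compare_exchange` returns `Ok` with the previous value when the swap
    // happens, and `Err` with the actual value when the comparison fails.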
    assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
    assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Err(true));

    a.store(false, SeqCst);
    assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
}

#[test]
fn bool_and() {
    let a = AtomicBool::new(true);
    assert_eq!(a.fetch_and(false, SeqCst), true);
    assert_eq!(a.load(SeqCst), false);
}

#[test]
fn bool_nand() {
    let a = AtomicBool::new(false);
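    // `fetch_nand` stores `!(old & val)` and returns the previous value.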
    assert_eq!(a.fetch_nand(false, SeqCst), false);
    assert_eq!(a.load(SeqCst), true);
    assert_eq!(a.fetch_nand(false, SeqCst), true);
    assert_eq!(a.load(SeqCst), true);
    assert_eq!(a.fetch_nand(true, SeqCst), true);
    assert_eq!(a.load(SeqCst), false);
    assert_eq!(a.fetch_nand(true, SeqCst), false);
    assert_eq!(a.load(SeqCst), true);
}

#[test]
fn uint_and() {
    let x = AtomicUsize::new(0xf731);
    assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0xf731 & 0x137f);
}

#[test]
fn uint_nand() {
    let x = AtomicUsize::new(0xf731);
    assert_eq!(x.fetch_nand(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), !(0xf731 & 0x137f));
}

#[test]
fn uint_or() {
    let x = AtomicUsize::new(0xf731);
    assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0xf731 | 0x137f);
}

#[test]
fn uint_xor() {
    let x = AtomicUsize::new(0xf731);
    assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn uint_min() {
    let x = AtomicUsize::new(0xf731);
    assert_eq!(x.fetch_min(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0x137f);
    assert_eq!(x.fetch_min(0xf731, SeqCst), 0x137f);
    assert_eq!(x.load(SeqCst), 0x137f);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn uint_max() {
    let x = AtomicUsize::new(0x137f);
    assert_eq!(x.fetch_max(0xf731, SeqCst), 0x137f);
    assert_eq!(x.load(SeqCst), 0xf731);
    assert_eq!(x.fetch_max(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0xf731);
}

#[test]
fn int_and() {
    let x = AtomicIsize::new(0xf731);
    assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0xf731 & 0x137f);
}

#[test]
fn int_nand() {
    let x = AtomicIsize::new(0xf731);
    assert_eq!(x.fetch_nand(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), !(0xf731 & 0x137f));
}

#[test]
fn int_or() {
    let x = AtomicIsize::new(0xf731);
    assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0xf731 | 0x137f);
}

#[test]
fn int_xor() {
    let x = AtomicIsize::new(0xf731);
    assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn int_min() {
    let x = AtomicIsize::new(0xf731);
    assert_eq!(x.fetch_min(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0x137f);
    assert_eq!(x.fetch_min(0xf731, SeqCst), 0x137f);
    assert_eq!(x.load(SeqCst), 0x137f);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn int_max() {
    let x = AtomicIsize::new(0x137f);
    assert_eq!(x.fetch_max(0xf731, SeqCst), 0x137f);
    assert_eq!(x.load(SeqCst), 0xf731);
    assert_eq!(x.fetch_max(0x137f, SeqCst), 0xf731);
    assert_eq!(x.load(SeqCst), 0xf731);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn ptr_add_null() {
    let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
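    // `fetch_ptr_add`/`fetch_ptr_sub` step in whole `i64` elements (8 bytes);
    // the `byte` variants step in single bytes.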
    assert_eq!(atom.fetch_ptr_add(1, SeqCst).addr(), 0);
    assert_eq!(atom.load(SeqCst).addr(), 8);

    assert_eq!(atom.fetch_byte_add(1, SeqCst).addr(), 8);
    assert_eq!(atom.load(SeqCst).addr(), 9);

    assert_eq!(atom.fetch_ptr_sub(1, SeqCst).addr(), 9);
    assert_eq!(atom.load(SeqCst).addr(), 1);

    assert_eq!(atom.fetch_byte_sub(1, SeqCst).addr(), 1);
    assert_eq!(atom.load(SeqCst).addr(), 0);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn ptr_add_data() {
    let num = 0i64;
    let n = &num as *const i64 as *mut _;
    let atom = AtomicPtr::<i64>::new(n);
    assert_eq!(atom.fetch_ptr_add(1, SeqCst), n);
    assert_eq!(atom.load(SeqCst), n.wrapping_add(1));

    assert_eq!(atom.fetch_ptr_sub(1, SeqCst), n.wrapping_add(1));
    assert_eq!(atom.load(SeqCst), n);
    let bytes_from_n = |b| n.wrapping_byte_add(b);

    assert_eq!(atom.fetch_byte_add(1, SeqCst), n);
    assert_eq!(atom.load(SeqCst), bytes_from_n(1));

    assert_eq!(atom.fetch_byte_add(5, SeqCst), bytes_from_n(1));
    assert_eq!(atom.load(SeqCst), bytes_from_n(6));

    assert_eq!(atom.fetch_byte_sub(1, SeqCst), bytes_from_n(6));
    assert_eq!(atom.load(SeqCst), bytes_from_n(5));

    assert_eq!(atom.fetch_byte_sub(5, SeqCst), bytes_from_n(5));
    assert_eq!(atom.load(SeqCst), n);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn ptr_bitops() {
    let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
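    // The bit operations act on the pointer's address only; its provenance is
    // left untouched.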
    assert_eq!(atom.fetch_or(0b0111, SeqCst).addr(), 0);
    assert_eq!(atom.load(SeqCst).addr(), 0b0111);

    assert_eq!(atom.fetch_and(0b1101, SeqCst).addr(), 0b0111);
    assert_eq!(atom.load(SeqCst).addr(), 0b0101);

    assert_eq!(atom.fetch_xor(0b1111, SeqCst).addr(), 0b0101);
    assert_eq!(atom.load(SeqCst).addr(), 0b1010);
}

#[test]
#[cfg(any(not(target_arch = "arm"), target_os = "linux"))] // Missing intrinsic in compiler-builtins
fn ptr_bitops_tagging() {
    #[repr(align(16))]
    struct Tagme(u128);

    let tagme = Tagme(1000);
    let ptr = &tagme as *const Tagme as *mut Tagme;
    let atom: AtomicPtr<Tagme> = AtomicPtr::new(ptr);

    const MASK_TAG: usize = 0b1111;
    const MASK_PTR: usize = !MASK_TAG;
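    // `Tagme` has 16-byte alignment, so the low four bits of `ptr`'s address
    // start out zero and are free to carry a tag.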
    assert_eq!(ptr.addr() & MASK_TAG, 0);

    assert_eq!(atom.fetch_or(0b0111, SeqCst), ptr);
    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b111));

    assert_eq!(atom.fetch_and(MASK_PTR | 0b0010, SeqCst), ptr.map_addr(|a| a | 0b111));
    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b0010));

    assert_eq!(atom.fetch_xor(0b1011, SeqCst), ptr.map_addr(|a| a | 0b0010));
    assert_eq!(atom.load(SeqCst), ptr.map_addr(|a| a | 0b1001));

    assert_eq!(atom.fetch_and(MASK_PTR, SeqCst), ptr.map_addr(|a| a | 0b1001));
    assert_eq!(atom.load(SeqCst), ptr);
}

static S_FALSE: AtomicBool = AtomicBool::new(false);
static S_TRUE: AtomicBool = AtomicBool::new(true);
static S_INT: AtomicIsize = AtomicIsize::new(0);
static S_UINT: AtomicUsize = AtomicUsize::new(0);

#[test]
fn static_init() {
    // Note that we're not really testing the mutability here but it's important
    // on Android at the moment (#49775)
    assert!(!S_FALSE.swap(true, SeqCst));
    assert!(S_TRUE.swap(false, SeqCst));
    assert!(S_INT.fetch_add(1, SeqCst) == 0);
    assert!(S_UINT.fetch_add(1, SeqCst) == 0);
}

#[test]
fn atomic_access_bool() {
    static mut ATOMIC: AtomicBool = AtomicBool::new(false);

    unsafe {
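        // Accessing a `static mut` requires `unsafe`; `get_mut` then gives
        // plain, non-atomic access to the value.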
        assert_eq!(*ATOMIC.get_mut(), false);
        ATOMIC.store(true, SeqCst);
        assert_eq!(*ATOMIC.get_mut(), true);
        ATOMIC.fetch_or(false, SeqCst);
        assert_eq!(*ATOMIC.get_mut(), true);
        ATOMIC.fetch_and(false, SeqCst);
        assert_eq!(*ATOMIC.get_mut(), false);
        ATOMIC.fetch_nand(true, SeqCst);
        assert_eq!(*ATOMIC.get_mut(), true);
        ATOMIC.fetch_xor(true, SeqCst);
        assert_eq!(*ATOMIC.get_mut(), false);
    }
}

#[test]
fn atomic_alignment() {
    use std::mem::{align_of, size_of};
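    // Every atomic type is expected to be aligned to its own size, even where
    // the underlying integer type has a smaller alignment.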
    #[cfg(target_has_atomic = "8")]
    assert_eq!(align_of::<AtomicBool>(), size_of::<AtomicBool>());
    #[cfg(target_has_atomic = "ptr")]
    assert_eq!(align_of::<AtomicPtr<u8>>(), size_of::<AtomicPtr<u8>>());
    #[cfg(target_has_atomic = "8")]
    assert_eq!(align_of::<AtomicU8>(), size_of::<AtomicU8>());
    #[cfg(target_has_atomic = "8")]
    assert_eq!(align_of::<AtomicI8>(), size_of::<AtomicI8>());
    #[cfg(target_has_atomic = "16")]
    assert_eq!(align_of::<AtomicU16>(), size_of::<AtomicU16>());
    #[cfg(target_has_atomic = "16")]
    assert_eq!(align_of::<AtomicI16>(), size_of::<AtomicI16>());
    #[cfg(target_has_atomic = "32")]
    assert_eq!(align_of::<AtomicU32>(), size_of::<AtomicU32>());
    #[cfg(target_has_atomic = "32")]
    assert_eq!(align_of::<AtomicI32>(), size_of::<AtomicI32>());
    #[cfg(target_has_atomic = "64")]
    assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
    #[cfg(target_has_atomic = "64")]
    assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
    #[cfg(target_has_atomic = "128")]
    assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
    #[cfg(target_has_atomic = "128")]
    assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
    #[cfg(target_has_atomic = "ptr")]
    assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
    #[cfg(target_has_atomic = "ptr")]
    assert_eq!(align_of::<AtomicIsize>(), size_of::<AtomicIsize>());
}

#[test]
fn atomic_compare_exchange() {
    use Ordering::*;

    static ATOMIC: AtomicIsize = AtomicIsize::new(0);
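    // Exercise a range of valid (success, failure) ordering pairs; the failure
    // ordering cannot be `Release` or `AcqRel`.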
    ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed).ok();
    ATOMIC.compare_exchange(0, 1, Acquire, Relaxed).ok();
    ATOMIC.compare_exchange(0, 1, Release, Relaxed).ok();
    ATOMIC.compare_exchange(0, 1, AcqRel, Relaxed).ok();
    ATOMIC.compare_exchange(0, 1, SeqCst, Relaxed).ok();
    ATOMIC.compare_exchange(0, 1, Acquire, Acquire).ok();
    ATOMIC.compare_exchange(0, 1, AcqRel, Acquire).ok();
    ATOMIC.compare_exchange(0, 1, SeqCst, Acquire).ok();
    ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok();
    ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed).ok();
    ATOMIC.compare_exchange_weak(0, 1, Acquire, Relaxed).ok();
    ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed).ok();
    ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok();
    ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok();
    ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok();
    ATOMIC.compare_exchange_weak(0, 1, AcqRel, Acquire).ok();
    ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire).ok();
    ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst).ok();
}

/* FIXME(#110395)
#[test]
fn atomic_const_from() {
    const _ATOMIC_U8: AtomicU8 = AtomicU8::from(1);
    const _ATOMIC_BOOL: AtomicBool = AtomicBool::from(true);
    const _ATOMIC_PTR: AtomicPtr<u32> = AtomicPtr::from(core::ptr::null_mut());
}
*/