1 use std::sync::atomic::AtomicUsize;
2 use std::sync::atomic::Ordering::SeqCst;
3
4 use crossbeam_utils::atomic::AtomicCell;
5
#[test]
fn is_lock_free() {
    // Wrappers check that lock-freedom is decided by size/alignment,
    // not by the concrete type.
    struct UsizeWrap(usize);
    struct U8Wrap(bool);
    struct I16Wrap(i16);

    // Word-sized values are always lock-free.
    let word_sized = [
        AtomicCell::<usize>::is_lock_free(),
        AtomicCell::<isize>::is_lock_free(),
        AtomicCell::<UsizeWrap>::is_lock_free(),
    ];
    for lf in word_sized {
        assert_eq!(lf, true);
    }

    // Byte-sized values are lock-free exactly when the target has AtomicU8.
    assert_eq!(AtomicCell::<u8>::is_lock_free(), cfg!(has_atomic_u8));
    assert_eq!(AtomicCell::<bool>::is_lock_free(), cfg!(has_atomic_u8));
    assert_eq!(AtomicCell::<U8Wrap>::is_lock_free(), cfg!(has_atomic_u8));

    // Likewise for 16-bit and 128-bit values.
    assert_eq!(AtomicCell::<I16Wrap>::is_lock_free(), cfg!(has_atomic_u16));
    assert_eq!(AtomicCell::<u128>::is_lock_free(), cfg!(has_atomic_u128));
}
24
#[test]
fn const_is_lock_free() {
    // `is_lock_free` must be usable in const context; this test only needs
    // to compile, so the results are bound to ignored constants.
    const _USIZE_LOCK_FREE: bool = AtomicCell::<usize>::is_lock_free();
    const _ISIZE_LOCK_FREE: bool = AtomicCell::<isize>::is_lock_free();
}
30
#[test]
fn drops_unit() {
    // Number of currently-alive `Foo` values: `new` increments, `drop`
    // decrements. The cell must hold exactly one value at all times.
    static CNT: AtomicUsize = AtomicUsize::new(0);
    CNT.store(0, SeqCst);

    #[derive(Debug, PartialEq, Eq)]
    struct Foo();

    impl Foo {
        fn new() -> Foo {
            CNT.fetch_add(1, SeqCst);
            Foo()
        }
    }

    impl Drop for Foo {
        fn drop(&mut self) {
            CNT.fetch_sub(1, SeqCst);
        }
    }

    impl Default for Foo {
        fn default() -> Foo {
            Foo::new()
        }
    }

    let cell = AtomicCell::new(Foo::new());

    // `swap` hands back the old value; it and the comparison temporary are
    // dropped right after the assertion, leaving one live value in the cell.
    assert_eq!(cell.swap(Foo::new()), Foo::new());
    assert_eq!(CNT.load(SeqCst), 1);

    // `store` must drop the previously held value.
    cell.store(Foo::new());
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(cell.swap(Foo::default()), Foo::new());
    assert_eq!(CNT.load(SeqCst), 1);

    // Dropping the cell drops its contained value.
    drop(cell);
    assert_eq!(CNT.load(SeqCst), 0);
}
72
#[test]
fn drops_u8() {
    // Tracks live `Foo` instances for a byte-sized payload; every
    // constructor call bumps the count and every drop lowers it.
    static CNT: AtomicUsize = AtomicUsize::new(0);
    CNT.store(0, SeqCst);

    #[derive(Debug, PartialEq, Eq)]
    struct Foo(u8);

    impl Foo {
        fn new(val: u8) -> Foo {
            CNT.fetch_add(1, SeqCst);
            Foo(val)
        }
    }

    impl Drop for Foo {
        fn drop(&mut self) {
            CNT.fetch_sub(1, SeqCst);
        }
    }

    impl Default for Foo {
        fn default() -> Foo {
            Foo::new(0)
        }
    }

    let cell = AtomicCell::new(Foo::new(5));

    // Each swap returns the previous value, which (plus the expected-value
    // temporary) is dropped at the end of the assertion — the live count
    // must stay pinned at one.
    assert_eq!(cell.swap(Foo::new(6)), Foo::new(5));
    assert_eq!(cell.swap(Foo::new(1)), Foo::new(6));
    assert_eq!(CNT.load(SeqCst), 1);

    // `store` drops the replaced value.
    cell.store(Foo::new(2));
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(cell.swap(Foo::default()), Foo::new(2));
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(cell.swap(Foo::default()), Foo::new(0));
    assert_eq!(CNT.load(SeqCst), 1);

    // Dropping the cell drops the last remaining value.
    drop(cell);
    assert_eq!(CNT.load(SeqCst), 0);
}
118
#[test]
fn drops_usize() {
    // Same drop-accounting scheme as `drops_u8`, but with a word-sized
    // payload so the lock-free fast path is exercised.
    static CNT: AtomicUsize = AtomicUsize::new(0);
    CNT.store(0, SeqCst);

    #[derive(Debug, PartialEq, Eq)]
    struct Foo(usize);

    impl Foo {
        fn new(val: usize) -> Foo {
            CNT.fetch_add(1, SeqCst);
            Foo(val)
        }
    }

    impl Drop for Foo {
        fn drop(&mut self) {
            CNT.fetch_sub(1, SeqCst);
        }
    }

    impl Default for Foo {
        fn default() -> Foo {
            Foo::new(0)
        }
    }

    let cell = AtomicCell::new(Foo::new(5));

    // Swapped-out values and assertion temporaries are dropped immediately;
    // exactly one `Foo` must remain alive inside the cell.
    assert_eq!(cell.swap(Foo::new(6)), Foo::new(5));
    assert_eq!(cell.swap(Foo::new(1)), Foo::new(6));
    assert_eq!(CNT.load(SeqCst), 1);

    // `store` drops the replaced value.
    cell.store(Foo::new(2));
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(cell.swap(Foo::default()), Foo::new(2));
    assert_eq!(CNT.load(SeqCst), 1);

    assert_eq!(cell.swap(Foo::default()), Foo::new(0));
    assert_eq!(CNT.load(SeqCst), 1);

    // Dropping the cell drops the held value.
    drop(cell);
    assert_eq!(CNT.load(SeqCst), 0);
}
164
#[test]
fn modular_u8() {
    // `Foo` compares equal modulo 5, so e.g. Foo(1) == Foo(11) == Foo(52).
    // This checks that `compare_exchange` succeeds based on the type's own
    // `PartialEq`, while `load` still returns the exact stored bits.
    #[derive(Clone, Copy, Eq, Debug, Default)]
    struct Foo(u8);

    impl Foo {
        fn residue(&self) -> u8 {
            self.0 % 5
        }
    }

    impl PartialEq for Foo {
        fn eq(&self, other: &Foo) -> bool {
            self.residue() == other.residue()
        }
    }

    let cell = AtomicCell::new(Foo(1));

    // All of these hold because equality is modular: 1 ≡ 52 ≡ 11 (mod 5)... no —
    // each pair below agrees mod 5 (1 vs 1, 1 vs 11? 11 % 5 == 1; 2 vs 52? 52 % 5 == 2).
    assert_eq!(cell.load(), Foo(1));
    assert_eq!(cell.swap(Foo(2)), Foo(11)); // old value 1 ≡ 11 (mod 5)
    assert_eq!(cell.load(), Foo(52)); // stored 2 ≡ 52 (mod 5)

    cell.store(Foo(0));
    // 100 ≡ 0 (mod 5), so the returned old value compares equal to Foo(100).
    assert_eq!(cell.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100)));
    // `.0` exposes the raw bits: exactly 5 was stored, not some equivalent.
    assert_eq!(cell.load().0, 5);
    // The expected value Foo(10) matches the stored 5 modulo 5.
    assert_eq!(cell.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100)));
    assert_eq!(cell.load().0, 15);
}
188
#[test]
fn modular_usize() {
    // Word-sized twin of `modular_u8`: equality is taken modulo 5, so the
    // assertions succeed even though the raw values differ. `load().0`
    // verifies the exact bits that were stored.
    #[derive(Clone, Copy, Eq, Debug, Default)]
    struct Foo(usize);

    impl Foo {
        fn residue(&self) -> usize {
            self.0 % 5
        }
    }

    impl PartialEq for Foo {
        fn eq(&self, other: &Foo) -> bool {
            self.residue() == other.residue()
        }
    }

    let cell = AtomicCell::new(Foo(1));

    assert_eq!(cell.load(), Foo(1));
    assert_eq!(cell.swap(Foo(2)), Foo(11)); // old value 1 ≡ 11 (mod 5)
    assert_eq!(cell.load(), Foo(52)); // stored 2 ≡ 52 (mod 5)

    cell.store(Foo(0));
    // Returned old value 0 compares equal to Foo(100) since 100 ≡ 0 (mod 5).
    assert_eq!(cell.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100)));
    assert_eq!(cell.load().0, 5);
    // Expected Foo(10) matches the stored 5 modulo 5, so the CAS succeeds.
    assert_eq!(cell.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100)));
    assert_eq!(cell.load().0, 15);
}
212
#[test]
fn garbage_padding() {
    // `Object` is 12 bytes of data (i64 + i32) padded to 16, so it carries
    // padding bytes whose contents are unspecified.
    #[derive(Copy, Clone, Eq, PartialEq)]
    struct Object {
        a: i64,
        b: i32,
    }

    let cell = AtomicCell::new(Object { a: 0, b: 0 });
    // NOTE(review): this array appears to exist to scribble 0xfe over stack
    // memory so that `next`'s padding bytes are likely nonzero garbage —
    // confirm; the original comment only said "Needed".
    let _garbage = [0xfe, 0xfe, 0xfe, 0xfe, 0xfe]; // Needed
    let next = Object { a: 0, b: 0 };

    // The CAS must succeed: `prev` and `next` are equal per `PartialEq`,
    // even if their padding bytes differ at the byte level.
    let prev = cell.load();
    assert!(cell.compare_exchange(prev, next).is_ok());
    // NOTE(review): presumably this call keeps the optimizer from eliding
    // or reordering the stack state above — TODO confirm.
    println!();
}
229
#[test]
fn const_atomic_cell_new() {
    // `AtomicCell::new` is a `const fn`, so it can initialize a `static`.
    static CELL: AtomicCell<usize> = AtomicCell::new(0);

    CELL.store(1);
    let observed = CELL.load();
    assert_eq!(observed, 1);
}
237