//! Tests for the scoped-task APIs (`scope` / `scope_fifo`).
use crate::unwind;
use crate::ThreadPoolBuilder;
use crate::{scope, scope_fifo, Scope, ScopeFifo};
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
use std::cmp;
use std::iter::once;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::vec;
11 
#[test]
fn scope_empty() {
    // A scope that spawns nothing should simply run to completion.
    scope(|_scope| {});
}
16 
#[test]
fn scope_result() {
    // The value returned by the scope closure is propagated to the caller.
    let result = scope(|_| 22);
    assert_eq!(result, 22);
}
22 
#[test]
fn scope_two() {
    let counter = &AtomicUsize::new(0);
    scope(|s| {
        // Two independent increments; both must have completed by the time
        // `scope` returns, since the scope joins all spawned work.
        for &amount in &[1, 10] {
            s.spawn(move |_| {
                counter.fetch_add(amount, Ordering::SeqCst);
            });
        }
    });

    assert_eq!(counter.load(Ordering::SeqCst), 11);
}
38 
#[test]
fn scope_divide_and_conquer() {
    // Run the recursive splitter both in parallel and sequentially and
    // check that both count the same number of leaves.
    let counter_p = &AtomicUsize::new(0);
    scope(|s| s.spawn(move |s| divide_and_conquer(s, counter_p, 1024)));

    let counter_s = AtomicUsize::new(0);
    divide_and_conquer_seq(&counter_s, 1024);

    assert_eq!(
        counter_p.load(Ordering::SeqCst),
        counter_s.load(Ordering::SeqCst)
    );
}
51 
divide_and_conquer<'scope>(scope: &Scope<'scope>, counter: &'scope AtomicUsize, size: usize)52 fn divide_and_conquer<'scope>(scope: &Scope<'scope>, counter: &'scope AtomicUsize, size: usize) {
53     if size > 1 {
54         scope.spawn(move |scope| divide_and_conquer(scope, counter, size / 2));
55         scope.spawn(move |scope| divide_and_conquer(scope, counter, size / 2));
56     } else {
57         // count the leaves
58         counter.fetch_add(1, Ordering::SeqCst);
59     }
60 }
61 
/// Sequential reference implementation of `divide_and_conquer`.
fn divide_and_conquer_seq(counter: &AtomicUsize, size: usize) {
    if size <= 1 {
        // Base case: count this leaf.
        counter.fetch_add(1, Ordering::SeqCst);
    } else {
        divide_and_conquer_seq(counter, size / 2);
        divide_and_conquer_seq(counter, size / 2);
    }
}
71 
/// A simple n-ary tree used to exercise nested scoped spawns.
struct Tree<T: Send> {
    // Payload stored at this node.
    value: T,
    // Zero or more subtrees.
    children: Vec<Tree<T>>,
}
76 
impl<T: Send> Tree<T> {
    /// Depth-first iterator over all values: this node first, then children.
    fn iter<'s>(&'s self) -> vec::IntoIter<&'s T> {
        once(&self.value)
            .chain(self.children.iter().flat_map(Tree::iter))
            .collect::<Vec<_>>() // seems like it shouldn't be needed... but prevents overflow
            .into_iter()
    }

    /// Applies `op` to every value in the tree, spawning the child updates
    /// in parallel inside a single scope.
    fn update<OP>(&mut self, op: OP)
    where
        OP: Fn(&mut T) + Sync,
        T: Send,
    {
        scope(|s| self.update_in_scope(&op, s));
    }

    // Spawn updates for all children into `scope`, then apply `op` to our
    // own value on the current thread. The `'scope` lifetime ties the
    // borrows of `self` and `op` to the scope so the spawns may hold them.
    fn update_in_scope<'scope, OP>(&'scope mut self, op: &'scope OP, scope: &Scope<'scope>)
    where
        OP: Fn(&mut T) + Sync,
    {
        // Split the borrow so `children` can move into the spawn while we
        // still mutate `value` below.
        let Tree {
            ref mut value,
            ref mut children,
        } = *self;
        scope.spawn(move |scope| {
            for child in children {
                scope.spawn(move |scope| child.update_in_scope(op, scope));
            }
        });

        op(value);
    }
}
110 
random_tree(depth: usize) -> Tree<u32>111 fn random_tree(depth: usize) -> Tree<u32> {
112     assert!(depth > 0);
113     let mut seed = <XorShiftRng as SeedableRng>::Seed::default();
114     (0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i);
115     let mut rng = XorShiftRng::from_seed(seed);
116     random_tree1(depth, &mut rng)
117 }
118 
random_tree1(depth: usize, rng: &mut XorShiftRng) -> Tree<u32>119 fn random_tree1(depth: usize, rng: &mut XorShiftRng) -> Tree<u32> {
120     let children = if depth == 0 {
121         vec![]
122     } else {
123         (0..rng.gen_range(0, 4)) // somewhere between 0 and 3 children at each level
124             .map(|_| random_tree1(depth - 1, rng))
125             .collect()
126     };
127 
128     Tree {
129         value: rng.gen_range(0, 1_000_000),
130         children,
131     }
132 }
133 
#[test]
fn update_tree() {
    let mut tree: Tree<u32> = random_tree(10);
    let before: Vec<u32> = tree.iter().cloned().collect();
    tree.update(|v| *v += 1);
    let after: Vec<u32> = tree.iter().cloned().collect();
    // Same node count, and every value incremented exactly once.
    assert_eq!(before.len(), after.len());
    for (&old, &new) in before.iter().zip(&after) {
        assert_eq!(old + 1, new);
    }
}
145 
/// Check that if you have a chain of scoped tasks where T0 spawns T1
/// spawns T2 and so forth down to Tn, the stack space should not grow
/// linearly with N. We test this by some unsafe hackery and
/// permitting an approx 10% change with a 10x input change.
#[test]
fn linear_stack_growth() {
    let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap();
    pool.install(|| {
        let mut max_diff = Mutex::new(0);
        let bottom_of_stack = 0;

        // Run the countdown chain and report the max stack distance seen so
        // far (note: `max_diff` is intentionally not reset between runs).
        let mut measure = |n| {
            scope(|s| the_final_countdown(s, &bottom_of_stack, &max_diff, n));
            *max_diff.get_mut().unwrap() as f64
        };

        let diff_when_5 = measure(5);
        let diff_when_500 = measure(500);

        let ratio = diff_when_5 / diff_when_500;
        assert!(
            ratio > 0.9 && ratio < 1.1,
            "stack usage ratio out of bounds: {}",
            ratio
        );
    });
}
171 
the_final_countdown<'scope>( s: &Scope<'scope>, bottom_of_stack: &'scope i32, max: &'scope Mutex<usize>, n: usize, )172 fn the_final_countdown<'scope>(
173     s: &Scope<'scope>,
174     bottom_of_stack: &'scope i32,
175     max: &'scope Mutex<usize>,
176     n: usize,
177 ) {
178     let top_of_stack = 0;
179     let p = bottom_of_stack as *const i32 as usize;
180     let q = &top_of_stack as *const i32 as usize;
181     let diff = if p > q { p - q } else { q - p };
182 
183     let mut data = max.lock().unwrap();
184     *data = cmp::max(diff, *data);
185 
186     if n > 0 {
187         s.spawn(move |s| the_final_countdown(s, bottom_of_stack, max, n - 1));
188     }
189 }
190 
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_scope() {
    // A panic inside the scope closure itself propagates to the caller.
    scope(|_scope| {
        panic!("Hello, world!");
    });
}
196 
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_spawn() {
    // A panic inside a spawned job propagates out of the enclosing scope.
    scope(|s| {
        s.spawn(|_| panic!("Hello, world!"));
    });
}
202 
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_nested_spawn() {
    // A panic several spawns deep still propagates to the outermost scope.
    scope(|s| {
        s.spawn(|s| {
            s.spawn(|s| {
                s.spawn(|_| panic!("Hello, world!"));
            });
        });
    });
}
208 
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_nested_scope_spawn() {
    // A panic inside an inner scope's spawn propagates through the outer one.
    scope(|s| {
        s.spawn(|_| {
            scope(|s| {
                s.spawn(|_| panic!("Hello, world!"));
            });
        });
    });
}
214 
#[test]
fn panic_propagate_still_execute_1() {
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| panic!("Hello, world!")); // job A
            s.spawn(|_| x = true); // job B, should still execute even though A panics
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "job b failed to execute");
}
228 
#[test]
fn panic_propagate_still_execute_2() {
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| x = true); // job B, should still execute even though A panics
            s.spawn(|_| panic!("Hello, world!")); // job A
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "job b failed to execute");
}
242 
#[test]
fn panic_propagate_still_execute_3() {
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| x = true); // spawned job should still execute despite later panic
            panic!("Hello, world!");
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "panic after spawn, spawn failed to execute");
}
256 
#[test]
fn panic_propagate_still_execute_4() {
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| panic!("Hello, world!"));
            x = true; // rest of the scope body runs despite the panicking spawn
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "panic in spawn tainted scope");
}
270 
// Runs 10x10 nested spawns on a single-threaded pool (so no stealing can
// reorder jobs) and returns the order in which the leaf jobs executed.
macro_rules! test_order {
    ($scope:ident => $spawn:ident) => {{
        let builder = ThreadPoolBuilder::new().num_threads(1);
        let pool = builder.build().unwrap();
        pool.install(|| {
            let vec = Mutex::new(vec![]);
            $scope(|scope| {
                let vec = &vec;
                for i in 0..10 {
                    scope.$spawn(move |scope| {
                        for j in 0..10 {
                            scope.$spawn(move |_| {
                                // Leaf job: record its identity (0..100).
                                vec.lock().unwrap().push(i * 10 + j);
                            });
                        }
                    });
                }
            });
            vec.into_inner().unwrap()
        })
    }};
}
293 
#[test]
fn lifo_order() {
    // In the absence of stealing, `scope()` runs its `spawn()` jobs in LIFO order.
    let actual = test_order!(scope => spawn);
    let mut expected: Vec<i32> = (0..100).collect();
    expected.reverse(); // LIFO -> reversed
    assert_eq!(actual, expected);
}
301 
#[test]
fn fifo_order() {
    // In the absence of stealing, `scope_fifo()` runs its `spawn_fifo()` jobs in FIFO order.
    let actual = test_order!(scope_fifo => spawn_fifo);
    let expected: Vec<i32> = (0..100).collect(); // FIFO -> natural order
    assert_eq!(actual, expected);
}
309 
// Like `test_order!`, but each outer spawn opens a fresh inner scope, so
// LIFO/FIFO behavior can be mixed between the two nesting levels.
macro_rules! test_nested_order {
    ($outer_scope:ident => $outer_spawn:ident,
     $inner_scope:ident => $inner_spawn:ident) => {{
        let builder = ThreadPoolBuilder::new().num_threads(1);
        let pool = builder.build().unwrap();
        pool.install(|| {
            let vec = Mutex::new(vec![]);
            $outer_scope(|scope| {
                let vec = &vec;
                for i in 0..10 {
                    scope.$outer_spawn(move |_| {
                        $inner_scope(|scope| {
                            for j in 0..10 {
                                scope.$inner_spawn(move |_| {
                                    // Leaf job: record its identity (0..100).
                                    vec.lock().unwrap().push(i * 10 + j);
                                });
                            }
                        });
                    });
                }
            });
            vec.into_inner().unwrap()
        })
    }};
}
335 
#[test]
fn nested_lifo_order() {
    // In the absence of stealing, `scope()` runs its `spawn()` jobs in LIFO order.
    let actual = test_nested_order!(scope => spawn, scope => spawn);
    let mut expected: Vec<i32> = (0..100).collect();
    expected.reverse(); // LIFO -> reversed
    assert_eq!(actual, expected);
}
343 
#[test]
fn nested_fifo_order() {
    // In the absence of stealing, `scope_fifo()` runs its `spawn_fifo()` jobs in FIFO order.
    let actual = test_nested_order!(scope_fifo => spawn_fifo, scope_fifo => spawn_fifo);
    let expected: Vec<i32> = (0..100).collect(); // FIFO -> natural order
    assert_eq!(actual, expected);
}
351 
#[test]
fn nested_lifo_fifo_order() {
    // LIFO on the outside, FIFO on the inside
    let actual = test_nested_order!(scope => spawn, scope_fifo => spawn_fifo);
    let mut expected: Vec<i32> = Vec::with_capacity(100);
    for i in (0..10).rev() {
        for j in 0..10 {
            expected.push(i * 10 + j);
        }
    }
    assert_eq!(actual, expected);
}
362 
#[test]
fn nested_fifo_lifo_order() {
    // FIFO on the outside, LIFO on the inside
    let actual = test_nested_order!(scope_fifo => spawn_fifo, scope => spawn);
    let mut expected: Vec<i32> = Vec::with_capacity(100);
    for i in 0..10 {
        for j in (0..10).rev() {
            expected.push(i * 10 + j);
        }
    }
    assert_eq!(actual, expected);
}
372 
// Spawns a job on `$scope` that pushes `$i` onto the shared `$vec`.
macro_rules! spawn_push {
    ($scope:ident . $spawn:ident, $vec:ident, $i:expr) => {{
        $scope.$spawn(move |_| $vec.lock().unwrap().push($i));
    }};
}
378 
/// Test spawns pushing a series of numbers, interleaved
/// such that negative values are using an inner scope.
macro_rules! test_mixed_order {
    ($outer_scope:ident => $outer_spawn:ident,
     $inner_scope:ident => $inner_spawn:ident) => {{
        let builder = ThreadPoolBuilder::new().num_threads(1);
        let pool = builder.build().unwrap();
        pool.install(|| {
            let vec = Mutex::new(vec![]);
            $outer_scope(|outer_scope| {
                let vec = &vec;
                spawn_push!(outer_scope.$outer_spawn, vec, 0);
                $inner_scope(|inner_scope| {
                    // Inner-scope spawns (negative) interleave with outer ones.
                    spawn_push!(inner_scope.$inner_spawn, vec, -1);
                    spawn_push!(outer_scope.$outer_spawn, vec, 1);
                    spawn_push!(inner_scope.$inner_spawn, vec, -2);
                    spawn_push!(outer_scope.$outer_spawn, vec, 2);
                    spawn_push!(inner_scope.$inner_spawn, vec, -3);
                });
                spawn_push!(outer_scope.$outer_spawn, vec, 3);
            });
            vec.into_inner().unwrap()
        })
    }};
}
404 
#[test]
fn mixed_lifo_order() {
    // NB: the end of the inner scope makes us execute some of the outer scope
    // before they've all been spawned, so they're not perfectly LIFO.
    let actual = test_mixed_order!(scope => spawn, scope => spawn);
    assert_eq!(actual, [-3, 2, -2, 1, -1, 3, 0]);
}
413 
#[test]
fn mixed_fifo_order() {
    // FIFO on both levels: inner and outer jobs alternate in spawn order.
    let actual = test_mixed_order!(scope_fifo => spawn_fifo, scope_fifo => spawn_fifo);
    assert_eq!(actual, [-1, 0, -2, 1, -3, 2, 3]);
}
420 
#[test]
fn mixed_lifo_fifo_order() {
    // NB: the end of the inner scope makes us execute some of the outer scope
    // before they've all been spawned, so they're not perfectly LIFO.
    let actual = test_mixed_order!(scope => spawn, scope_fifo => spawn_fifo);
    assert_eq!(actual, [-1, 2, -2, 1, -3, 3, 0]);
}
429 
#[test]
fn mixed_fifo_lifo_order() {
    // FIFO on the outside, LIFO on the inside.
    let actual = test_mixed_order!(scope_fifo => spawn_fifo, scope => spawn);
    assert_eq!(actual, [-3, 0, -2, 1, -1, 2, 3]);
}
436 
#[test]
fn static_scope() {
    static COUNTER: AtomicUsize = AtomicUsize::new(0);

    let mut range = 0..100;
    let sum = range.clone().sum();
    let iter = &mut range;

    COUNTER.store(0, Ordering::Relaxed);
    scope(|s: &Scope<'static>| {
        // While we're allowed the locally borrowed iterator,
        // the spawns must be static.
        iter.for_each(|i| {
            s.spawn(move |_| {
                COUNTER.fetch_add(i, Ordering::Relaxed);
            });
        });
    });

    assert_eq!(COUNTER.load(Ordering::Relaxed), sum);
}
458 
#[test]
fn static_scope_fifo() {
    static COUNTER: AtomicUsize = AtomicUsize::new(0);

    let mut range = 0..100;
    let sum = range.clone().sum();
    let iter = &mut range;

    COUNTER.store(0, Ordering::Relaxed);
    scope_fifo(|s: &ScopeFifo<'static>| {
        // While we're allowed the locally borrowed iterator,
        // the spawns must be static.
        iter.for_each(|i| {
            s.spawn_fifo(move |_| {
                COUNTER.fetch_add(i, Ordering::Relaxed);
            });
        });
    });

    assert_eq!(COUNTER.load(Ordering::Relaxed), sum);
}
480 
#[test]
fn mixed_lifetime_scope() {
    fn increment<'slice, 'counter>(counters: &'slice [&'counter AtomicUsize]) {
        scope(move |s: &Scope<'counter>| {
            // We can borrow 'slice here, but the spawns can only borrow 'counter.
            counters.iter().copied().for_each(|counter| {
                s.spawn(move |_| {
                    counter.fetch_add(1, Ordering::Relaxed);
                });
            });
        });
    }

    let counter = AtomicUsize::new(0);
    increment(&[&counter; 100]);
    assert_eq!(counter.into_inner(), 100);
}
498 
#[test]
fn mixed_lifetime_scope_fifo() {
    fn increment<'slice, 'counter>(counters: &'slice [&'counter AtomicUsize]) {
        scope_fifo(move |s: &ScopeFifo<'counter>| {
            // We can borrow 'slice here, but the spawns can only borrow 'counter.
            counters.iter().copied().for_each(|counter| {
                s.spawn_fifo(move |_| {
                    counter.fetch_add(1, Ordering::Relaxed);
                });
            });
        });
    }

    let counter = AtomicUsize::new(0);
    increment(&[&counter; 100]);
    assert_eq!(counter.into_inner(), 100);
}
516