1 use crate::unwind;
2 use crate::ThreadPoolBuilder;
3 use crate::{scope, scope_fifo, Scope, ScopeFifo};
4 use rand::{Rng, SeedableRng};
5 use rand_xorshift::XorShiftRng;
6 use std::cmp;
7 use std::iter::once;
8 use std::sync::atomic::{AtomicUsize, Ordering};
9 use std::sync::{Barrier, Mutex};
10 use std::vec;
11
#[test]
fn scope_empty() {
    // A scope that spawns nothing must still complete and return.
    scope(|_| ());
}
16
#[test]
fn scope_result() {
    // The value returned by the scope closure is returned by `scope` itself.
    assert_eq!(scope(|_| 22), 22);
}
22
#[test]
fn scope_two() {
    // Both spawned jobs must run before `scope` returns; distinct addends
    // (1 and 10) prove each ran exactly once.
    let total = &AtomicUsize::new(0);
    scope(|s| {
        for &delta in &[1usize, 10] {
            s.spawn(move |_| {
                total.fetch_add(delta, Ordering::SeqCst);
            });
        }
    });

    assert_eq!(total.load(Ordering::SeqCst), 11);
}
38
#[test]
fn scope_divide_and_conquer() {
    // The parallel recursion must count exactly as many leaves as the
    // sequential reference implementation.
    let parallel_count = &AtomicUsize::new(0);
    scope(|s| s.spawn(move |s| divide_and_conquer(s, parallel_count, 1024)));

    let sequential_count = &AtomicUsize::new(0);
    divide_and_conquer_seq(sequential_count, 1024);

    assert_eq!(
        parallel_count.load(Ordering::SeqCst),
        sequential_count.load(Ordering::SeqCst)
    );
}
51
divide_and_conquer<'scope>(scope: &Scope<'scope>, counter: &'scope AtomicUsize, size: usize)52 fn divide_and_conquer<'scope>(scope: &Scope<'scope>, counter: &'scope AtomicUsize, size: usize) {
53 if size > 1 {
54 scope.spawn(move |scope| divide_and_conquer(scope, counter, size / 2));
55 scope.spawn(move |scope| divide_and_conquer(scope, counter, size / 2));
56 } else {
57 // count the leaves
58 counter.fetch_add(1, Ordering::SeqCst);
59 }
60 }
61
/// Sequential reference for `divide_and_conquer`: same recursion shape,
/// same leaf count, no spawning.
fn divide_and_conquer_seq(counter: &AtomicUsize, size: usize) {
    if size <= 1 {
        // Reached a leaf: record it.
        counter.fetch_add(1, Ordering::SeqCst);
        return;
    }
    // Recurse into both halves.
    for _ in 0..2 {
        divide_and_conquer_seq(counter, size / 2);
    }
}
71
/// A simple n-ary tree used to exercise recursive scoped spawns.
struct Tree<T: Send> {
    // Payload stored at this node.
    value: T,
    // Child subtrees; empty for leaf nodes.
    children: Vec<Tree<T>>,
}
76
impl<T: Send> Tree<T> {
    /// Returns a pre-order iterator over references to every value in the
    /// tree: this node first, then each child subtree in order.
    fn iter(&self) -> vec::IntoIter<&T> {
        once(&self.value)
            .chain(self.children.iter().flat_map(Tree::iter))
            .collect::<Vec<_>>() // seems like it shouldn't be needed... but prevents overflow
            .into_iter()
    }

    /// Applies `op` to every value in the tree, fanning the work out in
    /// parallel via a fresh scope.
    fn update<OP>(&mut self, op: OP)
    where
        OP: Fn(&mut T) + Sync,
        T: Send,
    {
        scope(|s| self.update_in_scope(&op, s));
    }

    /// Recursive worker for `update`: spawns a task that recurses into the
    /// children, then applies `op` to this node's own value inline.
    fn update_in_scope<'scope, OP>(&'scope mut self, op: &'scope OP, scope: &Scope<'scope>)
    where
        OP: Fn(&mut T) + Sync,
    {
        // Split the borrow of `self` so `children` can move into the spawned
        // closure while `value` remains usable below.
        let Tree {
            ref mut value,
            ref mut children,
        } = *self;
        scope.spawn(move |scope| {
            for child in children {
                scope.spawn(move |scope| child.update_in_scope(op, scope));
            }
        });

        op(value);
    }
}
110
random_tree(depth: usize) -> Tree<u32>111 fn random_tree(depth: usize) -> Tree<u32> {
112 assert!(depth > 0);
113 let mut seed = <XorShiftRng as SeedableRng>::Seed::default();
114 (0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i);
115 let mut rng = XorShiftRng::from_seed(seed);
116 random_tree1(depth, &mut rng)
117 }
118
random_tree1(depth: usize, rng: &mut XorShiftRng) -> Tree<u32>119 fn random_tree1(depth: usize, rng: &mut XorShiftRng) -> Tree<u32> {
120 let children = if depth == 0 {
121 vec![]
122 } else {
123 (0..rng.gen_range(0..4)) // somewhere between 0 and 3 children at each level
124 .map(|_| random_tree1(depth - 1, rng))
125 .collect()
126 };
127
128 Tree {
129 value: rng.gen_range(0..1_000_000),
130 children,
131 }
132 }
133
#[test]
fn update_tree() {
    // `update` increments every node in parallel; verify each value grew by
    // exactly one and no nodes were added or lost.
    let mut tree: Tree<u32> = random_tree(10);
    let before: Vec<u32> = tree.iter().cloned().collect();
    tree.update(|v| *v += 1);
    let after: Vec<u32> = tree.iter().cloned().collect();
    assert_eq!(before.len(), after.len());
    for (&old, &new) in before.iter().zip(&after) {
        assert_eq!(old + 1, new);
    }
}
145
/// Check that if you have a chain of scoped tasks where T0 spawns T1
/// spawns T2 and so forth down to Tn, the stack space should not grow
/// linearly with N. We test this by some unsafe hackery and
/// permitting an approx 10% change with a 10x input change.
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn linear_stack_growth() {
    // Single worker thread so every countdown task runs on the same stack.
    let builder = ThreadPoolBuilder::new().num_threads(1);
    let pool = builder.build().unwrap();
    pool.install(|| {
        let mut max_diff = Mutex::new(0);
        // Address of this local marks the "bottom" of the measured stack span.
        let bottom_of_stack = 0;
        scope(|s| the_final_countdown(s, &bottom_of_stack, &max_diff, 5));
        let diff_when_5 = *max_diff.get_mut().unwrap() as f64;

        // NOTE(review): `max_diff` is not reset between runs, so this second
        // measurement is the max over both runs combined.
        scope(|s| the_final_countdown(s, &bottom_of_stack, &max_diff, 500));
        let diff_when_500 = *max_diff.get_mut().unwrap() as f64;

        // 100x more chained tasks should use roughly the same peak stack.
        let ratio = diff_when_5 / diff_when_500;
        assert!(
            ratio > 0.9 && ratio < 1.1,
            "stack usage ratio out of bounds: {}",
            ratio
        );
    });
}
172
/// Spawns a chain of `n` scoped tasks; each records into `max` the distance
/// between the address of its own local and `bottom_of_stack`, approximating
/// peak stack usage for `linear_stack_growth`.
fn the_final_countdown<'scope>(
    s: &Scope<'scope>,
    bottom_of_stack: &'scope i32,
    max: &'scope Mutex<usize>,
    n: usize,
) {
    // Address of this frame's local approximates the current stack pointer.
    let top_of_stack = 0;
    let p = bottom_of_stack as *const i32 as usize;
    let q = &top_of_stack as *const i32 as usize;
    // The stack may grow in either direction; take the absolute difference.
    let diff = if p > q { p - q } else { q - p };

    let mut data = max.lock().unwrap();
    *data = cmp::max(diff, *data);

    if n > 0 {
        s.spawn(move |s| the_final_countdown(s, bottom_of_stack, max, n - 1));
    }
}
191
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_scope() {
    // A panic inside the scope closure itself must propagate to the caller.
    scope(|_| {
        panic!("Hello, world!");
    });
}
197
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_spawn() {
    // A panic inside a spawned job must propagate out of the scope.
    scope(|s| {
        s.spawn(|_| panic!("Hello, world!"));
    });
}
203
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_nested_spawn() {
    // A panic three spawns deep must still propagate out of the scope.
    scope(|s| {
        s.spawn(|s| {
            s.spawn(|s| {
                s.spawn(|_| panic!("Hello, world!"));
            });
        });
    });
}
209
#[test]
#[should_panic(expected = "Hello, world!")]
fn panic_propagate_nested_scope_spawn() {
    // A panic inside a nested scope's spawn must bubble through both scopes.
    scope(|outer| {
        outer.spawn(|_| {
            scope(|inner| {
                inner.spawn(|_| panic!("Hello, world!"));
            });
        });
    });
}
215
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_propagate_still_execute_1() {
    // Even though job A panics, job B (spawned after it) must still run.
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| panic!("Hello, world!")); // job A
            s.spawn(|_| x = true); // job B, should still execute even though A panics
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "job b failed to execute");
}
230
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_propagate_still_execute_2() {
    // Same as `_1`, but the panicking job is spawned second.
    let mut x = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| x = true); // job B, should still execute even though A panics
            s.spawn(|_| panic!("Hello, world!")); // job A
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(x, "job b failed to execute");
}
245
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_propagate_still_execute_3() {
    // A panic in the scope body itself must not cancel an already-spawned job.
    let mut ran = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| ran = true); // spawned job should still execute despite later panic
            panic!("Hello, world!");
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(ran, "panic after spawn, spawn failed to execute");
}
260
#[test]
#[cfg_attr(not(panic = "unwind"), ignore)]
fn panic_propagate_still_execute_4() {
    // A panicking spawn must not prevent the rest of the scope body from running.
    let mut reached = false;
    let result = unwind::halt_unwinding(|| {
        scope(|s| {
            s.spawn(|_| panic!("Hello, world!"));
            reached = true;
        });
    });
    assert!(result.is_err(), "failed to propagate panic");
    assert!(reached, "panic in spawn tainted scope");
}
275
/// Runs `$scope` on a single-threaded pool, spawning 10 outer jobs that each
/// spawn 10 inner jobs pushing `i * 10 + j`; returns the order in which the
/// pushes executed. One thread means no stealing, so the order is
/// deterministic and reflects the scope's queueing discipline.
macro_rules! test_order {
    ($scope:ident => $spawn:ident) => {{
        let builder = ThreadPoolBuilder::new().num_threads(1);
        let pool = builder.build().unwrap();
        pool.install(|| {
            let vec = Mutex::new(vec![]);
            $scope(|scope| {
                let vec = &vec;
                for i in 0..10 {
                    scope.$spawn(move |scope| {
                        for j in 0..10 {
                            scope.$spawn(move |_| {
                                vec.lock().unwrap().push(i * 10 + j);
                            });
                        }
                    });
                }
            });
            vec.into_inner().unwrap()
        })
    }};
}
298
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn lifo_order() {
    // In the absence of stealing, `scope()` runs its `spawn()` jobs in LIFO order.
    let actual = test_order!(scope => spawn);
    let mut expected: Vec<i32> = (0..100).collect();
    expected.reverse(); // LIFO -> reversed
    assert_eq!(actual, expected);
}
307
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn fifo_order() {
    // In the absence of stealing, `scope_fifo()` runs its `spawn_fifo()` jobs in FIFO order.
    let actual = test_order!(scope_fifo => spawn_fifo);
    assert_eq!(actual, (0..100).collect::<Vec<i32>>()); // FIFO -> natural order
}
316
/// Like `test_order!`, but each of the 10 outer jobs opens a *nested*
/// `$inner_scope` for its 10 inner pushes, so the two scope flavors'
/// orderings compose. Runs on one thread for determinism.
macro_rules! test_nested_order {
    ($outer_scope:ident => $outer_spawn:ident,
     $inner_scope:ident => $inner_spawn:ident) => {{
        let builder = ThreadPoolBuilder::new().num_threads(1);
        let pool = builder.build().unwrap();
        pool.install(|| {
            let vec = Mutex::new(vec![]);
            $outer_scope(|scope| {
                let vec = &vec;
                for i in 0..10 {
                    scope.$outer_spawn(move |_| {
                        $inner_scope(|scope| {
                            for j in 0..10 {
                                scope.$inner_spawn(move |_| {
                                    vec.lock().unwrap().push(i * 10 + j);
                                });
                            }
                        });
                    });
                }
            });
            vec.into_inner().unwrap()
        })
    }};
}
342
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn nested_lifo_order() {
    // In the absence of stealing, `scope()` runs its `spawn()` jobs in LIFO order.
    let actual = test_nested_order!(scope => spawn, scope => spawn);
    let mut expected: Vec<i32> = (0..100).collect();
    expected.reverse(); // LIFO -> reversed
    assert_eq!(actual, expected);
}
351
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn nested_fifo_order() {
    // In the absence of stealing, `scope_fifo()` runs its `spawn_fifo()` jobs in FIFO order.
    let actual = test_nested_order!(scope_fifo => spawn_fifo, scope_fifo => spawn_fifo);
    assert_eq!(actual, (0..100).collect::<Vec<i32>>()); // FIFO -> natural order
}
360
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn nested_lifo_fifo_order() {
    // LIFO on the outside, FIFO on the inside: outer groups reversed,
    // each inner group in natural order.
    let actual = test_nested_order!(scope => spawn, scope_fifo => spawn_fifo);
    let mut expected = Vec::with_capacity(100);
    for i in (0..10).rev() {
        for j in 0..10 {
            expected.push(i * 10 + j);
        }
    }
    assert_eq!(actual, expected);
}
372
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn nested_fifo_lifo_order() {
    // FIFO on the outside, LIFO on the inside: outer groups in natural
    // order, each inner group reversed.
    let actual = test_nested_order!(scope_fifo => spawn_fifo, scope => spawn);
    let mut expected = Vec::with_capacity(100);
    for i in 0..10 {
        for j in (0..10).rev() {
            expected.push(i * 10 + j);
        }
    }
    assert_eq!(actual, expected);
}
383
/// Spawns a job on `$scope` (via the given spawn method) that pushes `$i`
/// onto the shared `Mutex<Vec<_>>` named by `$vec`.
macro_rules! spawn_push {
    ($scope:ident . $spawn:ident, $vec:ident, $i:expr) => {{
        $scope.$spawn(move |_| $vec.lock().unwrap().push($i));
    }};
}
389
/// Test spawns pushing a series of numbers, interleaved
/// such that negative values are using an inner scope.
/// Runs on a single-threaded pool so the execution order is deterministic;
/// returns the order in which the pushes actually ran.
macro_rules! test_mixed_order {
    ($outer_scope:ident => $outer_spawn:ident,
     $inner_scope:ident => $inner_spawn:ident) => {{
        let builder = ThreadPoolBuilder::new().num_threads(1);
        let pool = builder.build().unwrap();
        pool.install(|| {
            let vec = Mutex::new(vec![]);
            $outer_scope(|outer_scope| {
                let vec = &vec;
                spawn_push!(outer_scope.$outer_spawn, vec, 0);
                $inner_scope(|inner_scope| {
                    spawn_push!(inner_scope.$inner_spawn, vec, -1);
                    spawn_push!(outer_scope.$outer_spawn, vec, 1);
                    spawn_push!(inner_scope.$inner_spawn, vec, -2);
                    spawn_push!(outer_scope.$outer_spawn, vec, 2);
                    spawn_push!(inner_scope.$inner_spawn, vec, -3);
                });
                spawn_push!(outer_scope.$outer_spawn, vec, 3);
            });
            vec.into_inner().unwrap()
        })
    }};
}
415
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn mixed_lifo_order() {
    // NB: the end of the inner scope makes us execute some of the outer scope
    // before they've all been spawned, so they're not perfectly LIFO.
    let actual = test_mixed_order!(scope => spawn, scope => spawn);
    assert_eq!(actual, [-3, 2, -2, 1, -1, 3, 0]);
}
425
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn mixed_fifo_order() {
    // Both scopes FIFO: jobs run roughly in spawn order, inner scope first.
    let actual = test_mixed_order!(scope_fifo => spawn_fifo, scope_fifo => spawn_fifo);
    assert_eq!(actual, [-1, 0, -2, 1, -3, 2, 3]);
}
433
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn mixed_lifo_fifo_order() {
    // NB: the end of the inner scope makes us execute some of the outer scope
    // before they've all been spawned, so they're not perfectly LIFO.
    let actual = test_mixed_order!(scope => spawn, scope_fifo => spawn_fifo);
    assert_eq!(actual, [-1, 2, -2, 1, -3, 3, 0]);
}
443
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn mixed_fifo_lifo_order() {
    // Outer FIFO, inner LIFO: inner pushes come out reversed between the
    // outer scope's in-order jobs.
    let actual = test_mixed_order!(scope_fifo => spawn_fifo, scope => spawn);
    assert_eq!(actual, [-3, 0, -2, 1, -1, 2, 3]);
}
451
// Compile-and-run check: a `'static` scope may still be driven by a locally
// borrowed iterator, as long as each spawned closure captures only `'static`
// data (here, the `static COUNTER`).
#[test]
fn static_scope() {
    static COUNTER: AtomicUsize = AtomicUsize::new(0);

    let mut range = 0..100;
    let sum = range.clone().sum();
    let iter = &mut range;

    // Reset in case tests share the static across runs.
    COUNTER.store(0, Ordering::Relaxed);
    scope(|s: &Scope<'static>| {
        // While we're allowed the locally borrowed iterator,
        // the spawns must be static.
        for i in iter {
            s.spawn(move |_| {
                COUNTER.fetch_add(i, Ordering::Relaxed);
            });
        }
    });

    assert_eq!(COUNTER.load(Ordering::Relaxed), sum);
}
473
// FIFO twin of `static_scope`: a `'static` `scope_fifo` driven by a locally
// borrowed iterator, with spawns capturing only `'static` data.
#[test]
fn static_scope_fifo() {
    static COUNTER: AtomicUsize = AtomicUsize::new(0);

    let mut range = 0..100;
    let sum = range.clone().sum();
    let iter = &mut range;

    // Reset in case tests share the static across runs.
    COUNTER.store(0, Ordering::Relaxed);
    scope_fifo(|s: &ScopeFifo<'static>| {
        // While we're allowed the locally borrowed iterator,
        // the spawns must be static.
        for i in iter {
            s.spawn_fifo(move |_| {
                COUNTER.fetch_add(i, Ordering::Relaxed);
            });
        }
    });

    assert_eq!(COUNTER.load(Ordering::Relaxed), sum);
}
495
// Compile-and-run check: the scope lifetime (`'counter`) may be distinct
// from (and outlived by data borrowed for) `'slice`; only the spawned
// closures are constrained to `'counter`.
#[test]
fn mixed_lifetime_scope() {
    fn increment<'slice, 'counter>(counters: &'slice [&'counter AtomicUsize]) {
        scope(move |s: &Scope<'counter>| {
            // We can borrow 'slice here, but the spawns can only borrow 'counter.
            for &c in counters {
                s.spawn(move |_| {
                    c.fetch_add(1, Ordering::Relaxed);
                });
            }
        });
    }

    let counter = AtomicUsize::new(0);
    increment(&[&counter; 100]);
    assert_eq!(counter.into_inner(), 100);
}
513
// FIFO twin of `mixed_lifetime_scope`: distinct slice and counter lifetimes,
// with spawns constrained only to `'counter`.
#[test]
fn mixed_lifetime_scope_fifo() {
    fn increment<'slice, 'counter>(counters: &'slice [&'counter AtomicUsize]) {
        scope_fifo(move |s: &ScopeFifo<'counter>| {
            // We can borrow 'slice here, but the spawns can only borrow 'counter.
            for &c in counters {
                s.spawn_fifo(move |_| {
                    c.fetch_add(1, Ordering::Relaxed);
                });
            }
        });
    }

    let counter = AtomicUsize::new(0);
    increment(&[&counter; 100]);
    assert_eq!(counter.into_inner(), 100);
}
531
#[test]
fn scope_spawn_broadcast() {
    // Broadcast runs once per pool thread; summing the distinct indices
    // 0..threads proves every thread participated exactly once.
    let total = AtomicUsize::new(0);
    let threads = scope(|s| {
        s.spawn_broadcast(|_, ctx| {
            total.fetch_add(ctx.index(), Ordering::Relaxed);
        });
        crate::current_num_threads()
    });
    assert_eq!(total.into_inner(), threads * (threads - 1) / 2);
}
543
#[test]
fn scope_fifo_spawn_broadcast() {
    // FIFO twin of `scope_spawn_broadcast`: one broadcast task per thread.
    let total = AtomicUsize::new(0);
    let threads = scope_fifo(|s| {
        s.spawn_broadcast(|_, ctx| {
            total.fetch_add(ctx.index(), Ordering::Relaxed);
        });
        crate::current_num_threads()
    });
    assert_eq!(total.into_inner(), threads * (threads - 1) / 2);
}
555
#[test]
fn scope_spawn_broadcast_nested() {
    // Each of the n broadcast tasks broadcasts again, so every thread index
    // is accumulated n times: total = n * (0 + 1 + ... + n-1).
    let total = AtomicUsize::new(0);
    let threads = scope(|s| {
        s.spawn_broadcast(|s, _| {
            s.spawn_broadcast(|_, ctx| {
                total.fetch_add(ctx.index(), Ordering::Relaxed);
            });
        });
        crate::current_num_threads()
    });
    assert_eq!(total.into_inner(), threads * threads * (threads - 1) / 2);
}
569
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn scope_spawn_broadcast_barrier() {
    // Seven pool threads plus the calling thread all meet at one 8-party
    // barrier, proving the broadcast really ran concurrently on every thread.
    let rendezvous = Barrier::new(8);
    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
    pool.in_place_scope(|s| {
        s.spawn_broadcast(|_, _| {
            rendezvous.wait();
        });
        rendezvous.wait();
    });
}
582
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn scope_spawn_broadcast_panic_one() {
    // One of the seven broadcast tasks panics; all seven must still run,
    // and the panic must surface from the scope.
    let ran = AtomicUsize::new(0);
    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
    let outcome = unwind::halt_unwinding(|| {
        pool.scope(|s| {
            s.spawn_broadcast(|_, ctx| {
                ran.fetch_add(1, Ordering::Relaxed);
                if ctx.index() == 3 {
                    panic!("Hello, world!");
                }
            });
        });
    });
    assert_eq!(ran.into_inner(), 7);
    assert!(outcome.is_err(), "broadcast panic should propagate!");
}
601
#[test]
#[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)]
fn scope_spawn_broadcast_panic_many() {
    // Several broadcast tasks panic (even indices); every task must still
    // run, and one combined panic must surface from the scope.
    let ran = AtomicUsize::new(0);
    let pool = ThreadPoolBuilder::new().num_threads(7).build().unwrap();
    let outcome = unwind::halt_unwinding(|| {
        pool.scope(|s| {
            s.spawn_broadcast(|_, ctx| {
                ran.fetch_add(1, Ordering::Relaxed);
                if ctx.index() % 2 == 0 {
                    panic!("Hello, world!");
                }
            });
        });
    });
    assert_eq!(ran.into_inner(), 7);
    assert!(outcome.is_err(), "broadcast panic should propagate!");
}
620