//! Parallel quicksort.
//!
//! This implementation is copied verbatim from `std::slice::sort_unstable` and then parallelized.
//! The only difference from the original is that calls to `recurse` are executed in parallel using
//! `rayon_core::join`.
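//!
//! An illustrative sketch of how the parent module invokes the entry point defined
//! below (the comparator must be `Sync` and the elements `Send`):
//!
//! ```ignore
//! let mut v = vec![3_i32, 1, 2];
//! par_quicksort(&mut v, |a, b| a < b);
//! assert_eq!(v, [1, 2, 3]);
//! ```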

use std::cmp;
use std::mem::{self, MaybeUninit};
use std::ptr;

/// When dropped, copies from `src` into `dest`.
struct CopyOnDrop<T> {
    src: *const T,
    dest: *mut T,
}

impl<T> Drop for CopyOnDrop<T> {
    fn drop(&mut self) {
        // SAFETY: This is a helper struct.
        // Please refer to its usage for correctness.
        // Namely, one must be sure that `src` and `dest` do not overlap as required by
        // `ptr::copy_nonoverlapping`.
        unsafe {
            ptr::copy_nonoverlapping(self.src, self.dest, 1);
        }
    }
}
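
// An illustrative sketch of the panic-guard pattern `CopyOnDrop` enables (see the real
// call sites in `shift_head`, `shift_tail`, `partition`, and `partition_equal`;
// `elem` and `hole` here are placeholder names):
//
//     let tmp = mem::ManuallyDrop::new(ptr::read(elem));  // lift the element out
//     let _guard = CopyOnDrop { src: &*tmp, dest: hole }; // arm the guard
//     /* comparisons that may panic go here */
//     // `_guard` drops (normally or during unwind) and writes `tmp` back into `hole`.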

/// Shifts the first element to the right until it encounters a greater or equal element.
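/// For example, on `[3, 1, 2]` the head element 3 is shifted right past 1 and 2,
/// yielding `[1, 2, 3]`.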
fn shift_head<T, F>(v: &mut [T], is_less: &F)
where
    F: Fn(&T, &T) -> bool,
{
    let len = v.len();
    // SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
    // pointer) and copying memory (`ptr::copy_nonoverlapping`).
    //
    // a. Indexing:
    //  1. We checked that the length of the array is >= 2.
    //  2. All the indexing that we will do is always between `0 <= index < len`.
    //
    // b. Memory copying:
    //  1. We are obtaining pointers to references which are guaranteed to be valid.
    //  2. They cannot overlap because we obtain pointers to different indices of the slice.
    //     Namely, `i` and `i-1`.
    //  3. If the slice is properly aligned, the elements are properly aligned.
    //     It is the caller's responsibility to make sure the slice is properly aligned.
    //
    // See comments below for further detail.
    unsafe {
        // If the first two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
            // Read the first element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice.
            let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
            let v = v.as_mut_ptr();
            let mut hole = CopyOnDrop {
                src: &*tmp,
                dest: v.add(1),
            };
            ptr::copy_nonoverlapping(v.add(1), v.add(0), 1);

            for i in 2..len {
                if !is_less(&*v.add(i), &*tmp) {
                    break;
                }

                // Move `i`-th element one place to the left, thus shifting the hole to the right.
                ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1);
                hole.dest = v.add(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}

/// Shifts the last element to the left until it encounters a smaller or equal element.
fn shift_tail<T, F>(v: &mut [T], is_less: &F)
where
    F: Fn(&T, &T) -> bool,
{
    let len = v.len();
    // SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
    // pointer) and copying memory (`ptr::copy_nonoverlapping`).
    //
    // a. Indexing:
    //  1. We checked that the length of the array is >= 2.
    //  2. All the indexing that we will do is always between `0 <= index < len-1`.
    //
    // b. Memory copying:
    //  1. We are obtaining pointers to references which are guaranteed to be valid.
    //  2. They cannot overlap because we obtain pointers to different indices of the slice.
    //     Namely, `i` and `i+1`.
    //  3. If the slice is properly aligned, the elements are properly aligned.
    //     It is the caller's responsibility to make sure the slice is properly aligned.
    //
    // See comments below for further detail.
    unsafe {
        // If the last two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
            // Read the last element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice.
            let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
            let v = v.as_mut_ptr();
            let mut hole = CopyOnDrop {
                src: &*tmp,
                dest: v.add(len - 2),
            };
            ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1);

            for i in (0..len - 2).rev() {
                if !is_less(&*tmp, &*v.add(i)) {
                    break;
                }

                // Move `i`-th element one place to the right, thus shifting the hole to the left.
                ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1);
                hole.dest = v.add(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}

/// Partially sorts a slice by shifting several out-of-order elements around.
///
/// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case.
#[cold]
fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &F) -> bool
where
    F: Fn(&T, &T) -> bool,
{
    // Maximum number of adjacent out-of-order pairs that will get shifted.
    const MAX_STEPS: usize = 5;
    // If the slice is shorter than this, don't shift any elements.
    const SHORTEST_SHIFTING: usize = 50;

    let len = v.len();
    let mut i = 1;

    for _ in 0..MAX_STEPS {
        // SAFETY: We already explicitly did the bounds checking with `i < len`.
        // All our subsequent indexing is only in the range `0 <= index < len`.
        unsafe {
            // Find the next pair of adjacent out-of-order elements.
            while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
                i += 1;
            }
        }

        // Are we done?
        if i == len {
            return true;
        }

        // Don't shift elements on short arrays; that has a performance cost.
        if len < SHORTEST_SHIFTING {
            return false;
        }

        // Swap the found pair of elements. This puts them in correct order.
        v.swap(i - 1, i);

        // Shift the smaller element to the left.
        shift_tail(&mut v[..i], is_less);
        // Shift the greater element to the right.
        shift_head(&mut v[i..], is_less);
    }

    // Didn't manage to sort the slice in the limited number of steps.
    false
}

/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case.
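/// Each pass treats `v[..i]` as already sorted and inserts `v[i]` into it via
/// `shift_tail`; e.g. on `[2, 3, 1]` it runs `shift_tail` over `[2, 3]` (a no-op)
/// and then over `[2, 3, 1]`, which shifts the 1 to the front.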
fn insertion_sort<T, F>(v: &mut [T], is_less: &F)
where
    F: Fn(&T, &T) -> bool,
{
    for i in 1..v.len() {
        shift_tail(&mut v[..i + 1], is_less);
    }
}

/// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case.
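/// The heap is laid out implicitly in the slice: node `i` has children `2*i + 1` and
/// `2*i + 2`. For example, `[9, 5, 7, 1, 3]` is a max-heap: root 9 has children 5 and 7,
/// and node 5 has children 1 and 3.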
#[cold]
fn heapsort<T, F>(v: &mut [T], is_less: &F)
where
    F: Fn(&T, &T) -> bool,
{
    // This binary heap respects the invariant `parent >= child`.
    let sift_down = |v: &mut [T], mut node| {
        loop {
            // Children of `node`.
            let mut child = 2 * node + 1;
            if child >= v.len() {
                break;
            }

            // Choose the greater child.
            if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) {
                child += 1;
            }

            // Stop if the invariant holds at `node`.
            if !is_less(&v[node], &v[child]) {
                break;
            }

            // Swap `node` with the greater child, move one step down, and continue sifting.
            v.swap(node, child);
            node = child;
        }
    };

    // Build the heap in linear time.
    for i in (0..v.len() / 2).rev() {
        sift_down(v, i);
    }

    // Pop maximal elements from the heap.
    for i in (1..v.len()).rev() {
        v.swap(0, i);
        sift_down(&mut v[..i], 0);
    }
}

/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
/// to `pivot`.
///
/// Returns the number of elements smaller than `pivot`.
///
/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
/// This idea is presented in the [BlockQuicksort][pdf] paper.
///
/// [pdf]: https://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &F) -> usize
where
    F: Fn(&T, &T) -> bool,
{
    // Number of elements in a typical block.
    const BLOCK: usize = 128;

    // The partitioning algorithm repeats the following steps until completion:
    //
    // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
    // 2. Trace a block from the right side to identify elements smaller than the pivot.
    // 3. Exchange the identified elements between the left and right side.
    //
    // We keep the following variables for a block of elements:
    //
    // 1. `block` - Number of elements in the block.
    // 2. `start` - Start pointer into the `offsets` array.
    // 3. `end` - End pointer into the `offsets` array.
    // 4. `offsets` - Indices of out-of-order elements within the block.

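    // As a toy illustration (with a made-up block size, not the real `BLOCK`): scanning a
    // left block [9, 2, 7, 3] against pivot 5 collects `offsets = [0, 2]`, the indices of
    // the two elements (9 and 7) that belong on the right side and must move.
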
    // The current block on the left side (from `l` to `l.add(block_l)`).
    let mut l = v.as_mut_ptr();
    let mut block_l = BLOCK;
    let mut start_l = ptr::null_mut();
    let mut end_l = ptr::null_mut();
    let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];

    // The current block on the right side (from `r.sub(block_r)` to `r`).
    // SAFETY: The documentation for `.add()` specifically mentions that
    // `vec.as_ptr().add(vec.len())` is always safe.
    let mut r = unsafe { l.add(v.len()) };
    let mut block_r = BLOCK;
    let mut start_r = ptr::null_mut();
    let mut end_r = ptr::null_mut();
    let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK];

    // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
    // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.

    // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
    fn width<T>(l: *mut T, r: *mut T) -> usize {
        assert!(mem::size_of::<T>() > 0);
        // FIXME: this should *likely* use `offset_from`, but more
        // investigation is needed (including running tests in miri).
        // TODO unstable: (r.addr() - l.addr()) / mem::size_of::<T>()
        (r as usize - l as usize) / mem::size_of::<T>()
    }

    loop {
        // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
        // some patch-up work in order to partition the remaining elements in between.
        let is_done = width(l, r) <= 2 * BLOCK;

        if is_done {
            // Number of remaining elements (still not compared to the pivot).
            let mut rem = width(l, r);
            if start_l < end_l || start_r < end_r {
                rem -= BLOCK;
            }

            // Adjust block sizes so that the left and right block don't overlap, but get perfectly
            // aligned to cover the whole remaining gap.
            if start_l < end_l {
                block_r = rem;
            } else if start_r < end_r {
                block_l = rem;
            } else {
                // There were the same number of elements to switch on both blocks during the last
                // iteration, so there are no remaining elements on either block. Cover the
                // remaining items with roughly equally-sized blocks.
                block_l = rem / 2;
                block_r = rem - block_l;
            }
            debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
            debug_assert!(width(l, r) == block_l + block_r);
        }

        if start_l == end_l {
            // Trace `block_l` elements from the left side.
            // TODO unstable: start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
            start_l = offsets_l.as_mut_ptr() as *mut u8;
            end_l = start_l;
            let mut elem = l;

            for i in 0..block_l {
                // SAFETY: The unsafe operations below involve the use of `offset`.
                // According to the conditions required by the function, we satisfy them because:
                // 1. `offsets_l` is stack-allocated, and thus considered a separate allocated object.
                // 2. The function `is_less` returns a `bool`.
                //    Casting a `bool` will never overflow `isize`.
                // 3. We have guaranteed that `block_l` will be `<= BLOCK`.
                //    Plus, `end_l` was initially set to the begin pointer of `offsets_l`, which was
                //    declared on the stack. Thus, we know that even in the worst case (all
                //    invocations of `is_less` return false) we will only be at most 1 byte past the end.
                // Another unsafe operation here is dereferencing `elem`.
                // However, `elem` was initially the begin pointer to the slice, which is always valid.
                unsafe {
                    // Branchless comparison.
                    *end_l = i as u8;
                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
                    elem = elem.offset(1);
                }
            }
        }

        if start_r == end_r {
            // Trace `block_r` elements from the right side.
            // TODO unstable: start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
            start_r = offsets_r.as_mut_ptr() as *mut u8;
            end_r = start_r;
            let mut elem = r;

            for i in 0..block_r {
                // SAFETY: The unsafe operations below involve the use of `offset`.
                // According to the conditions required by the function, we satisfy them because:
                // 1. `offsets_r` is stack-allocated, and thus considered a separate allocated object.
                // 2. The function `is_less` returns a `bool`.
                //    Casting a `bool` will never overflow `isize`.
                // 3. We have guaranteed that `block_r` will be `<= BLOCK`.
                //    Plus, `end_r` was initially set to the begin pointer of `offsets_r`, which was
                //    declared on the stack. Thus, we know that even in the worst case (all
                //    invocations of `is_less` return true) we will only be at most 1 byte past the end.
                // Another unsafe operation here is dereferencing `elem`.
                // However, `elem` was initially `1 * sizeof(T)` past the end and we decrement it by
                // `1 * sizeof(T)` before accessing it.
                // Plus, `block_r` was asserted to be at most `BLOCK`, so `elem` will at most be
                // pointing to the beginning of the slice.
                unsafe {
                    // Branchless comparison.
                    elem = elem.offset(-1);
                    *end_r = i as u8;
                    end_r = end_r.offset(is_less(&*elem, pivot) as isize);
                }
            }
        }

        // Number of out-of-order elements to swap between the left and right side.
        let count = cmp::min(width(start_l, end_l), width(start_r, end_r));

        if count > 0 {
            macro_rules! left {
                () => {
                    l.offset(*start_l as isize)
                };
            }
            macro_rules! right {
                () => {
                    r.offset(-(*start_r as isize) - 1)
                };
            }

            // Instead of swapping one pair at a time, it is more efficient to perform a cyclic
            // permutation. This is not strictly equivalent to swapping, but produces a similar
            // result using fewer memory operations.
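            // As an illustrative sketch of what the unsafe loop below does for `count == 3`
            // (writing `left[k]`/`right[k]` for the elements selected by the k-th offsets):
            //
            //     tmp = left[0];      left[0] = right[0]; right[0] = left[1];
            //     left[1] = right[1]; right[1] = left[2]; left[2] = right[2];
            //     right[2] = tmp;
            //
            // i.e. one read and one write per element instead of three moves per swap.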

            // SAFETY: The use of `ptr::read` is valid because there is at least one element in
            // both `offsets_l` and `offsets_r`, so `left!` is a valid pointer to read from.
            //
            // The uses of `left!` involve calls to `offset` on `l`, which points to the
            // beginning of `v`. All the offsets pointed-to by `start_l` are at most `block_l`, so
            // these `offset` calls are safe as all reads are within the block. The same argument
            // applies for the uses of `right!`.
            //
            // The calls to `start_l.offset` are valid because there are at most `count-1` of them,
            // plus the final one at the end of the unsafe block, where `count` is the minimum number
            // of collected offsets in `offsets_l` and `offsets_r`, so there is no risk of there not
            // being enough elements. The same reasoning applies to the calls to `start_r.offset`.
            //
            // The calls to `copy_nonoverlapping` are safe because `left!` and `right!` are guaranteed
            // not to overlap, and are valid because of the reasoning above.
            unsafe {
                let tmp = ptr::read(left!());
                ptr::copy_nonoverlapping(right!(), left!(), 1);

                for _ in 1..count {
                    start_l = start_l.offset(1);
                    ptr::copy_nonoverlapping(left!(), right!(), 1);
                    start_r = start_r.offset(1);
                    ptr::copy_nonoverlapping(right!(), left!(), 1);
                }

                ptr::copy_nonoverlapping(&tmp, right!(), 1);
                mem::forget(tmp);
                start_l = start_l.offset(1);
                start_r = start_r.offset(1);
            }
        }

        if start_l == end_l {
            // All out-of-order elements in the left block were moved. Move to the next block.

            // block-width-guarantee
            // SAFETY: if `!is_done` then the slice width is guaranteed to be at least `2*BLOCK` wide. There
            // are at most `BLOCK` elements in `offsets_l` because of its size, so the `offset` operation is
            // safe. Otherwise, the debug assertions in the `is_done` case guarantee that
            // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
            // for the smaller number of remaining elements.
            l = unsafe { l.add(block_l) };
        }

        if start_r == end_r {
            // All out-of-order elements in the right block were moved. Move to the previous block.

            // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
            // or `block_r` has been adjusted for the last handful of elements.
            r = unsafe { r.offset(-(block_r as isize)) };
        }

        if is_done {
            break;
        }
    }

    // All that remains now is at most one block (either the left or the right) with out-of-order
    // elements that need to be moved. Such remaining elements can be simply shifted to the end
    // within their block.

    if start_l < end_l {
        // The left block remains.
        // Move its remaining out-of-order elements to the far right.
        debug_assert_eq!(width(l, r), block_l);
        while start_l < end_l {
            // remaining-elements-safety
            // SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it
            // is safe to point `end_l` to the previous element.
            //
            // The `ptr::swap` is safe if both its arguments are valid for reads and writes:
            //  - Per the debug assert above, the distance between `l` and `r` is `block_l`
            //    elements, so there can be at most `block_l` remaining offsets between `start_l`
            //    and `end_l`. This means `r` will be moved at most `block_l` steps back, which
            //    makes the `r.offset` calls valid (at that point `l == r`).
            //  - `offsets_l` contains valid offsets into `v` collected during the partitioning of
            //    the last block, so the `l.offset` calls are valid.
            unsafe {
                end_l = end_l.offset(-1);
                ptr::swap(l.offset(*end_l as isize), r.offset(-1));
                r = r.offset(-1);
            }
        }
        width(v.as_mut_ptr(), r)
    } else if start_r < end_r {
        // The right block remains.
        // Move its remaining out-of-order elements to the far left.
        debug_assert_eq!(width(l, r), block_r);
        while start_r < end_r {
            // SAFETY: See the reasoning in [remaining-elements-safety].
            unsafe {
                end_r = end_r.offset(-1);
                ptr::swap(l, r.offset(-(*end_r as isize) - 1));
                l = l.offset(1);
            }
        }
        width(v.as_mut_ptr(), l)
    } else {
        // Nothing else to do, we're done.
        width(v.as_mut_ptr(), l)
    }
}

/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
/// equal to `v[pivot]`.
///
/// Returns a tuple of:
///
/// 1. Number of elements smaller than `v[pivot]`.
/// 2. True if `v` was already partitioned.
fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> (usize, bool)
where
    F: Fn(&T, &T) -> bool,
{
    let (mid, was_partitioned) = {
        // Place the pivot at the beginning of the slice.
        v.swap(0, pivot);
        let (pivot, v) = v.split_at_mut(1);
        let pivot = &mut pivot[0];

        // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
        // operation panics, the pivot will be automatically written back into the slice.

        // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
        let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
        let _pivot_guard = CopyOnDrop {
            src: &*tmp,
            dest: pivot,
        };
        let pivot = &*tmp;

        // Find the first pair of out-of-order elements.
        let mut l = 0;
        let mut r = v.len();

        // SAFETY: The unsafety below involves indexing an array.
        // For the first loop: we already do the bounds checking with `l < r`.
        // For the second loop: we initially have `l == 0` and `r == v.len()`, and we check
        // `l < r` before every indexing operation, so `r - 1` is always a valid index.
        unsafe {
            // Find the first element greater than or equal to the pivot.
            while l < r && is_less(v.get_unchecked(l), pivot) {
                l += 1;
            }

            // Find the last element smaller than the pivot.
            while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
                r -= 1;
            }
        }

        (
            l + partition_in_blocks(&mut v[l..r], pivot, is_less),
            l >= r,
        )

        // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
        // variable) back into the slice where it originally was. This step is critical in ensuring
        // safety!
    };

    // Place the pivot between the two partitions.
    v.swap(0, mid);

    (mid, was_partitioned)
}

/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
///
/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
/// elements smaller than the pivot.
fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> usize
where
    F: Fn(&T, &T) -> bool,
{
    // Place the pivot at the beginning of the slice.
    v.swap(0, pivot);
    let (pivot, v) = v.split_at_mut(1);
    let pivot = &mut pivot[0];

    // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
    // operation panics, the pivot will be automatically written back into the slice.
    // SAFETY: The pointer here is valid because it is obtained from a reference to a slice.
    let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
    let _pivot_guard = CopyOnDrop {
        src: &*tmp,
        dest: pivot,
    };
    let pivot = &*tmp;

    // Now partition the slice.
    let mut l = 0;
    let mut r = v.len();
    loop {
        // SAFETY: The unsafety below involves indexing an array.
        // For the first loop: we already do the bounds checking with `l < r`.
        // For the second loop: we initially have `l == 0` and `r == v.len()`, and we check
        // `l < r` before every indexing operation, so `r - 1` is always a valid index.
        unsafe {
            // Find the first element greater than the pivot.
            while l < r && !is_less(pivot, v.get_unchecked(l)) {
                l += 1;
            }

            // Find the last element equal to the pivot.
            while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
                r -= 1;
            }

            // Are we done?
            if l >= r {
                break;
            }

            // Swap the found pair of out-of-order elements.
            r -= 1;
            let ptr = v.as_mut_ptr();
            ptr::swap(ptr.add(l), ptr.add(r));
            l += 1;
        }
    }

    // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
    l + 1

    // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
    // back into the slice where it originally was. This step is critical in ensuring safety!
}

/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
/// partitions in quicksort.
#[cold]
fn break_patterns<T>(v: &mut [T]) {
    let len = v.len();
    if len >= 8 {
        // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
        let mut random = len as u32;
        let mut gen_u32 = || {
            random ^= random << 13;
            random ^= random >> 17;
            random ^= random << 5;
            random
        };
        let mut gen_usize = || {
            if usize::BITS <= 32 {
                gen_u32() as usize
            } else {
                (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
            }
        };

        // Take random numbers modulo this number.
        // The number fits into `usize` because `len` is not greater than `isize::MAX`.
        let modulus = len.next_power_of_two();
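        // For example, `len == 20` gives `modulus == 32`; a value drawn from `[0, 32)`
        // needs at most one subtraction of `len` (done in the loop below) to land in `[0, 20)`.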

        // Some pivot candidates will be near this index. Let's randomize them.
        let pos = len / 4 * 2;

        for i in 0..3 {
            // Generate a random number modulo `len`. However, in order to avoid costly operations
            // we first take it modulo a power of two, and then decrease by `len` until it fits
            // into the range `[0, len - 1]`.
            let mut other = gen_usize() & (modulus - 1);

            // `other` is guaranteed to be less than `2 * len`.
            if other >= len {
                other -= len;
            }

            v.swap(pos - 1 + i, other);
        }
    }
}

/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
///
/// Elements in `v` might be reordered in the process.
fn choose_pivot<T, F>(v: &mut [T], is_less: &F) -> (usize, bool)
where
    F: Fn(&T, &T) -> bool,
{
    // Minimum length to choose the median-of-medians method.
    // Shorter slices use the simple median-of-three method.
    const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
    // Maximum number of swaps that can be performed in this function.
    const MAX_SWAPS: usize = 4 * 3;

    let len = v.len();

    // Three indices near which we are going to choose a pivot.
    #[allow(clippy::identity_op)]
    let mut a = len / 4 * 1;
    let mut b = len / 4 * 2;
    let mut c = len / 4 * 3;
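    // For example, with `len == 64` the candidates start at `a == 16`, `b == 32`,
    // and `c == 48`, i.e. at the quarter points of the slice.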

    // Counts the total number of swaps we are about to perform while sorting indices.
    let mut swaps = 0;

    if len >= 8 {
        // Swaps indices so that `v[a] <= v[b]`.
        // SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of
        // `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in
        // corresponding calls to `sort3` with valid 3-item neighborhoods around each
        // pointer, which in turn means the calls to `sort2` are done with valid
        // references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap`
        // call.
        let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
            if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
                ptr::swap(a, b);
                swaps += 1;
            }
        };

        // Swaps indices so that `v[a] <= v[b] <= v[c]`.
        let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
            sort2(a, b);
            sort2(b, c);
            sort2(a, b);
        };

        if len >= SHORTEST_MEDIAN_OF_MEDIANS {
            // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
            let mut sort_adjacent = |a: &mut usize| {
                let tmp = *a;
                sort3(&mut (tmp - 1), a, &mut (tmp + 1));
            };

            // Find medians in the neighborhoods of `a`, `b`, and `c`.
            sort_adjacent(&mut a);
            sort_adjacent(&mut b);
            sort_adjacent(&mut c);
        }

        // Find the median among `a`, `b`, and `c`.
        sort3(&mut a, &mut b, &mut c);
    }

    if swaps < MAX_SWAPS {
        (b, swaps == 0)
    } else {
        // The maximum number of swaps was performed. Chances are the slice is descending or mostly
        // descending, so reversing will probably help sort it faster.
        v.reverse();
        (len - 1 - b, true)
    }
}

/// Sorts `v` recursively.
///
/// If the slice had a predecessor in the original array, it is specified as `pred`.
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &F, mut pred: Option<&'a mut T>, mut limit: u32)
where
    T: Send,
    F: Fn(&T, &T) -> bool + Sync,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // If both partitions are up to this length, we continue sequentially. This number is as small
    // as possible while keeping the overhead of Rayon's task scheduling negligible.
    const MAX_SEQUENTIAL: usize = 2000;

    // True if the last partitioning was reasonably balanced.
    let mut was_balanced = true;
    // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
    let mut was_partitioned = true;

    loop {
        let len = v.len();

        // Very short slices get sorted using insertion sort.
        if len <= MAX_INSERTION {
            insertion_sort(v, is_less);
            return;
        }

        // If too many bad pivot choices were made, simply fall back to heapsort in order to
        // guarantee `O(n * log(n))` worst-case.
        if limit == 0 {
            heapsort(v, is_less);
            return;
        }

        // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
        // some elements around. Hopefully we'll choose a better pivot this time.
        if !was_balanced {
            break_patterns(v);
            limit -= 1;
        }

        // Choose a pivot and try guessing whether the slice is already sorted.
        let (pivot, likely_sorted) = choose_pivot(v, is_less);

        // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
        // selection predicts the slice is likely already sorted...
        if was_balanced && was_partitioned && likely_sorted {
            // Try identifying several out-of-order elements and shifting them to correct
            // positions. If the slice ends up being completely sorted, we're done.
            if partial_insertion_sort(v, is_less) {
                return;
            }
        }

        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
        // slice. Partition the slice into elements equal to and elements greater than the pivot.
        // This case is usually hit when the slice contains many duplicate elements.
        if let Some(ref p) = pred {
            if !is_less(p, &v[pivot]) {
                let mid = partition_equal(v, pivot, is_less);

                // Continue sorting elements greater than the pivot.
                v = &mut v[mid..];
                continue;
            }
        }

        // Partition the slice.
        let (mid, was_p) = partition(v, pivot, is_less);
        was_balanced = cmp::min(mid, len - mid) >= len / 8;
        was_partitioned = was_p;

        // Split the slice into `left`, `pivot`, and `right`.
        let (left, right) = v.split_at_mut(mid);
        let (pivot, right) = right.split_at_mut(1);
        let pivot = &mut pivot[0];

        if cmp::max(left.len(), right.len()) <= MAX_SEQUENTIAL {
            // Recurse into the shorter side only in order to minimize the total number of recursive
            // calls and consume less stack space. Then just continue with the longer side (this is
            // akin to tail recursion).
            if left.len() < right.len() {
                recurse(left, is_less, pred, limit);
                v = right;
                pred = Some(pivot);
            } else {
                recurse(right, is_less, Some(pivot), limit);
                v = left;
            }
        } else {
            // Sort the left and right half in parallel.
            rayon_core::join(
                || recurse(left, is_less, pred, limit),
                || recurse(right, is_less, Some(pivot), limit),
            );
            break;
        }
    }
}

/// Sorts `v` using pattern-defeating quicksort in parallel.
///
/// The algorithm is unstable, in-place, and *O*(*n* \* log(*n*)) worst-case.
pub(super) fn par_quicksort<T, F>(v: &mut [T], is_less: F)
where
    T: Send,
    F: Fn(&T, &T) -> bool + Sync,
{
    // Sorting has no meaningful behavior on zero-sized types.
    if mem::size_of::<T>() == 0 {
        return;
    }

    // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
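    // For example, `len == 1000` occupies 10 bits, so `limit == 10`, i.e. `floor(log2(1000)) + 1`.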
    let limit = usize::BITS - v.len().leading_zeros();

    recurse(v, &is_less, None, limit);
}

#[cfg(test)]
mod tests {
    use super::heapsort;
    use rand::distributions::Uniform;
    use rand::{thread_rng, Rng};

    #[test]
    fn test_heapsort() {
        let rng = &mut thread_rng();

        for len in (0..25).chain(500..501) {
            for &modulus in &[5, 10, 100] {
                let dist = Uniform::new(0, modulus);
                for _ in 0..100 {
                    let v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();

                    // Test heapsort using `<` operator.
                    let mut tmp = v.clone();
                    heapsort(&mut tmp, &|a, b| a < b);
                    assert!(tmp.windows(2).all(|w| w[0] <= w[1]));

                    // Test heapsort using `>` operator.
                    let mut tmp = v.clone();
                    heapsort(&mut tmp, &|a, b| a > b);
                    assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
                }
            }
        }

        // Sort using a completely random comparison function.
        // This will reorder the elements *somehow*, but won't panic.
        let mut v: Vec<_> = (0..100).collect();
        heapsort(&mut v, &|_, _| thread_rng().gen());
        heapsort(&mut v, &|a, b| a < b);

        for (i, &entry) in v.iter().enumerate() {
            assert_eq!(entry, i);
        }
    }
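
    // A minimal sketch of a test for the parallel entry point itself; it assumes the
    // global rayon-core thread pool is available in the test environment.
    #[test]
    fn test_par_quicksort() {
        let rng = &mut thread_rng();
        let dist = Uniform::new(0, 100);
        let mut v: Vec<i32> = rng.sample_iter(&dist).take(10_000).collect();

        super::par_quicksort(&mut v, |a, b| a < b);
        assert!(v.windows(2).all(|w| w[0] <= w[1]));
    }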
}