• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: Apache-2.0 OR MIT
2 
3 //! A dynamically-sized view into a contiguous sequence, `[T]`.
4 //!
5 //! *[See also the slice primitive type](slice).*
6 //!
7 //! Slices are a view into a block of memory represented as a pointer and a
8 //! length.
9 //!
10 //! ```
11 //! // slicing a Vec
12 //! let vec = vec![1, 2, 3];
13 //! let int_slice = &vec[..];
14 //! // coercing an array to a slice
15 //! let str_slice: &[&str] = &["one", "two", "three"];
16 //! ```
17 //!
18 //! Slices are either mutable or shared. The shared slice type is `&[T]`,
19 //! while the mutable slice type is `&mut [T]`, where `T` represents the element
20 //! type. For example, you can mutate the block of memory that a mutable slice
21 //! points to:
22 //!
23 //! ```
24 //! let x = &mut [1, 2, 3];
25 //! x[1] = 7;
26 //! assert_eq!(x, &[1, 7, 3]);
27 //! ```
28 //!
29 //! Here are some of the things this module contains:
30 //!
31 //! ## Structs
32 //!
33 //! There are several structs that are useful for slices, such as [`Iter`], which
34 //! represents iteration over a slice.
35 //!
36 //! ## Trait Implementations
37 //!
38 //! There are several implementations of common traits for slices. Some examples
39 //! include:
40 //!
41 //! * [`Clone`]
42 //! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
43 //! * [`Hash`] - for slices whose element type is [`Hash`].
44 //!
45 //! ## Iteration
46 //!
47 //! The slices implement `IntoIterator`. The iterator yields references to the
48 //! slice elements.
49 //!
50 //! ```
51 //! let numbers = &[0, 1, 2];
52 //! for n in numbers {
53 //!     println!("{n} is a number!");
54 //! }
55 //! ```
56 //!
57 //! The mutable slice yields mutable references to the elements:
58 //!
59 //! ```
60 //! let mut scores = [7, 8, 9];
61 //! for score in &mut scores[..] {
62 //!     *score += 1;
63 //! }
64 //! ```
65 //!
66 //! This iterator yields mutable references to the slice's elements, so while
67 //! the element type of the slice is `i32`, the element type of the iterator is
68 //! `&mut i32`.
69 //!
70 //! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
71 //!   iterators.
72 //! * Further methods that return iterators are [`.split`], [`.splitn`],
73 //!   [`.chunks`], [`.windows`] and more.
74 //!
75 //! [`Hash`]: core::hash::Hash
76 //! [`.iter`]: slice::iter
77 //! [`.iter_mut`]: slice::iter_mut
78 //! [`.split`]: slice::split
79 //! [`.splitn`]: slice::splitn
80 //! [`.chunks`]: slice::chunks
81 //! [`.windows`]: slice::windows
82 #![stable(feature = "rust1", since = "1.0.0")]
83 // Many of the usings in this module are only used in the test configuration.
84 // It's cleaner to just turn off the unused_imports warning than to fix them.
85 #![cfg_attr(test, allow(unused_imports, dead_code))]
86 
87 use core::borrow::{Borrow, BorrowMut};
88 #[cfg(not(no_global_oom_handling))]
89 use core::cmp::Ordering::{self, Less};
90 #[cfg(not(no_global_oom_handling))]
91 use core::mem;
92 #[cfg(not(no_global_oom_handling))]
93 use core::mem::size_of;
94 #[cfg(not(no_global_oom_handling))]
95 use core::ptr;
96 
97 use crate::alloc::Allocator;
98 #[cfg(not(no_global_oom_handling))]
99 use crate::alloc::Global;
100 #[cfg(not(no_global_oom_handling))]
101 use crate::borrow::ToOwned;
102 use crate::boxed::Box;
103 use crate::vec::Vec;
104 
105 #[unstable(feature = "slice_range", issue = "76393")]
106 pub use core::slice::range;
107 #[unstable(feature = "array_chunks", issue = "74985")]
108 pub use core::slice::ArrayChunks;
109 #[unstable(feature = "array_chunks", issue = "74985")]
110 pub use core::slice::ArrayChunksMut;
111 #[unstable(feature = "array_windows", issue = "75027")]
112 pub use core::slice::ArrayWindows;
113 #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
114 pub use core::slice::EscapeAscii;
115 #[stable(feature = "slice_get_slice", since = "1.28.0")]
116 pub use core::slice::SliceIndex;
117 #[stable(feature = "from_ref", since = "1.28.0")]
118 pub use core::slice::{from_mut, from_ref};
119 #[stable(feature = "rust1", since = "1.0.0")]
120 pub use core::slice::{from_raw_parts, from_raw_parts_mut};
121 #[stable(feature = "rust1", since = "1.0.0")]
122 pub use core::slice::{Chunks, Windows};
123 #[stable(feature = "chunks_exact", since = "1.31.0")]
124 pub use core::slice::{ChunksExact, ChunksExactMut};
125 #[stable(feature = "rust1", since = "1.0.0")]
126 pub use core::slice::{ChunksMut, Split, SplitMut};
127 #[unstable(feature = "slice_group_by", issue = "80552")]
128 pub use core::slice::{GroupBy, GroupByMut};
129 #[stable(feature = "rust1", since = "1.0.0")]
130 pub use core::slice::{Iter, IterMut};
131 #[stable(feature = "rchunks", since = "1.31.0")]
132 pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
133 #[stable(feature = "slice_rsplit", since = "1.27.0")]
134 pub use core::slice::{RSplit, RSplitMut};
135 #[stable(feature = "rust1", since = "1.0.0")]
136 pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
137 #[stable(feature = "split_inclusive", since = "1.51.0")]
138 pub use core::slice::{SplitInclusive, SplitInclusiveMut};
139 
140 ////////////////////////////////////////////////////////////////////////////////
141 // Basic slice extension methods
142 ////////////////////////////////////////////////////////////////////////////////
143 
144 // HACK(japaric) needed for the implementation of `vec!` macro during testing
145 // N.B., see the `hack` module in this file for more details.
146 #[cfg(test)]
147 pub use hack::into_vec;
148 
149 // HACK(japaric) needed for the implementation of `Vec::clone` during testing
150 // N.B., see the `hack` module in this file for more details.
151 #[cfg(test)]
152 pub use hack::to_vec;
153 
154 // HACK(japaric): With cfg(test) `impl [T]` is not available, these three
155 // functions are actually methods that are in `impl [T]` but not in
156 // `core::slice::SliceExt` - we need to supply these functions for the
157 // `test_permutations` test
pub(crate) mod hack {
    use core::alloc::Allocator;

    use crate::boxed::Box;
    use crate::vec::Vec;

    // We shouldn't add inline attribute to this since this is used in
    // `vec!` macro mostly and causes perf regression. See #71204 for
    // discussion and perf results.
    /// Converts a boxed slice into a `Vec` without cloning or reallocating:
    /// the box's heap buffer is reused directly as the vector's buffer.
    pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
        // SAFETY: the pointer obtained from the box refers to `len`
        // initialized elements allocated by `alloc`, which matches the
        // contract of `Vec::from_raw_parts_in` (here length == capacity).
        unsafe {
            let len = b.len();
            let (b, alloc) = Box::into_raw_with_allocator(b);
            Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
        }
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline]
    /// Clones the slice `s` into a new `Vec` using allocator `alloc`,
    /// dispatching through the `ConvertVec` specialization trait below.
    pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
        T::to_vec(s, alloc)
    }

    /// Specialization helper: lets `to_vec` use a single `memcpy` for `Copy`
    /// element types while falling back to per-element `clone` otherwise.
    #[cfg(not(no_global_oom_handling))]
    pub trait ConvertVec {
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
        where
            Self: Sized;
    }

    // Generic fallback: clone each element, with panic safety.
    #[cfg(not(no_global_oom_handling))]
    impl<T: Clone> ConvertVec for T {
        #[inline]
        default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            // If a `clone` call below panics, this guard sets the vector's
            // length to the number of elements that were fully initialized,
            // so exactly those elements (and no uninitialized memory) are
            // dropped during unwinding.
            struct DropGuard<'a, T, A: Allocator> {
                vec: &'a mut Vec<T, A>,
                num_init: usize,
            }
            impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
                #[inline]
                fn drop(&mut self) {
                    // SAFETY:
                    // items were marked initialized in the loop below
                    unsafe {
                        self.vec.set_len(self.num_init);
                    }
                }
            }
            let mut vec = Vec::with_capacity_in(s.len(), alloc);
            let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
            let slots = guard.vec.spare_capacity_mut();
            // .take(slots.len()) is necessary for LLVM to remove bounds checks
            // and has better codegen than zip.
            for (i, b) in s.iter().enumerate().take(slots.len()) {
                guard.num_init = i;
                slots[i].write(b.clone());
            }
            // All clones succeeded: defuse the guard so its destructor does
            // not shrink the length back down.
            core::mem::forget(guard);
            // SAFETY:
            // the vec was allocated and initialized above to at least this length.
            unsafe {
                vec.set_len(s.len());
            }
            vec
        }
    }

    // Specialized fast path: `Copy` elements can be bulk-copied.
    #[cfg(not(no_global_oom_handling))]
    impl<T: Copy> ConvertVec for T {
        #[inline]
        fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
            let mut v = Vec::with_capacity_in(s.len(), alloc);
            // SAFETY:
            // allocated above with the capacity of `s`, and initialize to `s.len()` in
            // ptr::copy_to_non_overlapping below.
            unsafe {
                s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
                v.set_len(s.len());
            }
            v
        }
    }
}
241 
#[cfg(not(test))]
impl<T> [T] {
    /// Sorts the slice.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable`](slice::sort_unstable).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5, 4, 1, -3, 2];
    ///
    /// v.sort();
    /// assert!(v == [-5, -3, 1, 2, 4]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort(&mut self)
    where
        T: Ord,
    {
        // `merge_sort` takes an `is_less` predicate; `Ord::lt` supplies it.
        merge_sort(self, |a, b| a.lt(b));
    }

    /// Sorts the slice with a comparator function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
    ///
    /// The comparator function must define a total ordering for the elements in the slice. If
    /// the ordering is not total, the order of the elements is unspecified. An order is a
    /// total order if it is (for all `a`, `b` and `c`):
    ///
    /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
    /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
    ///
    /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
    /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
    ///
    /// ```
    /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
    /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
    /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
    /// ```
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by`](slice::sort_unstable_by).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [5, 4, 1, 3, 2];
    /// v.sort_by(|a, b| a.cmp(b));
    /// assert!(v == [1, 2, 3, 4, 5]);
    ///
    /// // reverse sorting
    /// v.sort_by(|a, b| b.cmp(a));
    /// assert!(v == [5, 4, 3, 2, 1]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn sort_by<F>(&mut self, mut compare: F)
    where
        F: FnMut(&T, &T) -> Ordering,
    {
        // Adapt the three-way comparator to the boolean `is_less`
        // predicate that `merge_sort` expects.
        merge_sort(self, |a, b| compare(a, b) == Less);
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For expensive key functions (e.g. functions that are not simple property accesses or
    /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
    /// significantly faster, as it does not recompute element keys.
    ///
    /// When applicable, unstable sorting is preferred because it is generally faster than stable
    /// sorting and it doesn't allocate auxiliary memory.
    /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
    ///
    /// # Current implementation
    ///
    /// The current algorithm is an adaptive, iterative merge sort inspired by
    /// [timsort](https://en.wikipedia.org/wiki/Timsort).
    /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
    /// two or more sorted sequences concatenated one after another.
    ///
    /// Also, it allocates temporary storage half the size of `self`, but for short slices a
    /// non-allocating insertion sort is used instead.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 1, -3, 2];
    ///
    /// v.sort_by_key(|k| k.abs());
    /// assert!(v == [1, 2, -3, 4, -5]);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
    #[inline]
    pub fn sort_by_key<K, F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        // Note: `f` runs twice per comparison here, which is why
        // `sort_by_cached_key` exists for expensive key functions.
        merge_sort(self, |a, b| f(a).lt(&f(b)));
    }

    /// Sorts the slice with a key extraction function.
    ///
    /// During sorting, the key function is called at most once per element, by using
    /// temporary storage to remember the results of key evaluation.
    /// The order of calls to the key function is unspecified and may change in future versions
    /// of the standard library.
    ///
    /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
    /// worst-case, where the key function is *O*(*m*).
    ///
    /// For simple key functions (e.g., functions that are property accesses or
    /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
    /// faster.
    ///
    /// # Current implementation
    ///
    /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
    /// which combines the fast average case of randomized quicksort with the fast worst case of
    /// heapsort, while achieving linear time on slices with certain patterns. It uses some
    /// randomization to avoid degenerate cases, but with a fixed seed to always provide
    /// deterministic behavior.
    ///
    /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
    /// length of the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = [-5i32, 4, 32, -3, 2];
    ///
    /// v.sort_by_cached_key(|k| k.to_string());
    /// assert!(v == [-3, -5, 2, 32, 4]);
    /// ```
    ///
    /// [pdqsort]: https://github.com/orlp/pdqsort
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
    #[inline]
    pub fn sort_by_cached_key<K, F>(&mut self, f: F)
    where
        F: FnMut(&T) -> K,
        K: Ord,
    {
        // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
        macro_rules! sort_by_key {
            ($t:ty, $slice:ident, $f:ident) => {{
                // Compute every key once, pairing it with its original index.
                let mut indices: Vec<_> =
                    $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
                // The elements of `indices` are unique, as they are indexed, so any sort will be
                // stable with respect to the original slice. We use `sort_unstable` here because
                // it requires less memory allocation.
                indices.sort_unstable();
                // Apply the sorted permutation to the slice in place.
                for i in 0..$slice.len() {
                    let mut index = indices[i].1;
                    // A target index smaller than `i` was already moved by an
                    // earlier swap; chase the forwarding entries (written
                    // below) until we reach the element's current position.
                    while (index as usize) < i {
                        index = indices[index as usize].1;
                    }
                    // Record where slot `i`'s previous occupant now lives so
                    // later iterations can follow the chain.
                    indices[i].1 = index;
                    $slice.swap(i, index as usize);
                }
            }};
        }

        // Sizes of `(key, index)` pairs for each candidate index type; used
        // below to pick the narrowest index that actually shrinks the pair.
        let sz_u8 = mem::size_of::<(K, u8)>();
        let sz_u16 = mem::size_of::<(K, u16)>();
        let sz_u32 = mem::size_of::<(K, u32)>();
        let sz_usize = mem::size_of::<(K, usize)>();

        let len = self.len();
        if len < 2 {
            return;
        }
        // Choose the smallest index type that both fits `len` and yields a
        // smaller pair than the next size up (alignment may make them equal).
        if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
            return sort_by_key!(u8, self, f);
        }
        if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
            return sort_by_key!(u16, self, f);
        }
        if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
            return sort_by_key!(u32, self, f);
        }
        sort_by_key!(usize, self, f)
    }

    /// Copies `self` into a new `Vec`.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = [10, 40, 30];
    /// let x = s.to_vec();
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[rustc_conversion_suggestion]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn to_vec(&self) -> Vec<T>
    where
        T: Clone,
    {
        self.to_vec_in(Global)
    }

    /// Copies `self` into a new `Vec` with an allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    ///
    /// let s = [10, 40, 30];
    /// let x = s.to_vec_in(System);
    /// // Here, `s` and `x` can be modified independently.
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[rustc_allow_incoherent_impl]
    #[inline]
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
    where
        T: Clone,
    {
        // N.B., see the `hack` module in this file for more details.
        hack::to_vec(self, alloc)
    }

    /// Converts `self` into a vector without clones or allocation.
    ///
    /// The resulting vector can be converted back into a box via
    /// `Vec<T>`'s `into_boxed_slice` method.
    ///
    /// # Examples
    ///
    /// ```
    /// let s: Box<[i32]> = Box::new([10, 40, 30]);
    /// let x = s.into_vec();
    /// // `s` cannot be used anymore because it has been converted into `x`.
    ///
    /// assert_eq!(x, vec![10, 40, 30]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
        // N.B., see the `hack` module in this file for more details.
        hack::into_vec(self)
    }

    /// Creates a vector by repeating a slice `n` times.
    ///
    /// # Panics
    ///
    /// This function will panic if the capacity would overflow.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
    /// ```
    ///
    /// A panic upon overflow:
    ///
    /// ```should_panic
    /// // this will panic at runtime
    /// b"0123456789abcdef".repeat(usize::MAX);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[cfg(not(no_global_oom_handling))]
    #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
    pub fn repeat(&self, n: usize) -> Vec<T>
    where
        T: Copy,
    {
        if n == 0 {
            return Vec::new();
        }

        // If `n` is larger than zero, it can be split as
        // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
        // `2^expn` is the number represented by the leftmost '1' bit of `n`,
        // and `rem` is the remaining part of `n`.

        // Using `Vec` to access `set_len()`.
        let capacity = self.len().checked_mul(n).expect("capacity overflow");
        let mut buf = Vec::with_capacity(capacity);

        // `2^expn` repetition is done by doubling `buf` `expn`-times.
        buf.extend(self);
        {
            let mut m = n >> 1;
            // If `m > 0`, there are remaining bits up to the leftmost '1'.
            while m > 0 {
                // `buf.extend(buf)`:
                // SAFETY: the copy source and destination don't overlap (the
                // destination starts right past the current length), and the
                // doubled length stays within the `self.len() * n` capacity
                // reserved above while higher bits of `n` remain.
                unsafe {
                    ptr::copy_nonoverlapping(
                        buf.as_ptr(),
                        (buf.as_mut_ptr() as *mut T).add(buf.len()),
                        buf.len(),
                    );
                    // `buf` has capacity of `self.len() * n`.
                    let buf_len = buf.len();
                    buf.set_len(buf_len * 2);
                }

                m >>= 1;
            }
        }

        // `rem` (`= n - 2^expn`) repetition is done by copying
        // first `rem` repetitions from `buf` itself.
        let rem_len = capacity - buf.len(); // `self.len() * rem`
        if rem_len > 0 {
            // `buf.extend(buf[0 .. rem_len])`:
            unsafe {
                // This is non-overlapping since `2^expn > rem`.
                ptr::copy_nonoverlapping(
                    buf.as_ptr(),
                    (buf.as_mut_ptr() as *mut T).add(buf.len()),
                    rem_len,
                );
                // `buf.len() + rem_len` equals to `buf.capacity()` (`= self.len() * n`).
                buf.set_len(capacity);
            }
        }
        buf
    }

    /// Flattens a slice of `T` into a single value `Self::Output`.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].concat(), "helloworld");
    /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
    where
        Self: Concat<Item>,
    {
        Concat::concat(self)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(["hello", "world"].join(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
    /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
    pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }

    /// Flattens a slice of `T` into a single value `Self::Output`, placing a
    /// given separator between each.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// assert_eq!(["hello", "world"].connect(" "), "hello world");
    /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
    /// ```
    #[rustc_allow_incoherent_impl]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(since = "1.3.0", note = "renamed to join")]
    pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
    where
        Self: Join<Separator>,
    {
        Join::join(self, sep)
    }
}
670 
671 #[cfg(not(test))]
672 impl [u8] {
673     /// Returns a vector containing a copy of this slice where each byte
674     /// is mapped to its ASCII upper case equivalent.
675     ///
676     /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
677     /// but non-ASCII letters are unchanged.
678     ///
679     /// To uppercase the value in-place, use [`make_ascii_uppercase`].
680     ///
681     /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
682     #[cfg(not(no_global_oom_handling))]
683     #[rustc_allow_incoherent_impl]
684     #[must_use = "this returns the uppercase bytes as a new Vec, \
685                   without modifying the original"]
686     #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
687     #[inline]
to_ascii_uppercase(&self) -> Vec<u8>688     pub fn to_ascii_uppercase(&self) -> Vec<u8> {
689         let mut me = self.to_vec();
690         me.make_ascii_uppercase();
691         me
692     }
693 
694     /// Returns a vector containing a copy of this slice where each byte
695     /// is mapped to its ASCII lower case equivalent.
696     ///
697     /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
698     /// but non-ASCII letters are unchanged.
699     ///
700     /// To lowercase the value in-place, use [`make_ascii_lowercase`].
701     ///
702     /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
703     #[cfg(not(no_global_oom_handling))]
704     #[rustc_allow_incoherent_impl]
705     #[must_use = "this returns the lowercase bytes as a new Vec, \
706                   without modifying the original"]
707     #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
708     #[inline]
to_ascii_lowercase(&self) -> Vec<u8>709     pub fn to_ascii_lowercase(&self) -> Vec<u8> {
710         let mut me = self.to_vec();
711         me.make_ascii_lowercase();
712         me
713     }
714 }
715 
716 ////////////////////////////////////////////////////////////////////////////////
717 // Extension traits for slices over specific kinds of data
718 ////////////////////////////////////////////////////////////////////////////////
719 
/// Helper trait for [`[T]::concat`](slice::concat).
///
/// Note: the `Item` type parameter is not used in this trait,
/// but it allows impls to be more generic.
/// Without it, we get this error:
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
///    --> src/liballoc/slice.rs:608:6
///     |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
///     |      ^ unconstrained type parameter
/// ```
///
/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
/// such that multiple `T` types would apply:
///
/// ```
/// # #[allow(dead_code)]
/// pub struct Foo(Vec<u32>, Vec<String>);
///
/// impl std::borrow::Borrow<[u32]> for Foo {
///     fn borrow(&self) -> &[u32] { &self.0 }
/// }
///
/// impl std::borrow::Borrow<[String]> for Foo {
///     fn borrow(&self) -> &[String] { &self.1 }
/// }
/// ```
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Concat<Item: ?Sized> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::concat`](slice::concat)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn concat(slice: &Self) -> Self::Output;
}
759 
/// Helper trait for [`[T]::join`](slice::join).
///
/// The `Separator` type parameter allows distinct impls for joining with a
/// single element (`&T`) versus a slice of elements (`&[T]`); see below.
#[unstable(feature = "slice_concat_trait", issue = "27747")]
pub trait Join<Separator> {
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    /// The resulting type after concatenation
    type Output;

    /// Implementation of [`[T]::join`](slice::join)
    #[unstable(feature = "slice_concat_trait", issue = "27747")]
    fn join(slice: &Self, sep: Separator) -> Self::Output;
}
771 
772 #[cfg(not(no_global_oom_handling))]
773 #[unstable(feature = "slice_concat_ext", issue = "27747")]
774 impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
775     type Output = Vec<T>;
776 
concat(slice: &Self) -> Vec<T>777     fn concat(slice: &Self) -> Vec<T> {
778         let size = slice.iter().map(|slice| slice.borrow().len()).sum();
779         let mut result = Vec::with_capacity(size);
780         for v in slice {
781             result.extend_from_slice(v.borrow())
782         }
783         result
784     }
785 }
786 
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
    type Output = Vec<T>;

    // Joins the borrowed sub-slices with a clone of `sep` between each
    // adjacent pair; an empty input yields an empty `Vec`.
    fn join(slice: &Self, sep: &T) -> Vec<T> {
        let mut iter = slice.iter();
        let first = match iter.next() {
            Some(first) => first,
            None => return vec![],
        };
        // `slice.len() - 1` cannot underflow: the early return above
        // guarantees at least one element. Capacity is the total element
        // count plus one separator per adjacent pair.
        let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
        let mut result = Vec::with_capacity(size);
        result.extend_from_slice(first.borrow());

        // `iter` was already advanced past the first element, so each
        // remaining element is preceded by a separator.
        for v in iter {
            result.push(sep.clone());
            result.extend_from_slice(v.borrow())
        }
        result
    }
}
809 
810 #[cfg(not(no_global_oom_handling))]
811 #[unstable(feature = "slice_concat_ext", issue = "27747")]
812 impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
813     type Output = Vec<T>;
814 
join(slice: &Self, sep: &[T]) -> Vec<T>815     fn join(slice: &Self, sep: &[T]) -> Vec<T> {
816         let mut iter = slice.iter();
817         let first = match iter.next() {
818             Some(first) => first,
819             None => return vec![],
820         };
821         let size =
822             slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
823         let mut result = Vec::with_capacity(size);
824         result.extend_from_slice(first.borrow());
825 
826         for v in iter {
827             result.extend_from_slice(sep);
828             result.extend_from_slice(v.borrow())
829         }
830         result
831     }
832 }
833 
834 ////////////////////////////////////////////////////////////////////////////////
835 // Standard trait implementations for slices
836 ////////////////////////////////////////////////////////////////////////////////
837 
838 #[stable(feature = "rust1", since = "1.0.0")]
839 impl<T> Borrow<[T]> for Vec<T> {
borrow(&self) -> &[T]840     fn borrow(&self) -> &[T] {
841         &self[..]
842     }
843 }
844 
845 #[stable(feature = "rust1", since = "1.0.0")]
846 impl<T> BorrowMut<[T]> for Vec<T> {
borrow_mut(&mut self) -> &mut [T]847     fn borrow_mut(&mut self) -> &mut [T] {
848         &mut self[..]
849     }
850 }
851 
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
    type Owned = Vec<T>;
    #[cfg(not(test))]
    fn to_owned(&self) -> Vec<T> {
        self.to_vec()
    }

    // NOTE: in the crate's own `test` build this routes through the
    // file-local `hack` module (with the `Global` allocator) instead of the
    // inherent `to_vec` — presumably because the inherent method is not
    // available in that build configuration; confirm against `hack::to_vec`.
    #[cfg(test)]
    fn to_owned(&self) -> Vec<T> {
        hack::to_vec(self, Global)
    }

    // Clones `self` into `target`, reusing `target`'s existing buffer and the
    // allocations/resources already held by its elements where possible.
    fn clone_into(&self, target: &mut Vec<T>) {
        // drop anything in target that will not be overwritten
        target.truncate(self.len());

        // target.len <= self.len due to the truncate above, so the
        // slices here are always in-bounds.
        let (init, tail) = self.split_at(target.len());

        // reuse the contained values' allocations/resources.
        target.clone_from_slice(init);
        target.extend_from_slice(tail);
    }
}
879 
880 ////////////////////////////////////////////////////////////////////////////////
881 // Sorting
882 ////////////////////////////////////////////////////////////////////////////////
883 
/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
///
/// `is_less` is the strict "less than" comparator. The scan below stops at the
/// first element that is *not* less than `v[0]`, so equal elements keep their
/// relative order (the insertion is stable).
#[cfg(not(no_global_oom_handling))]
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Fast path: if `v[0]` is already <= `v[1]` (or there is at most one
    // element), the whole slice is already sorted and nothing is moved.
    if v.len() >= 2 && is_less(&v[1], &v[0]) {
        unsafe {
            // There are three ways to implement insertion here:
            //
            // 1. Swap adjacent elements until the first one gets to its final destination.
            //    However, this way we copy data around more than is necessary. If elements are big
            //    structures (costly to copy), this method will be slow.
            //
            // 2. Iterate until the right place for the first element is found. Then shift the
            //    elements succeeding it to make room for it and finally place it into the
            //    remaining hole. This is a good method.
            //
            // 3. Copy the first element into a temporary variable. Iterate until the right place
            //    for it is found. As we go along, copy every traversed element into the slot
            //    preceding it. Finally, copy data from the temporary variable into the remaining
            //    hole. This method is very good. Benchmarks demonstrated slightly better
            //    performance than with the 2nd method.
            //
            // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
            //
            // `ManuallyDrop` prevents a double-drop: the bitwise copy in `tmp`
            // must not run `T`'s destructor, since the same value will be
            // written back into `v` by `hole`'s drop.
            let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));

            // Intermediate state of the insertion process is always tracked by `hole`, which
            // serves two purposes:
            // 1. Protects integrity of `v` from panics in `is_less`.
            // 2. Fills the remaining hole in `v` in the end.
            //
            // Panic safety:
            //
            // If `is_less` panics at any point during the process, `hole` will get dropped and
            // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
            // initially held exactly once.
            let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
            // Shift the smaller `v[1]` down into slot 0; the hole is now at index 1.
            ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);

            for i in 2..v.len() {
                if !is_less(&v[i], &*tmp) {
                    break;
                }
                // `v[i]` is still less than `tmp`: shift it down and advance the hole.
                ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
                hole.dest = &mut v[i];
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }

    // When dropped, copies from `src` into `dest`.
    struct InsertionHole<T> {
        src: *const T,
        dest: *mut T,
    }

    impl<T> Drop for InsertionHole<T> {
        fn drop(&mut self) {
            unsafe {
                ptr::copy_nonoverlapping(self.src, self.dest, 1);
            }
        }
    }
}
951 
/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// The merge is stable: on ties the element from the left run is emitted first.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
#[cfg(not(no_global_oom_handling))]
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    let v = v.as_mut_ptr();
    // SAFETY: `mid <= len` per the caller contract, so both pointers stay
    // within (or one past the end of) the slice's allocation.
    let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };

    // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
    // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
    // copying the lesser (or greater) one into `v`.
    //
    // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
    // consumed first, then we must copy whatever is left of the shorter run into the remaining
    // hole in `v`.
    //
    // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
    // 1. Protects integrity of `v` from panics in `is_less`.
    // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
    //
    // Panic safety:
    //
    // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
    // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
    // object it initially held exactly once.
    let mut hole;

    if mid <= len - mid {
        // The left run is shorter.
        unsafe {
            ptr::copy_nonoverlapping(v, buf, mid);
            hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
        }

        // Initially, these pointers point to the beginnings of their arrays.
        let left = &mut hole.start;
        let mut right = v_mid;
        let out = &mut hole.dest;

        while *left < hole.end && right < v_end {
            // Consume the lesser side.
            // If equal, prefer the left run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right, &**left) {
                    get_and_increment(&mut right)
                } else {
                    get_and_increment(left)
                };
                ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
            }
        }
    } else {
        // The right run is shorter. Merge backwards (greatest elements first)
        // so that the hole being filled never overlaps unconsumed input.
        unsafe {
            ptr::copy_nonoverlapping(v_mid, buf, len - mid);
            hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
        }

        // Initially, these pointers point past the ends of their arrays.
        let left = &mut hole.dest;
        let right = &mut hole.end;
        let mut out = v_end;

        while v < *left && buf < *right {
            // Consume the greater side.
            // If equal, prefer the right run to maintain stability.
            unsafe {
                let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
                    decrement_and_get(left)
                } else {
                    decrement_and_get(right)
                };
                ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
            }
        }
    }
    // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
    // it will now be copied into the hole in `v`.

    // Returns the pointer's current value, then advances it by one element.
    unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
        let old = *ptr;
        *ptr = unsafe { ptr.offset(1) };
        old
    }

    // Moves the pointer back by one element, then returns the new value.
    unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
        *ptr = unsafe { ptr.offset(-1) };
        *ptr
    }

    // When dropped, copies the range `start..end` into `dest..`.
    struct MergeHole<T> {
        start: *mut T,
        end: *mut T,
        dest: *mut T,
    }

    impl<T> Drop for MergeHole<T> {
        fn drop(&mut self) {
            // `T` is not a zero-sized type, and these are pointers into a slice's elements.
            unsafe {
                let len = self.end.sub_ptr(self.start);
                ptr::copy_nonoverlapping(self.start, self.dest, len);
            }
        }
    }
}
1067 
/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
///
/// `is_less` is the strict "less than" comparator that defines the order; runs
/// are detected and merged with it so the overall sort is stable.
#[cfg(not(no_global_oom_handling))]
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;
    // Very short runs are extended using insertion sort to span at least this many elements.
    const MIN_RUN: usize = 10;

    // Sorting has no meaningful behavior on zero-sized types.
    if size_of::<T>() == 0 {
        return;
    }

    let len = v.len();

    // Short arrays get sorted in-place via insertion sort to avoid allocations.
    if len <= MAX_INSERTION {
        if len >= 2 {
            // Insert each element, right to left, into the sorted suffix after it.
            for i in (0..len - 1).rev() {
                insert_head(&mut v[i..], &mut is_less);
            }
        }
        return;
    }

    // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
    // shallow copies of the contents of `v` without risking the dtors running on copies if
    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
    // which will always have length at most `len / 2`.
    let mut buf = Vec::with_capacity(len / 2);

    // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
    // strange decision, but consider the fact that merges more often go in the opposite direction
    // (forwards). According to benchmarks, merging forwards is slightly faster than merging
    // backwards. To conclude, identifying runs by traversing backwards improves performance.
    let mut runs = vec![];
    let mut end = len;
    while end > 0 {
        // Find the next natural run, and reverse it if it's strictly descending.
        let mut start = end - 1;
        if start > 0 {
            start -= 1;
            unsafe {
                // SAFETY: all indices here stay within `0..end <= len`.
                if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
                    // Strictly descending run: extend it leftwards, then reverse it
                    // in place to make it non-descending. Strictness matters for
                    // stability — equal elements must not be reversed.
                    while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
                        start -= 1;
                    }
                    v[start..end].reverse();
                } else {
                    // Non-descending run: simply extend it leftwards.
                    while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
                    {
                        start -= 1;
                    }
                }
            }
        }

        // Insert some more elements into the run if it's too short. Insertion sort is faster than
        // merge sort on short sequences, so this significantly improves performance.
        while start > 0 && end - start < MIN_RUN {
            start -= 1;
            insert_head(&mut v[start..end], &mut is_less);
        }

        // Push this run onto the stack.
        runs.push(Run { start, len: end - start });
        end = start;

        // Merge some pairs of adjacent runs to satisfy the invariants.
        while let Some(r) = collapse(&runs) {
            // `runs` is stored back-to-front: `runs[r + 1]` is the run to the
            // left of `runs[r]` in `v`.
            let left = runs[r + 1];
            let right = runs[r];
            unsafe {
                merge(
                    &mut v[left.start..right.start + right.len],
                    left.len,
                    buf.as_mut_ptr(),
                    &mut is_less,
                );
            }
            runs[r] = Run { start: left.start, len: left.len + right.len };
            runs.remove(r + 1);
        }
    }

    // Finally, exactly one run must remain in the stack.
    debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);

    // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
    // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
    // algorithm should continue building a new run instead, `None` is returned.
    //
    // TimSort is infamous for its buggy implementations, as described here:
    // http://envisage-project.eu/timsort-specification-and-verification/
    //
    // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
    // Enforcing them on just top three is not sufficient to ensure that the invariants will still
    // hold for *all* runs in the stack.
    //
    // This function correctly checks invariants for the top four runs. Additionally, if the top
    // run starts at index 0, it will always demand a merge operation until the stack is fully
    // collapsed, in order to complete the sort.
    #[inline]
    fn collapse(runs: &[Run]) -> Option<usize> {
        let n = runs.len();
        if n >= 2
            && (runs[n - 1].start == 0
                || runs[n - 2].len <= runs[n - 1].len
                || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
                || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
        {
            if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
        } else {
            None
        }
    }

    // A contiguous run `v[start..start + len]` that is already sorted.
    #[derive(Clone, Copy)]
    struct Run {
        start: usize,
        len: usize,
    }
}
1205