1 //! Traits for writing parallel programs using an iterator-style interface
2 //!
3 //! You will rarely need to interact with this module directly unless you
4 //! need to name one of the iterator types.
5 //!
6 //! Parallel iterators make it easy to write iterator-like chains that
7 //! execute in parallel: typically all you have to do is convert the
8 //! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
9 //! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
10 //! example, to compute the sum of the squares of a sequence of
11 //! integers, one might write:
12 //!
13 //! ```rust
14 //! use rayon::prelude::*;
15 //! fn sum_of_squares(input: &[i32]) -> i32 {
16 //!     input.par_iter()
17 //!          .map(|i| i * i)
18 //!          .sum()
19 //! }
20 //! ```
21 //!
22 //! Or, to increment all the integers in a slice, you could write:
23 //!
24 //! ```rust
25 //! use rayon::prelude::*;
26 //! fn increment_all(input: &mut [i32]) {
27 //!     input.par_iter_mut()
28 //!          .for_each(|p| *p += 1);
29 //! }
30 //! ```
31 //!
32 //! To use parallel iterators, first import the traits by adding
33 //! something like `use rayon::prelude::*` to your module. You can
34 //! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
35 //! parallel iterator. Like a [regular iterator][], parallel
36 //! iterators work by first constructing a computation and then
37 //! executing it.
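//!
//! For instance, in the small sketch below, nothing actually runs until the
//! final consuming call:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // Building the chain is cheap; no work has happened yet.
//! let lazy = (0..10).into_par_iter().map(|i| i * 10);
//!
//! // Only a consuming method such as `collect()` executes it, in parallel.
//! let v: Vec<i32> = lazy.collect();
//! assert_eq!(v, [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]);
//! ```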
38 //!
39 //! In addition to `par_iter()` and friends, some types offer other
40 //! ways to create (or consume) parallel iterators:
41 //!
42 //! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
43 //!   `par_windows`, as well as various parallel sorting
44 //!   operations. See [the `ParallelSlice` trait] for the full list.
45 //! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
46 //!   See [the `ParallelString` trait] for the full list.
47 //! - Various collections offer [`par_extend`], which grows a
48 //!   collection given a parallel iterator. (If you don't have a
49 //!   collection to extend, you can use [`collect()`] to create a new
50 //!   one from scratch.)
51 //!
52 //! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
53 //! [the `ParallelString` trait]: ../str/trait.ParallelString.html
54 //! [`par_extend`]: trait.ParallelExtend.html
55 //! [`collect()`]: trait.ParallelIterator.html#method.collect
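//!
//! For example, `par_extend` can grow an existing collection from a parallel
//! iterator (a brief sketch using `Vec<i32>`):
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! let mut squares = vec![0];
//! // Extend the vector with squares computed in parallel.
//! squares.par_extend((1..5).into_par_iter().map(|i| i * i));
//! squares.sort();
//! assert_eq!(squares, [0, 1, 4, 9, 16]);
//! ```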
56 //!
57 //! To see the full range of methods available on parallel iterators,
58 //! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
59 //! traits.
60 //!
61 //! If you'd like to build a custom parallel iterator, or to write your own
62 //! combinator, then check out the [split] function and the [plumbing] module.
63 //!
64 //! [regular iterator]: https://doc.rust-lang.org/std/iter/trait.Iterator.html
65 //! [`ParallelIterator`]: trait.ParallelIterator.html
66 //! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
67 //! [split]: fn.split.html
68 //! [plumbing]: plumbing/index.html
69 //!
70 //! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
71 //! has been deliberately obscured from the public API.  This trait is intended
72 //! to mirror the unstable `std::ops::Try` with implementations for `Option` and
73 //! `Result`, where `Some`/`Ok` values will let those iterators continue, but
74 //! `None`/`Err` values will exit early.
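//!
//! For example, a small sketch of the early-exit behavior with `Option`:
//!
//! ```rust
//! use rayon::prelude::*;
//!
//! // `Some(())` lets iteration continue; a `None` would stop it early.
//! let ok = (0..1_000).into_par_iter()
//!     .try_for_each(|x| if x < 2_000 { Some(()) } else { None });
//! assert_eq!(ok, Some(()));
//! ```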
75 //!
76 //! A note about object safety: It is currently _not_ possible to wrap
77 //! a `ParallelIterator` (or any trait that depends on it) using a
78 //! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
79 //! because `ParallelIterator` is **not object-safe**.
80 //! (This keeps the implementation simpler and allows extra optimizations.)
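//!
//! If you need to return one of two different parallel iterator types from a
//! function, the re-exported `Either` type is a common alternative to boxing:
//! it implements `ParallelIterator` when both of its variants are parallel
//! iterators over the same item type. A minimal sketch:
//!
//! ```rust
//! use rayon::prelude::*;
//! use rayon::iter::Either;
//!
//! fn evens_or_odds(evens: bool) -> impl ParallelIterator<Item = i32> {
//!     if evens {
//!         Either::Left((0..10).into_par_iter().filter(|x| x % 2 == 0))
//!     } else {
//!         Either::Right((0..10).into_par_iter().filter(|x| x % 2 != 0))
//!     }
//! }
//!
//! assert_eq!(evens_or_odds(true).sum::<i32>(), 20);
//! ```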
81 
82 use self::plumbing::*;
83 use self::private::Try;
84 pub use either::Either;
85 use std::cmp::Ordering;
86 use std::collections::LinkedList;
87 use std::iter::{Product, Sum};
88 use std::ops::{Fn, RangeBounds};
89 
90 pub mod plumbing;
91 
92 #[cfg(test)]
93 mod test;
94 
95 // There is a method to the madness here:
96 //
97 // - These modules are private but expose certain types to the end-user
98 //   (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the
99 //   public API surface of the `ParallelIterator` traits.
100 // - In **this** module, those public types are always used unprefixed, which forces
101 //   us to add a `pub use` and helps identify if we missed anything.
102 // - In contrast, items that appear **only** in the body of a method,
103 //   e.g. `find::find()`, are always used **prefixed**, so that they
104 //   can be readily distinguished.
105 
106 mod blocks;
107 mod chain;
108 mod chunks;
109 mod cloned;
110 mod collect;
111 mod copied;
112 mod empty;
113 mod enumerate;
114 mod extend;
115 mod filter;
116 mod filter_map;
117 mod find;
118 mod find_first_last;
119 mod flat_map;
120 mod flat_map_iter;
121 mod flatten;
122 mod flatten_iter;
123 mod fold;
124 mod fold_chunks;
125 mod fold_chunks_with;
126 mod for_each;
127 mod from_par_iter;
128 mod inspect;
129 mod interleave;
130 mod interleave_shortest;
131 mod intersperse;
132 mod len;
133 mod map;
134 mod map_with;
135 mod multizip;
136 mod noop;
137 mod once;
138 mod panic_fuse;
139 mod par_bridge;
140 mod positions;
141 mod product;
142 mod reduce;
143 mod repeat;
144 mod rev;
145 mod skip;
146 mod skip_any;
147 mod skip_any_while;
148 mod splitter;
149 mod step_by;
150 mod sum;
151 mod take;
152 mod take_any;
153 mod take_any_while;
154 mod try_fold;
155 mod try_reduce;
156 mod try_reduce_with;
157 mod unzip;
158 mod update;
159 mod walk_tree;
160 mod while_some;
161 mod zip;
162 mod zip_eq;
163 
164 pub use self::{
165     blocks::{ExponentialBlocks, UniformBlocks},
166     chain::Chain,
167     chunks::Chunks,
168     cloned::Cloned,
169     copied::Copied,
170     empty::{empty, Empty},
171     enumerate::Enumerate,
172     filter::Filter,
173     filter_map::FilterMap,
174     flat_map::FlatMap,
175     flat_map_iter::FlatMapIter,
176     flatten::Flatten,
177     flatten_iter::FlattenIter,
178     fold::{Fold, FoldWith},
179     fold_chunks::FoldChunks,
180     fold_chunks_with::FoldChunksWith,
181     inspect::Inspect,
182     interleave::Interleave,
183     interleave_shortest::InterleaveShortest,
184     intersperse::Intersperse,
185     len::{MaxLen, MinLen},
186     map::Map,
187     map_with::{MapInit, MapWith},
188     multizip::MultiZip,
189     once::{once, Once},
190     panic_fuse::PanicFuse,
191     par_bridge::{IterBridge, ParallelBridge},
192     positions::Positions,
193     repeat::{repeat, repeatn, Repeat, RepeatN},
194     rev::Rev,
195     skip::Skip,
196     skip_any::SkipAny,
197     skip_any_while::SkipAnyWhile,
198     splitter::{split, Split},
199     step_by::StepBy,
200     take::Take,
201     take_any::TakeAny,
202     take_any_while::TakeAnyWhile,
203     try_fold::{TryFold, TryFoldWith},
204     update::Update,
205     walk_tree::{
206         walk_tree, walk_tree_postfix, walk_tree_prefix, WalkTree, WalkTreePostfix, WalkTreePrefix,
207     },
208     while_some::WhileSome,
209     zip::Zip,
210     zip_eq::ZipEq,
211 };
212 
213 /// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`].
214 ///
215 /// By implementing `IntoParallelIterator` for a type, you define how it will
216 /// be converted into a parallel iterator. This is a parallel version of the standard
217 /// library's [`std::iter::IntoIterator`] trait.
218 ///
219 /// [`ParallelIterator`]: trait.ParallelIterator.html
220 /// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html
221 pub trait IntoParallelIterator {
222     /// The parallel iterator type that will be created.
223     type Iter: ParallelIterator<Item = Self::Item>;
224 
225     /// The type of item that the parallel iterator will produce.
226     type Item: Send;
227 
228     /// Converts `self` into a parallel iterator.
229     ///
230     /// # Examples
231     ///
232     /// ```
233     /// use rayon::prelude::*;
234     ///
235     /// println!("counting in parallel:");
236     /// (0..100).into_par_iter()
237     ///     .for_each(|i| println!("{}", i));
238     /// ```
239     ///
240     /// This conversion is often implicit for arguments to methods like [`zip`].
241     ///
242     /// ```
243     /// use rayon::prelude::*;
244     ///
245     /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect();
246     /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]);
247     /// ```
248     ///
249     /// [`zip`]: trait.IndexedParallelIterator.html#method.zip
250     fn into_par_iter(self) -> Self::Iter;
251 }
252 
253 /// `IntoParallelRefIterator` implements the conversion to a
254 /// [`ParallelIterator`], providing shared references to the data.
255 ///
256 /// This is a parallel version of the `iter()` method
257 /// defined by various collections.
258 ///
259 /// This trait is automatically implemented
260 /// `for I where &I: IntoParallelIterator`. In most cases, users
261 /// will want to implement [`IntoParallelIterator`] rather than implement
262 /// this trait directly.
263 ///
264 /// [`ParallelIterator`]: trait.ParallelIterator.html
265 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
266 pub trait IntoParallelRefIterator<'data> {
267     /// The type of the parallel iterator that will be returned.
268     type Iter: ParallelIterator<Item = Self::Item>;
269 
270     /// The type of item that the parallel iterator will produce.
271     /// This will typically be an `&'data T` reference type.
272     type Item: Send + 'data;
273 
274     /// Converts `self` into a parallel iterator.
275     ///
276     /// # Examples
277     ///
278     /// ```
279     /// use rayon::prelude::*;
280     ///
281     /// let v: Vec<_> = (0..100).collect();
282     /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2);
283     ///
284     /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`,
285     /// // producing the exact same references.
286     /// assert!(v.par_iter().zip(&v)
287     ///          .all(|(a, b)| std::ptr::eq(a, b)));
288     /// ```
289     fn par_iter(&'data self) -> Self::Iter;
290 }
291 
292 impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I
293 where
294     &'data I: IntoParallelIterator,
295 {
296     type Iter = <&'data I as IntoParallelIterator>::Iter;
297     type Item = <&'data I as IntoParallelIterator>::Item;
298 
299     fn par_iter(&'data self) -> Self::Iter {
300         self.into_par_iter()
301     }
302 }
303 
304 /// `IntoParallelRefMutIterator` implements the conversion to a
305 /// [`ParallelIterator`], providing mutable references to the data.
306 ///
307 /// This is a parallel version of the `iter_mut()` method
308 /// defined by various collections.
309 ///
310 /// This trait is automatically implemented
311 /// `for I where &mut I: IntoParallelIterator`. In most cases, users
312 /// will want to implement [`IntoParallelIterator`] rather than implement
313 /// this trait directly.
314 ///
315 /// [`ParallelIterator`]: trait.ParallelIterator.html
316 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html
317 pub trait IntoParallelRefMutIterator<'data> {
318     /// The type of iterator that will be created.
319     type Iter: ParallelIterator<Item = Self::Item>;
320 
321     /// The type of item that will be produced; this is typically an
322     /// `&'data mut T` reference.
323     type Item: Send + 'data;
324 
325     /// Creates the parallel iterator from `self`.
326     ///
327     /// # Examples
328     ///
329     /// ```
330     /// use rayon::prelude::*;
331     ///
332     /// let mut v = vec![0usize; 5];
333     /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i);
334     /// assert_eq!(v, [0, 1, 2, 3, 4]);
335     /// ```
336     fn par_iter_mut(&'data mut self) -> Self::Iter;
337 }
338 
339 impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I
340 where
341     &'data mut I: IntoParallelIterator,
342 {
343     type Iter = <&'data mut I as IntoParallelIterator>::Iter;
344     type Item = <&'data mut I as IntoParallelIterator>::Item;
345 
346     fn par_iter_mut(&'data mut self) -> Self::Iter {
347         self.into_par_iter()
348     }
349 }
350 
351 /// Parallel version of the standard iterator trait.
352 ///
353 /// The combinators on this trait are available on **all** parallel
354 /// iterators.  Additional methods can be found on the
355 /// [`IndexedParallelIterator`] trait: those methods are only
356 /// available for parallel iterators where the number of items is
357 /// known in advance (so, e.g., after invoking `filter`, those methods
358 /// become unavailable).
359 ///
360 /// For examples of using parallel iterators, see [the docs on the
361 /// `iter` module][iter].
362 ///
363 /// [iter]: index.html
364 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
365 pub trait ParallelIterator: Sized + Send {
366     /// The type of item that this parallel iterator produces.
367     /// For example, if you use the [`for_each`] method, this is the type of
368     /// item that your closure will be invoked with.
369     ///
370     /// [`for_each`]: #method.for_each
371     type Item: Send;
372 
373     /// Executes `OP` on each item produced by the iterator, in parallel.
374     ///
375     /// # Examples
376     ///
377     /// ```
378     /// use rayon::prelude::*;
379     ///
380     /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x));
381     /// ```
382     fn for_each<OP>(self, op: OP)
383     where
384         OP: Fn(Self::Item) + Sync + Send,
385     {
386         for_each::for_each(self, &op)
387     }
388 
389     /// Executes `OP` on the given `init` value with each item produced by
390     /// the iterator, in parallel.
391     ///
392     /// The `init` value will be cloned only as needed to be paired with
393     /// the group of items in each rayon job.  It does not require the type
394     /// to be `Sync`.
395     ///
396     /// # Examples
397     ///
398     /// ```
399     /// use std::sync::mpsc::channel;
400     /// use rayon::prelude::*;
401     ///
402     /// let (sender, receiver) = channel();
403     ///
404     /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap());
405     ///
406     /// let mut res: Vec<_> = receiver.iter().collect();
407     ///
408     /// res.sort();
409     ///
410     /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
411     /// ```
412     fn for_each_with<OP, T>(self, init: T, op: OP)
413     where
414         OP: Fn(&mut T, Self::Item) + Sync + Send,
415         T: Send + Clone,
416     {
417         self.map_with(init, op).collect()
418     }
419 
420     /// Executes `OP` on a value returned by `init` with each item produced by
421     /// the iterator, in parallel.
422     ///
423     /// The `init` function will be called only as needed for a value to be
424     /// paired with the group of items in each rayon job.  There is no
425     /// constraint on that returned type at all!
426     ///
427     /// # Examples
428     ///
429     /// ```
430     /// use rand::Rng;
431     /// use rayon::prelude::*;
432     ///
433     /// let mut v = vec![0u8; 1_000_000];
434     ///
435     /// v.par_chunks_mut(1000)
436     ///     .for_each_init(
437     ///         || rand::thread_rng(),
438     ///         |rng, chunk| rng.fill(chunk),
439     ///     );
440     ///
441     /// // There's a remote chance that this will fail...
442     /// for i in 0u8..=255 {
443     ///     assert!(v.contains(&i));
444     /// }
445     /// ```
446     fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP)
447     where
448         OP: Fn(&mut T, Self::Item) + Sync + Send,
449         INIT: Fn() -> T + Sync + Send,
450     {
451         self.map_init(init, op).collect()
452     }
453 
454     /// Executes a fallible `OP` on each item produced by the iterator, in parallel.
455     ///
456     /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to
457     /// stop processing the rest of the items in the iterator as soon as
458     /// possible, and we will return that terminating value.  Otherwise, we will
459     /// return an empty `Result::Ok(())` or `Option::Some(())`.  If there are
460     /// multiple errors in parallel, it is not specified which will be returned.
461     ///
462     /// # Examples
463     ///
464     /// ```
465     /// use rayon::prelude::*;
466     /// use std::io::{self, Write};
467     ///
468     /// // This will stop iteration early if there's any write error, like
469     /// // having piped output get closed on the other end.
470     /// (0..100).into_par_iter()
471     ///     .try_for_each(|x| writeln!(io::stdout(), "{:?}", x))
472     ///     .expect("expected no write errors");
473     /// ```
474     fn try_for_each<OP, R>(self, op: OP) -> R
475     where
476         OP: Fn(Self::Item) -> R + Sync + Send,
477         R: Try<Output = ()> + Send,
478     {
479         fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
480             R::from_output(())
481         }
482 
483         self.map(op).try_reduce(<()>::default, ok)
484     }
485 
486     /// Executes a fallible `OP` on the given `init` value with each item
487     /// produced by the iterator, in parallel.
488     ///
489     /// This combines the `init` semantics of [`for_each_with()`] and the
490     /// failure semantics of [`try_for_each()`].
491     ///
492     /// [`for_each_with()`]: #method.for_each_with
493     /// [`try_for_each()`]: #method.try_for_each
494     ///
495     /// # Examples
496     ///
497     /// ```
498     /// use std::sync::mpsc::channel;
499     /// use rayon::prelude::*;
500     ///
501     /// let (sender, receiver) = channel();
502     ///
503     /// (0..5).into_par_iter()
504     ///     .try_for_each_with(sender, |s, x| s.send(x))
505     ///     .expect("expected no send errors");
506     ///
507     /// let mut res: Vec<_> = receiver.iter().collect();
508     ///
509     /// res.sort();
510     ///
511     /// assert_eq!(&res[..], &[0, 1, 2, 3, 4])
512     /// ```
513     fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R
514     where
515         OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
516         T: Send + Clone,
517         R: Try<Output = ()> + Send,
518     {
519         fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
520             R::from_output(())
521         }
522 
523         self.map_with(init, op).try_reduce(<()>::default, ok)
524     }
525 
526     /// Executes a fallible `OP` on a value returned by `init` with each item
527     /// produced by the iterator, in parallel.
528     ///
529     /// This combines the `init` semantics of [`for_each_init()`] and the
530     /// failure semantics of [`try_for_each()`].
531     ///
532     /// [`for_each_init()`]: #method.for_each_init
533     /// [`try_for_each()`]: #method.try_for_each
534     ///
535     /// # Examples
536     ///
537     /// ```
538     /// use rand::Rng;
539     /// use rayon::prelude::*;
540     ///
541     /// let mut v = vec![0u8; 1_000_000];
542     ///
543     /// v.par_chunks_mut(1000)
544     ///     .try_for_each_init(
545     ///         || rand::thread_rng(),
546     ///         |rng, chunk| rng.try_fill(chunk),
547     ///     )
548     ///     .expect("expected no rand errors");
549     ///
550     /// // There's a remote chance that this will fail...
551     /// for i in 0u8..=255 {
552     ///     assert!(v.contains(&i));
553     /// }
554     /// ```
555     fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R
556     where
557         OP: Fn(&mut T, Self::Item) -> R + Sync + Send,
558         INIT: Fn() -> T + Sync + Send,
559         R: Try<Output = ()> + Send,
560     {
561         fn ok<R: Try<Output = ()>>(_: (), _: ()) -> R {
562             R::from_output(())
563         }
564 
565         self.map_init(init, op).try_reduce(<()>::default, ok)
566     }
567 
568     /// Counts the number of items in this parallel iterator.
569     ///
570     /// # Examples
571     ///
572     /// ```
573     /// use rayon::prelude::*;
574     ///
575     /// let count = (0..100).into_par_iter().count();
576     ///
577     /// assert_eq!(count, 100);
578     /// ```
579     fn count(self) -> usize {
580         fn one<T>(_: T) -> usize {
581             1
582         }
583 
584         self.map(one).sum()
585     }
586 
587     /// Applies `map_op` to each item of this iterator, producing a new
588     /// iterator with the results.
589     ///
590     /// # Examples
591     ///
592     /// ```
593     /// use rayon::prelude::*;
594     ///
595     /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2);
596     ///
597     /// let doubles: Vec<_> = par_iter.collect();
598     ///
599     /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
600     /// ```
601     fn map<F, R>(self, map_op: F) -> Map<Self, F>
602     where
603         F: Fn(Self::Item) -> R + Sync + Send,
604         R: Send,
605     {
606         Map::new(self, map_op)
607     }
608 
609     /// Applies `map_op` to the given `init` value with each item of this
610     /// iterator, producing a new iterator with the results.
611     ///
612     /// The `init` value will be cloned only as needed to be paired with
613     /// the group of items in each rayon job.  It does not require the type
614     /// to be `Sync`.
615     ///
616     /// # Examples
617     ///
618     /// ```
619     /// use std::sync::mpsc::channel;
620     /// use rayon::prelude::*;
621     ///
622     /// let (sender, receiver) = channel();
623     ///
624     /// let a: Vec<_> = (0..5)
625     ///                 .into_par_iter()            // iterating over i32
626     ///                 .map_with(sender, |s, x| {
627     ///                     s.send(x).unwrap();     // sending i32 values through the channel
628     ///                     x                       // returning i32
629     ///                 })
630     ///                 .collect();                 // collecting the returned values into a vector
631     ///
632     /// let mut b: Vec<_> = receiver.iter()         // iterating over the values in the channel
633     ///                             .collect();     // and collecting them
634     /// b.sort();
635     ///
636     /// assert_eq!(a, b);
637     /// ```
638     fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F>
639     where
640         F: Fn(&mut T, Self::Item) -> R + Sync + Send,
641         T: Send + Clone,
642         R: Send,
643     {
644         MapWith::new(self, init, map_op)
645     }
646 
647     /// Applies `map_op` to a value returned by `init` with each item of this
648     /// iterator, producing a new iterator with the results.
649     ///
650     /// The `init` function will be called only as needed for a value to be
651     /// paired with the group of items in each rayon job.  There is no
652     /// constraint on that returned type at all!
653     ///
654     /// # Examples
655     ///
656     /// ```
657     /// use rand::Rng;
658     /// use rayon::prelude::*;
659     ///
660     /// let a: Vec<_> = (1i32..1_000_000)
661     ///     .into_par_iter()
662     ///     .map_init(
663     ///         || rand::thread_rng(),  // get the thread-local RNG
664     ///         |rng, x| if rng.gen() { // randomly negate items
665     ///             -x
666     ///         } else {
667     ///             x
668     ///         },
669     ///     ).collect();
670     ///
671     /// // There's a remote chance that this will fail...
672     /// assert!(a.iter().any(|&x| x < 0));
673     /// assert!(a.iter().any(|&x| x > 0));
674     /// ```
675     fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F>
676     where
677         F: Fn(&mut T, Self::Item) -> R + Sync + Send,
678         INIT: Fn() -> T + Sync + Send,
679         R: Send,
680     {
681         MapInit::new(self, init, map_op)
682     }
683 
684     /// Creates an iterator which clones all of its elements.  This may be
685     /// useful when you have an iterator over `&T`, but you need `T`, and
686     /// that type implements `Clone`. See also [`copied()`].
687     ///
688     /// [`copied()`]: #method.copied
689     ///
690     /// # Examples
691     ///
692     /// ```
693     /// use rayon::prelude::*;
694     ///
695     /// let a = [1, 2, 3];
696     ///
697     /// let v_cloned: Vec<_> = a.par_iter().cloned().collect();
698     ///
699     /// // cloned is the same as .map(|&x| x), for integers
700     /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
701     ///
702     /// assert_eq!(v_cloned, vec![1, 2, 3]);
703     /// assert_eq!(v_map, vec![1, 2, 3]);
704     /// ```
705     fn cloned<'a, T>(self) -> Cloned<Self>
706     where
707         T: 'a + Clone + Send,
708         Self: ParallelIterator<Item = &'a T>,
709     {
710         Cloned::new(self)
711     }
712 
713     /// Creates an iterator which copies all of its elements.  This may be
714     /// useful when you have an iterator over `&T`, but you need `T`, and
715     /// that type implements `Copy`. See also [`cloned()`].
716     ///
717     /// [`cloned()`]: #method.cloned
718     ///
719     /// # Examples
720     ///
721     /// ```
722     /// use rayon::prelude::*;
723     ///
724     /// let a = [1, 2, 3];
725     ///
726     /// let v_copied: Vec<_> = a.par_iter().copied().collect();
727     ///
728     /// // copied is the same as .map(|&x| x), for integers
729     /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect();
730     ///
731     /// assert_eq!(v_copied, vec![1, 2, 3]);
732     /// assert_eq!(v_map, vec![1, 2, 3]);
733     /// ```
734     fn copied<'a, T>(self) -> Copied<Self>
735     where
736         T: 'a + Copy + Send,
737         Self: ParallelIterator<Item = &'a T>,
738     {
739         Copied::new(self)
740     }
741 
742     /// Applies `inspect_op` to a reference to each item of this iterator,
743     /// producing a new iterator passing through the original items.  This is
744     /// often useful for debugging to see what's happening in iterator stages.
745     ///
746     /// # Examples
747     ///
748     /// ```
749     /// use rayon::prelude::*;
750     ///
751     /// let a = [1, 4, 2, 3];
752     ///
753     /// // this iterator sequence is complex.
754     /// let sum = a.par_iter()
755     ///             .cloned()
756     ///             .filter(|&x| x % 2 == 0)
757     ///             .reduce(|| 0, |sum, i| sum + i);
758     ///
759     /// println!("{}", sum);
760     ///
761     /// // let's add some inspect() calls to investigate what's happening
762     /// let sum = a.par_iter()
763     ///             .cloned()
764     ///             .inspect(|x| println!("about to filter: {}", x))
765     ///             .filter(|&x| x % 2 == 0)
766     ///             .inspect(|x| println!("made it through filter: {}", x))
767     ///             .reduce(|| 0, |sum, i| sum + i);
768     ///
769     /// println!("{}", sum);
770     /// ```
771     fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP>
772     where
773         OP: Fn(&Self::Item) + Sync + Send,
774     {
775         Inspect::new(self, inspect_op)
776     }
777 
778     /// Mutates each item of this iterator before yielding it.
779     ///
780     /// # Examples
781     ///
782     /// ```
783     /// use rayon::prelude::*;
784     ///
785     /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;});
786     ///
787     /// let doubles: Vec<_> = par_iter.collect();
788     ///
789     /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]);
790     /// ```
791     fn update<F>(self, update_op: F) -> Update<Self, F>
792     where
793         F: Fn(&mut Self::Item) + Sync + Send,
794     {
795         Update::new(self, update_op)
796     }
797 
798     /// Applies `filter_op` to each item of this iterator, producing a new
799     /// iterator with only the items that gave `true` results.
800     ///
801     /// # Examples
802     ///
803     /// ```
804     /// use rayon::prelude::*;
805     ///
806     /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0);
807     ///
808     /// let even_numbers: Vec<_> = par_iter.collect();
809     ///
810     /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]);
811     /// ```
812     fn filter<P>(self, filter_op: P) -> Filter<Self, P>
813     where
814         P: Fn(&Self::Item) -> bool + Sync + Send,
815     {
816         Filter::new(self, filter_op)
817     }
818 
819     /// Applies `filter_op` to each item of this iterator to get an `Option`,
820     /// producing a new iterator with only the items from `Some` results.
821     ///
822     /// # Examples
823     ///
824     /// ```
825     /// use rayon::prelude::*;
826     ///
827     /// let mut par_iter = (0..10).into_par_iter()
828     ///                         .filter_map(|x| {
829     ///                             if x % 2 == 0 { Some(x * 3) }
830     ///                             else { None }
831     ///                         });
832     ///
833     /// let even_numbers: Vec<_> = par_iter.collect();
834     ///
835     /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]);
836     /// ```
837     fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P>
838     where
839         P: Fn(Self::Item) -> Option<R> + Sync + Send,
840         R: Send,
841     {
842         FilterMap::new(self, filter_op)
843     }
844 
845     /// Applies `map_op` to each item of this iterator to get nested parallel iterators,
846     /// producing a new parallel iterator that flattens these back into one.
847     ///
848     /// See also [`flat_map_iter`](#method.flat_map_iter).
849     ///
850     /// # Examples
851     ///
852     /// ```
853     /// use rayon::prelude::*;
854     ///
855     /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
856     ///
857     /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec());
858     ///
859     /// let vec: Vec<_> = par_iter.collect();
860     ///
861     /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
862     /// ```
863     fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F>
864     where
865         F: Fn(Self::Item) -> PI + Sync + Send,
866         PI: IntoParallelIterator,
867     {
868         FlatMap::new(self, map_op)
869     }
870 
871     /// Applies `map_op` to each item of this iterator to get nested serial iterators,
872     /// producing a new parallel iterator that flattens these back into one.
873     ///
874     /// # `flat_map_iter` versus `flat_map`
875     ///
876     /// These two methods are similar but behave slightly differently. With [`flat_map`],
877     /// each of the nested iterators must be a parallel iterator, and they will be further
878     /// split up with nested parallelism. With `flat_map_iter`, each nested iterator is a
879     /// sequential `Iterator`, and we only parallelize _between_ them, while the items
880     /// produced by each nested iterator are processed sequentially.
881     ///
882     /// When choosing between these methods, consider whether nested parallelism suits the
883     /// potential iterators at hand. If there's little computation involved, or its length
884     /// is much less than the outer parallel iterator, then it may perform better to avoid
885     /// the overhead of parallelism, just flattening sequentially with `flat_map_iter`.
886     /// If there is a lot of computation, potentially outweighing the outer parallel
887     /// iterator, then the nested parallelism of `flat_map` may be worthwhile.
888     ///
889     /// [`flat_map`]: #method.flat_map
890     ///
891     /// # Examples
892     ///
893     /// ```
894     /// use rayon::prelude::*;
895     /// use std::cell::RefCell;
896     ///
897     /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]];
898     ///
899     /// let par_iter = a.par_iter().flat_map_iter(|a| {
900     ///     // The serial iterator doesn't have to be thread-safe, just its items.
901     ///     let cell_iter = RefCell::new(a.iter().cloned());
902     ///     std::iter::from_fn(move || cell_iter.borrow_mut().next())
903     /// });
904     ///
905     /// let vec: Vec<_> = par_iter.collect();
906     ///
907     /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]);
908     /// ```
909     fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F>
910     where
911         F: Fn(Self::Item) -> SI + Sync + Send,
912         SI: IntoIterator,
913         SI::Item: Send,
914     {
915         FlatMapIter::new(self, map_op)
916     }
917 
918     /// An adaptor that flattens parallel-iterable `Item`s into one large iterator.
919     ///
920     /// See also [`flatten_iter`](#method.flatten_iter).
921     ///
922     /// # Examples
923     ///
924     /// ```
925     /// use rayon::prelude::*;
926     ///
927     /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
928     /// let y: Vec<_> = x.into_par_iter().flatten().collect();
929     ///
930     /// assert_eq!(y, vec![1, 2, 3, 4]);
931     /// ```
932     fn flatten(self) -> Flatten<Self>
933     where
934         Self::Item: IntoParallelIterator,
935     {
936         Flatten::new(self)
937     }
938 
939     /// An adaptor that flattens serial-iterable `Item`s into one large iterator.
940     ///
941     /// See also [`flatten`](#method.flatten) and the analogous comparison of
942     /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map).
943     ///
944     /// # Examples
945     ///
946     /// ```
947     /// use rayon::prelude::*;
948     ///
949     /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]];
950     /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect();
951     /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect();
952     ///
953     /// assert_eq!(y, vec![1, 2, 3, 4]);
954     /// ```
955     fn flatten_iter(self) -> FlattenIter<Self>
956     where
957         Self::Item: IntoIterator,
958         <Self::Item as IntoIterator>::Item: Send,
959     {
960         FlattenIter::new(self)
961     }
962 
963     /// Reduces the items in the iterator into one item using `op`.
964     /// The argument `identity` should be a closure that can produce
965     /// "identity" value which may be inserted into the sequence as
966     /// needed to create opportunities for parallel execution. So, for
967     /// example, if you are doing a summation, then `identity()` ought
968     /// to produce something that represents the zero for your type
969     /// (but consider just calling `sum()` in that case).
970     ///
971     /// # Examples
972     ///
973     /// ```
974     /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)`
975     /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)`
976     /// // where the first/second elements are summed separately.
977     /// use rayon::prelude::*;
978     /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
979     ///            .par_iter()        // iterating over &(i32, i32)
980     ///            .cloned()          // iterating over (i32, i32)
981     ///            .reduce(|| (0, 0), // the "identity" is 0 in both columns
982     ///                    |a, b| (a.0 + b.0, a.1 + b.1));
983     /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
984     /// ```
985     ///
986     /// **Note:** unlike a sequential `fold` operation, the order in
987     /// which `op` will be applied to reduce the result is not fully
988     /// specified. So `op` should be [associative] or else the results
989     /// will be non-deterministic. And of course `identity()` should
990     /// produce a true identity.
991     ///
992     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
993     fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item
994     where
995         OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
996         ID: Fn() -> Self::Item + Sync + Send,
997     {
998         reduce::reduce(self, identity, op)
999     }
1000 
1001     /// Reduces the items in the iterator into one item using `op`.
1002     /// If the iterator is empty, `None` is returned; otherwise,
1003     /// `Some` is returned.
1004     ///
1005     /// This version of `reduce` is simple but somewhat less
1006     /// efficient. If possible, it is better to call `reduce()`, which
1007     /// requires an identity element.
1008     ///
1009     /// # Examples
1010     ///
1011     /// ```
1012     /// use rayon::prelude::*;
1013     /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)]
1014     ///            .par_iter()        // iterating over &(i32, i32)
1015     ///            .cloned()          // iterating over (i32, i32)
1016     ///            .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1))
1017     ///            .unwrap();
1018     /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9));
1019     /// ```
1020     ///
1021     /// **Note:** unlike a sequential `fold` operation, the order in
1022     /// which `op` will be applied to reduce the result is not fully
1023     /// specified. So `op` should be [associative] or else the results
1024     /// will be non-deterministic.
1025     ///
1026     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1027     fn reduce_with<OP>(self, op: OP) -> Option<Self::Item>
1028     where
1029         OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,
1030     {
1031         fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> {
1032             move |opt_a, b| match opt_a {
1033                 Some(a) => Some(op(a, b)),
1034                 None => Some(b),
1035             }
1036         }
1037 
1038         fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> {
1039             move |opt_a, opt_b| match (opt_a, opt_b) {
1040                 (Some(a), Some(b)) => Some(op(a, b)),
1041                 (Some(v), None) | (None, Some(v)) => Some(v),
1042                 (None, None) => None,
1043             }
1044         }
1045 
1046         self.fold(<_>::default, opt_fold(&op))
1047             .reduce(<_>::default, opt_reduce(&op))
1048     }
1049 
1050     /// Reduces the items in the iterator into one item using a fallible `op`.
1051     /// The `identity` argument is used the same way as in [`reduce()`].
1052     ///
1053     /// [`reduce()`]: #method.reduce
1054     ///
1055     /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces
1056     /// to one, we will attempt to stop processing the rest of the items in the
1057     /// iterator as soon as possible, and we will return that terminating value.
1058     /// Otherwise, we will return the final reduced `Result::Ok(T)` or
1059     /// `Option::Some(T)`.  If there are multiple errors in parallel, it is not
1060     /// specified which will be returned.
1061     ///
1062     /// # Examples
1063     ///
1064     /// ```
1065     /// use rayon::prelude::*;
1066     ///
1067     /// // Compute the sum of squares, being careful about overflow.
1068     /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> {
1069     ///     iter.into_par_iter()
1070     ///         .map(|i| i.checked_mul(i))            // square each item,
1071     ///         .try_reduce(|| 0, i32::checked_add)   // and add them up!
1072     /// }
1073     /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16));
1074     ///
1075     /// // The sum might overflow
1076     /// assert_eq!(sum_squares(0..10_000), None);
1077     ///
1078     /// // Or the squares might overflow before it even reaches `try_reduce`
1079     /// assert_eq!(sum_squares(1_000_000..1_000_001), None);
1080     /// ```
1081     fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item
1082     where
1083         OP: Fn(T, T) -> Self::Item + Sync + Send,
1084         ID: Fn() -> T + Sync + Send,
1085         Self::Item: Try<Output = T>,
1086     {
1087         try_reduce::try_reduce(self, identity, op)
1088     }
1089 
1090     /// Reduces the items in the iterator into one item using a fallible `op`.
1091     ///
1092     /// Like [`reduce_with()`], if the iterator is empty, `None` is returned;
1093     /// otherwise, `Some` is returned.  Beyond that, it behaves like
1094     /// [`try_reduce()`] for handling `Err`/`None`.
1095     ///
1096     /// [`reduce_with()`]: #method.reduce_with
1097     /// [`try_reduce()`]: #method.try_reduce
1098     ///
1099     /// For instance, with `Option` items, the return value may be:
1100     /// - `None`, the iterator was empty
1101     /// - `Some(None)`, we stopped after encountering `None`.
1102     /// - `Some(Some(x))`, the entire iterator reduced to `x`.
1103     ///
1104     /// With `Result` items, the nesting is more obvious:
1105     /// - `None`, the iterator was empty
1106     /// - `Some(Err(e))`, we stopped after encountering an error `e`.
1107     /// - `Some(Ok(x))`, the entire iterator reduced to `x`.
1108     ///
1109     /// # Examples
1110     ///
1111     /// ```
1112     /// use rayon::prelude::*;
1113     ///
1114     /// let files = ["/dev/null", "/does/not/exist"];
1115     ///
1116     /// // Find the biggest file
1117     /// files.into_par_iter()
1118     ///     .map(|path| std::fs::metadata(path).map(|m| (path, m.len())))
1119     ///     .try_reduce_with(|a, b| {
1120     ///         Ok(if a.1 >= b.1 { a } else { b })
1121     ///     })
1122     ///     .expect("Some value, since the iterator is not empty")
1123     ///     .expect_err("not found");
1124     /// ```
1125     fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item>
1126     where
1127         OP: Fn(T, T) -> Self::Item + Sync + Send,
1128         Self::Item: Try<Output = T>,
1129     {
1130         try_reduce_with::try_reduce_with(self, op)
1131     }
1132 
1133     /// Parallel fold is similar to sequential fold except that the
1134     /// sequence of items may be subdivided before it is
1135     /// folded. Consider a list of numbers like `22 3 77 89 46`. If
1136     /// you used sequential fold to add them (`fold(0, |a,b| a+b)`),
1137     /// you would wind up first adding 0 + 22, then 22 + 3, then 25 +
1138     /// 77, and so forth. The **parallel fold** works similarly except
1139     /// that it first breaks up your list into sublists, and hence
1140     /// instead of yielding up a single sum at the end, it yields up
1141     /// multiple sums. The number of results is nondeterministic, as
1142     /// is the point where the breaks occur.
1143     ///
1144     /// So if we did the same parallel fold (`fold(0, |a,b| a+b)`) on
1145     /// our example list, we might wind up with a sequence of two numbers,
1146     /// like so:
1147     ///
1148     /// ```notrust
1149     /// 22 3 77 89 46
1150     ///       |     |
1151     ///     102   135
1152     /// ```
1153     ///
1154     /// Or perhaps these three numbers:
1155     ///
1156     /// ```notrust
1157     /// 22 3 77 89 46
1158     ///       |  |  |
1159     ///     102 89 46
1160     /// ```
1161     ///
1162     /// In general, Rayon will attempt to find good breaking points
1163     /// that keep all of your cores busy.
1164     ///
1165     /// ### Fold versus reduce
1166     ///
1167     /// The `fold()` and `reduce()` methods each take an identity element
1168     /// and a combining function, but they operate rather differently.
1169     ///
1170     /// `reduce()` requires that the identity function has the same
1171     /// type as the things you are iterating over, and it fully
1172     /// reduces the list of items into a single item. So, for example,
1173     /// imagine we are iterating over a list of bytes `bytes: [128_u8,
1174     /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b:
1175     /// u8| a + b)`, we would get an overflow. This is because `0`,
1176     /// `a`, and `b` here are all bytes, just like the numbers in the
1177     /// list (I wrote the types explicitly above, but those are the
1178     /// only types you can use). To avoid the overflow, we would need
1179     /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a,
1180     /// b| a + b)`, in which case our result would be `256`.
1181     ///
1182     /// In contrast, with `fold()`, the identity function does not
1183     /// have to have the same type as the things you are iterating
1184     /// over, and you potentially get back many results. So, if we
1185     /// continue with the `bytes` example from the previous paragraph,
1186     /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to
1187     /// convert our bytes into `u32`. And of course we might not get
1188     /// back a single sum.
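    ///
    /// A minimal sketch of those two byte-summing approaches:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let bytes = [128_u8, 64_u8, 64_u8];
    ///
    /// // map/reduce: widen each byte before combining, so nothing overflows.
    /// let sum1: u32 = bytes.par_iter().map(|&b| b as u32).reduce(|| 0, |a, b| a + b);
    ///
    /// // fold: the accumulator is a `u32` from the start; then sum the partial results.
    /// let sum2: u32 = bytes.par_iter().fold(|| 0_u32, |acc, &b| acc + b as u32).sum();
    ///
    /// assert_eq!(sum1, 256);
    /// assert_eq!(sum2, 256);
    /// ```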
1189     ///
1190     /// There is a more subtle distinction as well, though it's
1191     /// actually implied by the above points. When you use `reduce()`,
1192     /// your reduction function is sometimes called with values that
1193     /// were never part of your original parallel iterator (for
1194     /// example, both the left and right might be a partial sum). With
1195     /// `fold()`, in contrast, the left value in the fold function is
1196     /// always the accumulator, and the right value is always from
1197     /// your original sequence.
1198     ///
1199     /// ### Fold vs Map/Reduce
1200     ///
1201     /// Fold makes sense if you have some operation where it is
1202     /// cheaper to create groups of elements at a time. For example,
1203     /// imagine collecting characters into a string. If you were going
1204     /// to use map/reduce, you might try this:
1205     ///
1206     /// ```
1207     /// use rayon::prelude::*;
1208     ///
1209     /// let s =
1210     ///     ['a', 'b', 'c', 'd', 'e']
1211     ///     .par_iter()
1212     ///     .map(|c: &char| format!("{}", c))
1213     ///     .reduce(|| String::new(),
1214     ///             |mut a: String, b: String| { a.push_str(&b); a });
1215     ///
1216     /// assert_eq!(s, "abcde");
1217     /// ```
1218     ///
1219     /// Because reduce produces the same type of element as its input,
1220     /// you have to first map each character into a string, and then
1221     /// you can reduce them. This means we create one string per
1222     /// element in our iterator -- not so great. Using `fold`, we can
1223     /// do this instead:
1224     ///
1225     /// ```
1226     /// use rayon::prelude::*;
1227     ///
1228     /// let s =
1229     ///     ['a', 'b', 'c', 'd', 'e']
1230     ///     .par_iter()
1231     ///     .fold(|| String::new(),
1232     ///             |mut s: String, c: &char| { s.push(*c); s })
1233     ///     .reduce(|| String::new(),
1234     ///             |mut a: String, b: String| { a.push_str(&b); a });
1235     ///
1236     /// assert_eq!(s, "abcde");
1237     /// ```
1238     ///
1239     /// Now `fold` will process groups of our characters at a time,
1240     /// and we only make one string per group. We should wind up with
1241     /// some small-ish number of strings roughly proportional to the
1242     /// number of CPUs you have (it will ultimately depend on how busy
1243     /// your processors are). Note that we still need to do a reduce
1244     /// afterwards to combine those groups of strings into a single
1245     /// string.
1246     ///
1247     /// You could use a similar trick to save partial results (e.g., a
1248     /// cache) or something similar.
1249     ///
1250     /// ### Combining fold with other operations
1251     ///
1252     /// You can combine `fold` with `reduce` if you want to produce a
1253     /// single value. This is then roughly equivalent to a map/reduce
1254     /// combination in effect:
1255     ///
1256     /// ```
1257     /// use rayon::prelude::*;
1258     ///
1259     /// let bytes = 0..22_u8;
1260     /// let sum = bytes.into_par_iter()
1261     ///                .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32))
1262     ///                .sum::<u32>();
1263     ///
1264     /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1265     /// ```
1266     fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F>
1267     where
1268         F: Fn(T, Self::Item) -> T + Sync + Send,
1269         ID: Fn() -> T + Sync + Send,
1270         T: Send,
1271     {
1272         Fold::new(self, identity, fold_op)
1273     }
1274 
1275     /// Applies `fold_op` to the given `init` value with each item of this
1276     /// iterator, finally producing the value for further use.
1277     ///
1278     /// This works essentially like `fold(|| init.clone(), fold_op)`, except
1279     /// it doesn't require the `init` type to be `Sync`, nor any other form
1280     /// of added synchronization.
1281     ///
1282     /// # Examples
1283     ///
1284     /// ```
1285     /// use rayon::prelude::*;
1286     ///
1287     /// let bytes = 0..22_u8;
1288     /// let sum = bytes.into_par_iter()
1289     ///                .fold_with(0_u32, |a: u32, b: u8| a + (b as u32))
1290     ///                .sum::<u32>();
1291     ///
1292     /// assert_eq!(sum, (0..22).sum()); // compare to sequential
1293     /// ```
1294     fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F>
1295     where
1296         F: Fn(T, Self::Item) -> T + Sync + Send,
1297         T: Send + Clone,
1298     {
1299         FoldWith::new(self, init, fold_op)
1300     }
1301 
1302     /// Performs a fallible parallel fold.
1303     ///
1304     /// This is a variation of [`fold()`] for operations which can fail with
1305     /// `Option::None` or `Result::Err`.  The first such failure stops
1306     /// processing the local set of items, without affecting other folds in the
1307     /// iterator's subdivisions.
1308     ///
1309     /// Often, `try_fold()` will be followed by [`try_reduce()`]
1310     /// for a final reduction and global short-circuiting effect.
1311     ///
1312     /// [`fold()`]: #method.fold
1313     /// [`try_reduce()`]: #method.try_reduce
1314     ///
1315     /// # Examples
1316     ///
1317     /// ```
1318     /// use rayon::prelude::*;
1319     ///
1320     /// let bytes = 0..22_u8;
1321     /// let sum = bytes.into_par_iter()
1322     ///                .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1323     ///                .try_reduce(|| 0, u32::checked_add);
1324     ///
1325     /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1326     /// ```
1327     fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F>
1328     where
1329         F: Fn(T, Self::Item) -> R + Sync + Send,
1330         ID: Fn() -> T + Sync + Send,
1331         R: Try<Output = T> + Send,
1332     {
1333         TryFold::new(self, identity, fold_op)
1334     }
1335 
1336     /// Performs a fallible parallel fold with a cloneable `init` value.
1337     ///
1338     /// This combines the `init` semantics of [`fold_with()`] and the failure
1339     /// semantics of [`try_fold()`].
1340     ///
1341     /// [`fold_with()`]: #method.fold_with
1342     /// [`try_fold()`]: #method.try_fold
1343     ///
1344     /// ```
1345     /// use rayon::prelude::*;
1346     ///
1347     /// let bytes = 0..22_u8;
1348     /// let sum = bytes.into_par_iter()
1349     ///                .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32))
1350     ///                .try_reduce(|| 0, u32::checked_add);
1351     ///
1352     /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential
1353     /// ```
1354     fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F>
1355     where
1356         F: Fn(T, Self::Item) -> R + Sync + Send,
1357         R: Try<Output = T> + Send,
1358         T: Clone + Send,
1359     {
1360         TryFoldWith::new(self, init, fold_op)
1361     }
1362 
1363     /// Sums up the items in the iterator.
1364     ///
1365     /// Note that the order in which the items will be reduced is not specified,
1366     /// so if the `+` operator is not truly [associative] \(as is the
1367     /// case for floating point numbers), then the results are not
1368     /// fully deterministic.
1369     ///
1370     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1371     ///
1372     /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`,
1373     /// except that the type of `0` and the `+` operation may vary
1374     /// depending on the type of value being produced.
1375     ///
1376     /// # Examples
1377     ///
1378     /// ```
1379     /// use rayon::prelude::*;
1380     ///
1381     /// let a = [1, 5, 7];
1382     ///
1383     /// let sum: i32 = a.par_iter().sum();
1384     ///
1385     /// assert_eq!(sum, 13);
1386     /// ```
1387     fn sum<S>(self) -> S
1388     where
1389         S: Send + Sum<Self::Item> + Sum<S>,
1390     {
1391         sum::sum(self)
1392     }
1393 
1394     /// Multiplies all the items in the iterator.
1395     ///
1396     /// Note that the order in which the items will be reduced is not specified,
1397     /// so if the `*` operator is not truly [associative] \(as is the
1398     /// case for floating point numbers), then the results are not
1399     /// fully deterministic.
1400     ///
1401     /// [associative]: https://en.wikipedia.org/wiki/Associative_property
1402     ///
1403     /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`,
1404     /// except that the type of `1` and the `*` operation may vary
1405     /// depending on the type of value being produced.
1406     ///
1407     /// # Examples
1408     ///
1409     /// ```
1410     /// use rayon::prelude::*;
1411     ///
1412     /// fn factorial(n: u32) -> u32 {
1413     ///    (1..n+1).into_par_iter().product()
1414     /// }
1415     ///
1416     /// assert_eq!(factorial(0), 1);
1417     /// assert_eq!(factorial(1), 1);
1418     /// assert_eq!(factorial(5), 120);
1419     /// ```
1420     fn product<P>(self) -> P
1421     where
1422         P: Send + Product<Self::Item> + Product<P>,
1423     {
1424         product::product(self)
1425     }
1426 
1427     /// Computes the minimum of all the items in the iterator. If the
1428     /// iterator is empty, `None` is returned; otherwise, `Some(min)`
1429     /// is returned.
1430     ///
1431     /// Note that the order in which the items will be reduced is not
1432     /// specified, so if the `Ord` impl is not truly associative, then
1433     /// the results are not deterministic.
1434     ///
1435     /// Basically equivalent to `self.reduce_with(|a, b| Ord::min(a, b))`.
1436     ///
1437     /// # Examples
1438     ///
1439     /// ```
1440     /// use rayon::prelude::*;
1441     ///
1442     /// let a = [45, 74, 32];
1443     ///
1444     /// assert_eq!(a.par_iter().min(), Some(&32));
1445     ///
1446     /// let b: [i32; 0] = [];
1447     ///
1448     /// assert_eq!(b.par_iter().min(), None);
1449     /// ```
1450     fn min(self) -> Option<Self::Item>
1451     where
1452         Self::Item: Ord,
1453     {
1454         self.reduce_with(Ord::min)
1455     }
1456 
1457     /// Computes the minimum of all the items in the iterator with respect to
1458     /// the given comparison function. If the iterator is empty, `None` is
1459     /// returned; otherwise, `Some(min)` is returned.
1460     ///
1461     /// Note that the order in which the items will be reduced is not
1462     /// specified, so if the comparison function is not associative, then
1463     /// the results are not deterministic.
1464     ///
1465     /// # Examples
1466     ///
1467     /// ```
1468     /// use rayon::prelude::*;
1469     ///
1470     /// let a = [-3_i32, 77, 53, 240, -1];
1471     ///
1472     /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3));
1473     /// ```
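    ///
    /// As a common use, this also covers types that are only `PartialOrd`,
    /// such as floats, by supplying the comparison explicitly:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [3.0_f64, 1.5, 2.25];
    ///
    /// assert_eq!(a.par_iter().min_by(|x, y| x.partial_cmp(y).unwrap()), Some(&1.5));
    /// ```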
1474     fn min_by<F>(self, f: F) -> Option<Self::Item>
1475     where
1476         F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1477     {
1478         fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1479             move |a, b| match f(&a, &b) {
1480                 Ordering::Greater => b,
1481                 _ => a,
1482             }
1483         }
1484 
1485         self.reduce_with(min(f))
1486     }
1487 
1488     /// Computes the item that yields the minimum value for the given
1489     /// function. If the iterator is empty, `None` is returned;
1490     /// otherwise, `Some(item)` is returned.
1491     ///
1492     /// Note that the order in which the items will be reduced is not
1493     /// specified, so if the `Ord` impl is not truly associative, then
1494     /// the results are not deterministic.
1495     ///
1496     /// # Examples
1497     ///
1498     /// ```
1499     /// use rayon::prelude::*;
1500     ///
1501     /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1502     ///
1503     /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2));
1504     /// ```
1505     fn min_by_key<K, F>(self, f: F) -> Option<Self::Item>
1506     where
1507         K: Ord + Send,
1508         F: Sync + Send + Fn(&Self::Item) -> K,
1509     {
1510         fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1511             move |x| (f(&x), x)
1512         }
1513 
1514         fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1515             match (a.0).cmp(&b.0) {
1516                 Ordering::Greater => b,
1517                 _ => a,
1518             }
1519         }
1520 
1521         let (_, x) = self.map(key(f)).reduce_with(min_key)?;
1522         Some(x)
1523     }
1524 
1525     /// Computes the maximum of all the items in the iterator. If the
1526     /// iterator is empty, `None` is returned; otherwise, `Some(max)`
1527     /// is returned.
1528     ///
1529     /// Note that the order in which the items will be reduced is not
1530     /// specified, so if the `Ord` impl is not truly associative, then
1531     /// the results are not deterministic.
1532     ///
1533     /// Basically equivalent to `self.reduce_with(|a, b| Ord::max(a, b))`.
1534     ///
1535     /// # Examples
1536     ///
1537     /// ```
1538     /// use rayon::prelude::*;
1539     ///
1540     /// let a = [45, 74, 32];
1541     ///
1542     /// assert_eq!(a.par_iter().max(), Some(&74));
1543     ///
1544     /// let b: [i32; 0] = [];
1545     ///
1546     /// assert_eq!(b.par_iter().max(), None);
1547     /// ```
1548     fn max(self) -> Option<Self::Item>
1549     where
1550         Self::Item: Ord,
1551     {
1552         self.reduce_with(Ord::max)
1553     }
1554 
1555     /// Computes the maximum of all the items in the iterator with respect to
1556     /// the given comparison function. If the iterator is empty, `None` is
1557     /// returned; otherwise, `Some(max)` is returned.
1558     ///
1559     /// Note that the order in which the items will be reduced is not
1560     /// specified, so if the comparison function is not associative, then
1561     /// the results are not deterministic.
1562     ///
1563     /// # Examples
1564     ///
1565     /// ```
1566     /// use rayon::prelude::*;
1567     ///
1568     /// let a = [-3_i32, 77, 53, 240, -1];
1569     ///
1570     /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240));
1571     /// ```
1572     fn max_by<F>(self, f: F) -> Option<Self::Item>
1573     where
1574         F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,
1575     {
1576         fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T {
1577             move |a, b| match f(&a, &b) {
1578                 Ordering::Greater => a,
1579                 _ => b,
1580             }
1581         }
1582 
1583         self.reduce_with(max(f))
1584     }
1585 
1586     /// Computes the item that yields the maximum value for the given
1587     /// function. If the iterator is empty, `None` is returned;
1588     /// otherwise, `Some(item)` is returned.
1589     ///
1590     /// Note that the order in which the items will be reduced is not
1591     /// specified, so if the `Ord` impl is not truly associative, then
1592     /// the results are not deterministic.
1593     ///
1594     /// # Examples
1595     ///
1596     /// ```
1597     /// use rayon::prelude::*;
1598     ///
1599     /// let a = [-3_i32, 34, 2, 5, -10, -3, -23];
1600     ///
1601     /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34));
1602     /// ```
1603     fn max_by_key<K, F>(self, f: F) -> Option<Self::Item>
1604     where
1605         K: Ord + Send,
1606         F: Sync + Send + Fn(&Self::Item) -> K,
1607     {
1608         fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) {
1609             move |x| (f(&x), x)
1610         }
1611 
1612         fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) {
1613             match (a.0).cmp(&b.0) {
1614                 Ordering::Greater => a,
1615                 _ => b,
1616             }
1617         }
1618 
1619         let (_, x) = self.map(key(f)).reduce_with(max_key)?;
1620         Some(x)
1621     }
1622 
1623     /// Takes two iterators and creates a new iterator over both.
1624     ///
1625     /// # Examples
1626     ///
1627     /// ```
1628     /// use rayon::prelude::*;
1629     ///
1630     /// let a = [0, 1, 2];
1631     /// let b = [9, 8, 7];
1632     ///
1633     /// let par_iter = a.par_iter().chain(b.par_iter());
1634     ///
1635     /// let chained: Vec<_> = par_iter.cloned().collect();
1636     ///
1637     /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]);
1638     /// ```
1639     fn chain<C>(self, chain: C) -> Chain<Self, C::Iter>
1640     where
1641         C: IntoParallelIterator<Item = Self::Item>,
1642     {
1643         Chain::new(self, chain.into_par_iter())
1644     }
1645 
1646     /// Searches for **some** item in the parallel iterator that
1647     /// matches the given predicate and returns it. This operation
1648     /// is similar to [`find` on sequential iterators][find] but
1649     /// the item returned may not be the **first** one in the parallel
1650     /// sequence which matches, since we search the entire sequence in parallel.
1651     ///
1652     /// Once a match is found, we will attempt to stop processing
1653     /// the rest of the items in the iterator as soon as possible
1654     /// (just as `find` stops iterating once a match is found).
1655     ///
1656     /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find
1657     ///
1658     /// # Examples
1659     ///
1660     /// ```
1661     /// use rayon::prelude::*;
1662     ///
1663     /// let a = [1, 2, 3, 3];
1664     ///
1665     /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3));
1666     ///
1667     /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None);
1668     /// ```
1669     fn find_any<P>(self, predicate: P) -> Option<Self::Item>
1670     where
1671         P: Fn(&Self::Item) -> bool + Sync + Send,
1672     {
1673         find::find(self, predicate)
1674     }
1675 
1676     /// Searches for the sequentially **first** item in the parallel iterator
1677     /// that matches the given predicate and returns it.
1678     ///
1679     /// Once a match is found, all attempts to the right of the match
1680     /// will be stopped, while attempts to the left must continue in case
1681     /// an earlier match is found.
1682     ///
1683     /// For added performance, you might consider using `find_first` in conjunction with
1684     /// [`by_exponential_blocks()`][IndexedParallelIterator::by_exponential_blocks].
1685     ///
1686     /// Note that not all parallel iterators have a useful order, much like
1687     /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
1688     /// just want the first match discovered anywhere in the iterator,
1689     /// `find_any` is a better choice.
1690     ///
1691     /// # Examples
1692     ///
1693     /// ```
1694     /// use rayon::prelude::*;
1695     ///
1696     /// let a = [1, 2, 3, 3];
1697     ///
1698     /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3));
1699     ///
1700     /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None);
1701     /// ```
1702     fn find_first<P>(self, predicate: P) -> Option<Self::Item>
1703     where
1704         P: Fn(&Self::Item) -> bool + Sync + Send,
1705     {
1706         find_first_last::find_first(self, predicate)
1707     }
1708 
1709     /// Searches for the sequentially **last** item in the parallel iterator
1710     /// that matches the given predicate and returns it.
1711     ///
1712     /// Once a match is found, all attempts to the left of the match
1713     /// will be stopped, while attempts to the right must continue in case
1714     /// a later match is found.
1715     ///
1716     /// Note that not all parallel iterators have a useful order, much like
1717     /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
1718     /// order doesn't actually matter to you, `find_any` is a better choice.
1719     ///
1720     /// # Examples
1721     ///
1722     /// ```
1723     /// use rayon::prelude::*;
1724     ///
1725     /// let a = [1, 2, 3, 3];
1726     ///
1727     /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3));
1728     ///
1729     /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None);
1730     /// ```
1731     fn find_last<P>(self, predicate: P) -> Option<Self::Item>
1732     where
1733         P: Fn(&Self::Item) -> bool + Sync + Send,
1734     {
1735         find_first_last::find_last(self, predicate)
1736     }
1737 
1738     /// Applies the given predicate to the items in the parallel iterator
1739     /// and returns **any** non-None result of the map operation.
1740     ///
1741     /// Once a non-None value is produced from the map operation, we will
1742     /// attempt to stop processing the rest of the items in the iterator
1743     /// as soon as possible.
1744     ///
1745     /// Note that this method only returns **some** item in the parallel
1746     /// iterator that is not None from the map predicate. The item returned
1747     /// may not be the **first** non-None value produced in the parallel
1748     /// sequence, since the entire sequence is mapped over in parallel.
1749     ///
1750     /// # Examples
1751     ///
1752     /// ```
1753     /// use rayon::prelude::*;
1754     ///
1755     /// let c = ["lol", "NaN", "5", "5"];
1756     ///
1757     /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok());
1758     ///
1759     /// assert_eq!(found_number, Some(5));
1760     /// ```
1761     fn find_map_any<P, R>(self, predicate: P) -> Option<R>
1762     where
1763         P: Fn(Self::Item) -> Option<R> + Sync + Send,
1764         R: Send,
1765     {
1766         fn yes<T>(_: &T) -> bool {
1767             true
1768         }
1769         self.filter_map(predicate).find_any(yes)
1770     }
1771 
1772     /// Applies the given predicate to the items in the parallel iterator and
1773     /// returns the sequentially **first** non-None result of the map operation.
1774     ///
1775     /// Once a non-None value is produced from the map operation, all attempts
1776     /// to the right of the match will be stopped, while attempts to the left
1777     /// must continue in case an earlier match is found.
1778     ///
1779     /// Note that not all parallel iterators have a useful order, much like
1780     /// sequential `HashMap` iteration, so "first" may be nebulous. If you
1781     /// just want the first non-None value discovered anywhere in the iterator,
1782     /// `find_map_any` is a better choice.
1783     ///
1784     /// # Examples
1785     ///
1786     /// ```
1787     /// use rayon::prelude::*;
1788     ///
1789     /// let c = ["lol", "NaN", "2", "5"];
1790     ///
1791     /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok());
1792     ///
1793     /// assert_eq!(first_number, Some(2));
1794     /// ```
1795     fn find_map_first<P, R>(self, predicate: P) -> Option<R>
1796     where
1797         P: Fn(Self::Item) -> Option<R> + Sync + Send,
1798         R: Send,
1799     {
1800         fn yes<T>(_: &T) -> bool {
1801             true
1802         }
1803         self.filter_map(predicate).find_first(yes)
1804     }
1805 
1806     /// Applies the given predicate to the items in the parallel iterator and
1807     /// returns the sequentially **last** non-None result of the map operation.
1808     ///
1809     /// Once a non-None value is produced from the map operation, all attempts
1810     /// to the left of the match will be stopped, while attempts to the right
1811     /// must continue in case a later match is found.
1812     ///
1813     /// Note that not all parallel iterators have a useful order, much like
1814     /// sequential `HashMap` iteration, so "last" may be nebulous. If you
1815     /// just want any non-None value discovered anywhere in the iterator,
1816     /// `find_map_any` is a better choice.
1817     ///
1818     /// # Examples
1819     ///
1820     /// ```
1821     /// use rayon::prelude::*;
1822     ///
1823     /// let c = ["lol", "NaN", "2", "5"];
1824     ///
1825     /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok());
1826     ///
1827     /// assert_eq!(last_number, Some(5));
1828     /// ```
1829     fn find_map_last<P, R>(self, predicate: P) -> Option<R>
1830     where
1831         P: Fn(Self::Item) -> Option<R> + Sync + Send,
1832         R: Send,
1833     {
1834         fn yes<T>(_: &T) -> bool {
1835             true
1836         }
1837         self.filter_map(predicate).find_last(yes)
1838     }
1839 
1840     #[doc(hidden)]
1841     #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \
1842                          `find_first`, or `find_last`")]
1843     fn find<P>(self, predicate: P) -> Option<Self::Item>
1844     where
1845         P: Fn(&Self::Item) -> bool + Sync + Send,
1846     {
1847         self.find_any(predicate)
1848     }
1849 
1850     /// Searches for **some** item in the parallel iterator that
1851     /// matches the given predicate, and if so returns true.  Once
1852     /// a match is found, we'll attempt to stop processing the rest
1853     /// of the items.  Proving that there's no match, returning false,
1854     /// does require visiting every item.
1855     ///
1856     /// # Examples
1857     ///
1858     /// ```
1859     /// use rayon::prelude::*;
1860     ///
1861     /// let a = [0, 12, 3, 4, 0, 23, 0];
1862     ///
1863     /// let is_valid = a.par_iter().any(|&x| x > 10);
1864     ///
1865     /// assert!(is_valid);
1866     /// ```
1867     fn any<P>(self, predicate: P) -> bool
1868     where
1869         P: Fn(Self::Item) -> bool + Sync + Send,
1870     {
1871         self.map(predicate).find_any(bool::clone).is_some()
1872     }
1873 
1874     /// Tests that every item in the parallel iterator matches the given
1875     /// predicate, and if so returns true.  If a counter-example is found,
1876     /// we'll attempt to stop processing more items, then return false.
1877     ///
1878     /// # Examples
1879     ///
1880     /// ```
1881     /// use rayon::prelude::*;
1882     ///
1883     /// let a = [0, 12, 3, 4, 0, 23, 0];
1884     ///
1885     /// let is_valid = a.par_iter().all(|&x| x > 10);
1886     ///
1887     /// assert!(!is_valid);
1888     /// ```
1889     fn all<P>(self, predicate: P) -> bool
1890     where
1891         P: Fn(Self::Item) -> bool + Sync + Send,
1892     {
1893         #[inline]
1894         fn is_false(x: &bool) -> bool {
1895             !x
1896         }
1897 
1898         self.map(predicate).find_any(is_false).is_none()
1899     }
1900 
1901     /// Creates an iterator over the `Some` items of this iterator, halting
1902     /// as soon as any `None` is found.
1903     ///
1904     /// # Examples
1905     ///
1906     /// ```
1907     /// use rayon::prelude::*;
1908     /// use std::sync::atomic::{AtomicUsize, Ordering};
1909     ///
1910     /// let counter = AtomicUsize::new(0);
1911     /// let value = (0_i32..2048)
1912     ///     .into_par_iter()
1913     ///     .map(|x| {
1914     ///              counter.fetch_add(1, Ordering::SeqCst);
1915     ///              if x < 1024 { Some(x) } else { None }
1916     ///          })
1917     ///     .while_some()
1918     ///     .max();
1919     ///
1920     /// assert!(value < Some(1024));
1921     /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one
1922     /// ```
1923     fn while_some<T>(self) -> WhileSome<Self>
1924     where
1925         Self: ParallelIterator<Item = Option<T>>,
1926         T: Send,
1927     {
1928         WhileSome::new(self)
1929     }
1930 
1931     /// Wraps an iterator with a fuse in case of panics, to halt all threads
1932     /// as soon as possible.
1933     ///
1934     /// Panics within parallel iterators are always propagated to the caller,
1935     /// but they don't always halt the rest of the iterator right away, due to
1936     /// the internal semantics of [`join`]. This adaptor makes a greater effort
1937     /// to stop processing other items sooner, with the cost of additional
1938     /// synchronization overhead, which may also inhibit some optimizations.
1939     ///
1940     /// [`join`]: ../fn.join.html#panics
1941     ///
1942     /// # Examples
1943     ///
1944     /// If this code didn't use `panic_fuse()`, it would continue processing
1945     /// many more items in other threads (with long sleep delays) before the
1946     /// panic is finally propagated.
1947     ///
1948     /// ```should_panic
1949     /// use rayon::prelude::*;
1950     /// use std::{thread, time};
1951     ///
1952     /// (0..1_000_000)
1953     ///     .into_par_iter()
1954     ///     .panic_fuse()
1955     ///     .for_each(|i| {
1956     ///         // simulate some work
1957     ///         thread::sleep(time::Duration::from_secs(1));
1958     ///         assert!(i > 0); // oops!
1959     ///     });
1960     /// ```
1961     fn panic_fuse(self) -> PanicFuse<Self> {
1962         PanicFuse::new(self)
1963     }
1964 
1965     /// Creates a fresh collection containing all the elements produced
1966     /// by this parallel iterator.
1967     ///
1968     /// You may prefer [`collect_into_vec()`] implemented on
1969     /// [`IndexedParallelIterator`], if your underlying iterator also implements
1970     /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge
1971     /// of how many elements the iterator contains, and even allows you to reuse
1972     /// an existing vector's backing store rather than allocating a fresh vector.
1973     ///
1974     /// See also [`collect_vec_list()`][Self::collect_vec_list] for collecting
1975     /// into a `LinkedList<Vec<T>>`.
1976     ///
1977     /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
1978     /// [`collect_into_vec()`]:
1979     ///     trait.IndexedParallelIterator.html#method.collect_into_vec
1980     ///
1981     /// # Examples
1982     ///
1983     /// ```
1984     /// use rayon::prelude::*;
1985     ///
1986     /// let sync_vec: Vec<_> = (0..100).into_iter().collect();
1987     ///
1988     /// let async_vec: Vec<_> = (0..100).into_par_iter().collect();
1989     ///
1990     /// assert_eq!(sync_vec, async_vec);
1991     /// ```
1992     ///
1993     /// You can collect a pair of collections like [`unzip`](#method.unzip)
1994     /// for paired items:
1995     ///
1996     /// ```
1997     /// use rayon::prelude::*;
1998     ///
1999     /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
2000     /// let (first, second): (Vec<_>, Vec<_>) = a.into_par_iter().collect();
2001     ///
2002     /// assert_eq!(first, [0, 1, 2, 3]);
2003     /// assert_eq!(second, [1, 2, 3, 4]);
2004     /// ```
2005     ///
2006     /// Or like [`partition_map`](#method.partition_map) for `Either` items:
2007     ///
2008     /// ```
2009     /// use rayon::prelude::*;
2010     /// use rayon::iter::Either;
2011     ///
2012     /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().map(|x| {
2013     ///     if x % 2 == 0 {
2014     ///         Either::Left(x * 4)
2015     ///     } else {
2016     ///         Either::Right(x * 3)
2017     ///     }
2018     /// }).collect();
2019     ///
2020     /// assert_eq!(left, [0, 8, 16, 24]);
2021     /// assert_eq!(right, [3, 9, 15, 21]);
2022     /// ```
2023     ///
2024     /// You can even collect an arbitrarily-nested combination of pairs and `Either`:
2025     ///
2026     /// ```
2027     /// use rayon::prelude::*;
2028     /// use rayon::iter::Either;
2029     ///
2030     /// let (first, (left, right)): (Vec<_>, (Vec<_>, Vec<_>))
2031     ///     = (0..8).into_par_iter().map(|x| {
2032     ///         if x % 2 == 0 {
2033     ///             (x, Either::Left(x * 4))
2034     ///         } else {
2035     ///             (-x, Either::Right(x * 3))
2036     ///         }
2037     ///     }).collect();
2038     ///
2039     /// assert_eq!(first, [0, -1, 2, -3, 4, -5, 6, -7]);
2040     /// assert_eq!(left, [0, 8, 16, 24]);
2041     /// assert_eq!(right, [3, 9, 15, 21]);
2042     /// ```
2043     ///
2044     /// All of that can _also_ be combined with short-circuiting collection of
2045     /// `Result` or `Option` types:
2046     ///
2047     /// ```
2048     /// use rayon::prelude::*;
2049     /// use rayon::iter::Either;
2050     ///
2051     /// let result: Result<(Vec<_>, (Vec<_>, Vec<_>)), _>
2052     ///     = (0..8).into_par_iter().map(|x| {
2053     ///         if x > 5 {
2054     ///             Err(x)
2055     ///         } else if x % 2 == 0 {
2056     ///             Ok((x, Either::Left(x * 4)))
2057     ///         } else {
2058     ///             Ok((-x, Either::Right(x * 3)))
2059     ///         }
2060     ///     }).collect();
2061     ///
2062     /// let error = result.unwrap_err();
2063     /// assert!(error == 6 || error == 7);
2064     /// ```
2065     fn collect<C>(self) -> C
2066     where
2067         C: FromParallelIterator<Self::Item>,
2068     {
2069         C::from_par_iter(self)
2070     }
2071 
2072     /// Unzips the items of a parallel iterator into a pair of arbitrary
2073     /// `ParallelExtend` containers.
2074     ///
2075     /// You may prefer to use `unzip_into_vecs()`, which allocates more
2076     /// efficiently with precise knowledge of how many elements the
2077     /// iterator contains, and even allows you to reuse existing
2078     /// vectors' backing stores rather than allocating fresh vectors.
2079     ///
2080     /// # Examples
2081     ///
2082     /// ```
2083     /// use rayon::prelude::*;
2084     ///
2085     /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)];
2086     ///
2087     /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip();
2088     ///
2089     /// assert_eq!(left, [0, 1, 2, 3]);
2090     /// assert_eq!(right, [1, 2, 3, 4]);
2091     /// ```
2092     ///
2093     /// Nested pairs can be unzipped too.
2094     ///
2095     /// ```
2096     /// use rayon::prelude::*;
2097     ///
2098     /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter()
2099     ///     .map(|i| (i, (i * i, i * i * i)))
2100     ///     .unzip();
2101     ///
2102     /// assert_eq!(values, [0, 1, 2, 3]);
2103     /// assert_eq!(squares, [0, 1, 4, 9]);
2104     /// assert_eq!(cubes, [0, 1, 8, 27]);
2105     /// ```
2106     fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
2107     where
2108         Self: ParallelIterator<Item = (A, B)>,
2109         FromA: Default + Send + ParallelExtend<A>,
2110         FromB: Default + Send + ParallelExtend<B>,
2111         A: Send,
2112         B: Send,
2113     {
2114         unzip::unzip(self)
2115     }
2116 
2117     /// Partitions the items of a parallel iterator into a pair of arbitrary
2118     /// `ParallelExtend` containers.  Items for which the `predicate` returns
2119     /// true go into the first container, and the rest go into the second.
2120     ///
2121     /// Note: unlike the standard `Iterator::partition`, this allows distinct
2122     /// collection types for the left and right items.  This is more flexible,
2123     /// but may require new type annotations when converting sequential code
2124     /// that used type inference assuming the two were the same.
2125     ///
2126     /// # Examples
2127     ///
2128     /// ```
2129     /// use rayon::prelude::*;
2130     ///
2131     /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0);
2132     ///
2133     /// assert_eq!(left, [0, 2, 4, 6]);
2134     /// assert_eq!(right, [1, 3, 5, 7]);
2135     /// ```
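    ///
    /// As a sketch of the distinct-collection flexibility mentioned above, one
    /// side can be collected into a `HashSet` while the other stays a `Vec`:
    ///
    /// ```
    /// use rayon::prelude::*;
    /// use std::collections::HashSet;
    ///
    /// let (evens, odds): (HashSet<i32>, Vec<i32>) =
    ///     (0..8).into_par_iter().partition(|x| x % 2 == 0);
    ///
    /// assert_eq!(evens.len(), 4);
    /// assert!(evens.contains(&6));
    /// assert_eq!(odds, [1, 3, 5, 7]);
    /// ```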
2136     fn partition<A, B, P>(self, predicate: P) -> (A, B)
2137     where
2138         A: Default + Send + ParallelExtend<Self::Item>,
2139         B: Default + Send + ParallelExtend<Self::Item>,
2140         P: Fn(&Self::Item) -> bool + Sync + Send,
2141     {
2142         unzip::partition(self, predicate)
2143     }
2144 
2145     /// Partitions and maps the items of a parallel iterator into a pair of
2146     /// arbitrary `ParallelExtend` containers.  `Either::Left` items go into
2147     /// the first container, and `Either::Right` items go into the second.
2148     ///
2149     /// # Examples
2150     ///
2151     /// ```
2152     /// use rayon::prelude::*;
2153     /// use rayon::iter::Either;
2154     ///
2155     /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter()
2156     ///     .partition_map(|x| {
2157     ///         if x % 2 == 0 {
2158     ///             Either::Left(x * 4)
2159     ///         } else {
2160     ///             Either::Right(x * 3)
2161     ///         }
2162     ///     });
2163     ///
2164     /// assert_eq!(left, [0, 8, 16, 24]);
2165     /// assert_eq!(right, [3, 9, 15, 21]);
2166     /// ```
2167     ///
2168     /// Nested `Either` enums can be split as well.
2169     ///
2170     /// ```
2171     /// use rayon::prelude::*;
2172     /// use rayon::iter::Either::*;
2173     ///
2174     /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20)
2175     ///     .into_par_iter()
2176     ///     .partition_map(|x| match (x % 3, x % 5) {
2177     ///         (0, 0) => Left(Left(x)),
2178     ///         (0, _) => Left(Right(x)),
2179     ///         (_, 0) => Right(Left(x)),
2180     ///         (_, _) => Right(Right(x)),
2181     ///     });
2182     ///
2183     /// assert_eq!(fizzbuzz, [15]);
2184     /// assert_eq!(fizz, [3, 6, 9, 12, 18]);
2185     /// assert_eq!(buzz, [5, 10]);
2186     /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]);
2187     /// ```
2188     fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B)
2189     where
2190         A: Default + Send + ParallelExtend<L>,
2191         B: Default + Send + ParallelExtend<R>,
2192         P: Fn(Self::Item) -> Either<L, R> + Sync + Send,
2193         L: Send,
2194         R: Send,
2195     {
2196         unzip::partition_map(self, predicate)
2197     }
2198 
2199     /// Intersperses clones of an element between items of this iterator.
2200     ///
2201     /// # Examples
2202     ///
2203     /// ```
2204     /// use rayon::prelude::*;
2205     ///
2206     /// let x = vec![1, 2, 3];
2207     /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect();
2208     ///
2209     /// assert_eq!(r, vec![1, -1, 2, -1, 3]);
2210     /// ```
2211     fn intersperse(self, element: Self::Item) -> Intersperse<Self>
2212     where
2213         Self::Item: Clone,
2214     {
2215         Intersperse::new(self, element)
2216     }
2217 
2218     /// Creates an iterator that yields `n` elements from *anywhere* in the original iterator.
2219     ///
2220     /// This is similar to [`IndexedParallelIterator::take`] without being
2221     /// constrained to the "first" `n` of the original iterator order. The
2222     /// taken items will still maintain their relative order where that is
2223     /// visible in `collect`, `reduce`, and similar outputs.
2224     ///
2225     /// # Examples
2226     ///
2227     /// ```
2228     /// use rayon::prelude::*;
2229     ///
2230     /// let result: Vec<_> = (0..100)
2231     ///     .into_par_iter()
2232     ///     .filter(|&x| x % 2 == 0)
2233     ///     .take_any(5)
2234     ///     .collect();
2235     ///
2236     /// assert_eq!(result.len(), 5);
2237     /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2238     /// ```
2239     fn take_any(self, n: usize) -> TakeAny<Self> {
2240         TakeAny::new(self, n)
2241     }
2242 
2243     /// Creates an iterator that skips `n` elements from *anywhere* in the original iterator.
2244     ///
2245     /// This is similar to [`IndexedParallelIterator::skip`] without being
2246     /// constrained to the "first" `n` of the original iterator order. The
2247     /// remaining items will still maintain their relative order where that is
2248     /// visible in `collect`, `reduce`, and similar outputs.
2249     ///
2250     /// # Examples
2251     ///
2252     /// ```
2253     /// use rayon::prelude::*;
2254     ///
2255     /// let result: Vec<_> = (0..100)
2256     ///     .into_par_iter()
2257     ///     .filter(|&x| x % 2 == 0)
2258     ///     .skip_any(5)
2259     ///     .collect();
2260     ///
2261     /// assert_eq!(result.len(), 45);
2262     /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2263     /// ```
2264     fn skip_any(self, n: usize) -> SkipAny<Self> {
2265         SkipAny::new(self, n)
2266     }
2267 
2268     /// Creates an iterator that takes elements from *anywhere* in the original iterator
2269     /// until the given `predicate` returns `false`.
2270     ///
2271     /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2272     /// global condition unrelated to the item itself, or some combination thereof.
2273     ///
2274     /// If parallel calls to the `predicate` race and give different results, then the
2275     /// `true` results will still take those particular items, while respecting the `false`
2276     /// result from elsewhere to skip any further items.
2277     ///
2278     /// This is similar to [`Iterator::take_while`] without being constrained to the original
2279     /// iterator order. The taken items will still maintain their relative order where that is
2280     /// visible in `collect`, `reduce`, and similar outputs.
2281     ///
2282     /// # Examples
2283     ///
2284     /// ```
2285     /// use rayon::prelude::*;
2286     ///
2287     /// let result: Vec<_> = (0..100)
2288     ///     .into_par_iter()
2289     ///     .take_any_while(|x| *x < 50)
2290     ///     .collect();
2291     ///
2292     /// assert!(result.len() <= 50);
2293     /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2294     /// ```
2295     ///
2296     /// ```
2297     /// use rayon::prelude::*;
2298     /// use std::sync::atomic::AtomicUsize;
2299     /// use std::sync::atomic::Ordering::Relaxed;
2300     ///
2301     /// // Collect any group of items that sum <= 1000
2302     /// let quota = AtomicUsize::new(1000);
2303     /// let result: Vec<_> = (0_usize..100)
2304     ///     .into_par_iter()
2305     ///     .take_any_while(|&x| {
2306     ///         quota.fetch_update(Relaxed, Relaxed, |q| q.checked_sub(x))
2307     ///             .is_ok()
2308     ///     })
2309     ///     .collect();
2310     ///
2311     /// let sum = result.iter().sum::<usize>();
2312     /// assert!(matches!(sum, 902..=1000));
2313     /// ```
2314     fn take_any_while<P>(self, predicate: P) -> TakeAnyWhile<Self, P>
2315     where
2316         P: Fn(&Self::Item) -> bool + Sync + Send,
2317     {
2318         TakeAnyWhile::new(self, predicate)
2319     }
2320 
2321     /// Creates an iterator that skips elements from *anywhere* in the original iterator
2322     /// until the given `predicate` returns `false`.
2323     ///
2324     /// The `predicate` may be anything -- e.g. it could be checking a fact about the item, a
2325     /// global condition unrelated to the item itself, or some combination thereof.
2326     ///
2327     /// If parallel calls to the `predicate` race and give different results, then the
2328     /// `true` results will still skip those particular items, while respecting the `false`
2329     /// result from elsewhere to skip any further items.
2330     ///
2331     /// This is similar to [`Iterator::skip_while`] without being constrained to the original
2332     /// iterator order. The remaining items will still maintain their relative order where that is
2333     /// visible in `collect`, `reduce`, and similar outputs.
2334     ///
2335     /// # Examples
2336     ///
2337     /// ```
2338     /// use rayon::prelude::*;
2339     ///
2340     /// let result: Vec<_> = (0..100)
2341     ///     .into_par_iter()
2342     ///     .skip_any_while(|x| *x < 50)
2343     ///     .collect();
2344     ///
2345     /// assert!(result.len() >= 50);
2346     /// assert!(result.windows(2).all(|w| w[0] < w[1]));
2347     /// ```
2348     fn skip_any_while<P>(self, predicate: P) -> SkipAnyWhile<Self, P>
2349     where
2350         P: Fn(&Self::Item) -> bool + Sync + Send,
2351     {
2352         SkipAnyWhile::new(self, predicate)
2353     }
2354 
2355     /// Collects this iterator into a linked list of vectors.
2356     ///
2357     /// This is useful when you need to condense a parallel iterator into a collection,
2358     /// but have no specific requirements for what that collection should be. If you
2359     /// plan to store the collection longer-term, `Vec<T>` is, as always, likely the
2360     /// best default choice, despite the overhead that comes from concatenating each
2361     /// vector. Or, if this is an `IndexedParallelIterator`, you should also prefer to
2362     /// just collect to a `Vec<T>`.
2363     ///
2364     /// Internally, most [`FromParallelIterator`]/[`ParallelExtend`] implementations
2365     /// use this strategy: each job collects its chunk of the iterator into a `Vec<T>`,
2366     /// those chunks are merged into a `LinkedList`, and the collection is then
2367     /// extended with each vector. This is a very efficient way to collect an
2368     /// unindexed parallel iterator, without much intermediate data movement.
2369     ///
2370     /// # Examples
2371     ///
2372     /// ```
2373     /// # use std::collections::LinkedList;
2374     /// use rayon::prelude::*;
2375     ///
2376     /// let result: LinkedList<Vec<_>> = (0..=100)
2377     ///     .into_par_iter()
2378     ///     .filter(|x| x % 2 == 0)
2379     ///     .flat_map(|x| 0..x)
2380     ///     .collect_vec_list();
2381     ///
2382     /// // `par_iter.collect_vec_list().into_iter().flatten()` turns
2383     /// // a parallel iterator into a serial one
2384     /// let total_len = result.into_iter().flatten().count();
2385     /// assert_eq!(total_len, 2550);
2386     /// ```
2387     fn collect_vec_list(self) -> LinkedList<Vec<Self::Item>> {
2388         match extend::fast_collect(self) {
2389             Either::Left(vec) => {
2390                 let mut list = LinkedList::new();
2391                 if !vec.is_empty() {
2392                     list.push_back(vec);
2393                 }
2394                 list
2395             }
2396             Either::Right(list) => list,
2397         }
2398     }
2399 
2400     /// Internal method used to define the behavior of this parallel
2401     /// iterator. You should not need to call this directly.
2402     ///
2403     /// This method causes the iterator `self` to start producing
2404     /// items and to feed them to the consumer `consumer` one by one.
2405     /// It may split the consumer before doing so to create the
2406     /// opportunity to produce in parallel.
2407     ///
2408     /// See the [README] for more details on the internals of parallel
2409     /// iterators.
2410     ///
2411     /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
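    ///
    /// As a sketch, a custom combinator that merely wraps another parallel
    /// iterator would typically just forward this call; the `PassThrough`
    /// type below is purely illustrative:
    ///
    /// ```
    /// use rayon::iter::plumbing::UnindexedConsumer;
    /// use rayon::prelude::*;
    ///
    /// struct PassThrough<I>(I);
    ///
    /// impl<I: ParallelIterator> ParallelIterator for PassThrough<I> {
    ///     type Item = I::Item;
    ///
    ///     fn drive_unindexed<C>(self, consumer: C) -> C::Result
    ///     where
    ///         C: UnindexedConsumer<Self::Item>,
    ///     {
    ///         // Delegate to the wrapped iterator.
    ///         self.0.drive_unindexed(consumer)
    ///     }
    /// }
    ///
    /// let sum: i32 = PassThrough((1..=3).into_par_iter()).sum();
    /// assert_eq!(sum, 6);
    /// ```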
2412     fn drive_unindexed<C>(self, consumer: C) -> C::Result
2413     where
2414         C: UnindexedConsumer<Self::Item>;
2415 
2416     /// Internal method used to define the behavior of this parallel
2417     /// iterator. You should not need to call this directly.
2418     ///
2419     /// Returns the number of items produced by this iterator, if known
2420     /// statically. This can be used by consumers to trigger special fast
2421     /// paths. Therefore, if `Some(_)` is returned, this iterator must only
2422     /// use the (indexed) `Consumer` methods when driving a consumer, such
2423     /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or
2424     /// other `UnindexedConsumer` methods -- or returning an inaccurate
2425     /// value -- may result in panics.
2426     ///
2427     /// This method is currently used to optimize `collect` for want
2428     /// of true Rust specialization; it may be removed when
2429     /// specialization is stable.
2430     fn opt_len(&self) -> Option<usize> {
2431         None
2432     }
2433 }
2434 
2435 impl<T: ParallelIterator> IntoParallelIterator for T {
2436     type Iter = T;
2437     type Item = T::Item;
2438 
2439     fn into_par_iter(self) -> T {
2440         self
2441     }
2442 }
2443 
2444 /// An iterator that supports "random access" to its data, meaning
2445 /// that you can split it at arbitrary indices and draw data from
2446 /// those points.
2447 ///
2448 /// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges
2449 // Waiting for `ExactSizeIterator::is_empty` to be stabilized. See rust-lang/rust#35428
2450 #[allow(clippy::len_without_is_empty)]
2451 pub trait IndexedParallelIterator: ParallelIterator {
2452     /// Divides an iterator into sequential blocks of exponentially-increasing size.
2453     ///
2454     /// Normally, parallel iterators are recursively divided into tasks in parallel.
2455     /// This adaptor changes the default behavior by splitting the iterator into a **sequence**
2456     /// of parallel iterators of increasing sizes.
2457     /// Sizes grow exponentially in order to avoid creating
2458     /// too many blocks. This also makes it possible to balance the current block against all previous ones.
2459     ///
2460     /// This can have many applications but the most notable ones are:
2461     /// - better performance with [`find_first()`][ParallelIterator::find_first]
2462     /// - more predictable performance with [`find_any()`][ParallelIterator::find_any]
2463     ///   or any interruptible computation
2464     ///
2465     /// # Examples
2466     ///
2467     /// ```
2468     /// use rayon::prelude::*;
2469     /// assert_eq!((0..10_000).into_par_iter()
2470     ///                       .by_exponential_blocks()
2471     ///                       .find_first(|&e| e==4_999), Some(4_999))
2472     /// ```
2473     ///
2474     /// In this example, without blocks, rayon will split the initial range into two but all work
2475     /// on the right hand side (from 5,000 onwards) is **useless** since the sequential algorithm
2476     /// never goes there. This means that if two threads are used there will be **no** speedup **at
2477     /// all**.
2478     ///
2479     /// `by_exponential_blocks` on the other hand will start with the leftmost range from 0
2480     /// to `p` (the number of threads), continue with `p` to `3p`, then `3p` to `7p`, and so on.
2481     ///
2482     /// Each subrange is processed in parallel, while the subranges themselves are processed one after the other.
2483     /// We therefore ensure a logarithmic number of blocks (and overhead) while guaranteeing
2484     /// we stop at the first block containing the searched data.
2485     fn by_exponential_blocks(self) -> ExponentialBlocks<Self> {
2486         ExponentialBlocks::new(self)
2487     }
2488 
2489     /// Divides an iterator into sequential blocks of the given size.
2490     ///
2491     /// Normally, parallel iterators are recursively divided into tasks in parallel.
2492     /// This adaptor changes the default behavior by splitting the iterator into a **sequence**
2493     /// of parallel iterators of the given `block_size`.
2494     /// The main application is to obtain better
2495     /// memory locality (especially if the reduce operation re-uses folded data).
2496     ///
2497     /// **Panics** if `block_size` is 0.
2498     ///
2499     /// # Example
2500     /// ```
2501     /// use rayon::prelude::*;
2502     /// // during most reductions v1 and v2 fit the cache
2503     /// let v = (0u32..10_000_000)
2504     ///     .into_par_iter()
2505     ///     .by_uniform_blocks(1_000_000)
2506     ///     .fold(Vec::new, |mut v, e| { v.push(e); v})
2507     ///     .reduce(Vec::new, |mut v1, mut v2| { v1.append(&mut v2); v1});
2508     /// assert_eq!(v, (0u32..10_000_000).collect::<Vec<u32>>());
2509     /// ```
2510     #[track_caller]
2511     fn by_uniform_blocks(self, block_size: usize) -> UniformBlocks<Self> {
2512         assert!(block_size != 0, "block_size must not be zero");
2513         UniformBlocks::new(self, block_size)
2514     }
2515 
2516     /// Collects the results of the iterator into the specified
2517     /// vector. The vector is always cleared before execution
2518     /// begins. If possible, reusing the vector across calls can lead
2519     /// to better performance since it reuses the same backing buffer.
2520     ///
2521     /// # Examples
2522     ///
2523     /// ```
2524     /// use rayon::prelude::*;
2525     ///
2526     /// // any prior data will be cleared
2527     /// let mut vec = vec![-1, -2, -3];
2528     ///
2529     /// (0..5).into_par_iter()
2530     ///     .collect_into_vec(&mut vec);
2531     ///
2532     /// assert_eq!(vec, [0, 1, 2, 3, 4]);
2533     /// ```
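    ///
    /// A sketch of reusing the same vector across calls:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let mut vec = Vec::new();
    ///
    /// (0..3).into_par_iter().collect_into_vec(&mut vec);
    /// assert_eq!(vec, [0, 1, 2]);
    ///
    /// // The second call can reuse `vec`'s backing buffer.
    /// (10..13).into_par_iter().collect_into_vec(&mut vec);
    /// assert_eq!(vec, [10, 11, 12]);
    /// ```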
2534     fn collect_into_vec(self, target: &mut Vec<Self::Item>) {
2535         collect::collect_into_vec(self, target);
2536     }
2537 
2538     /// Unzips the results of the iterator into the specified
2539     /// vectors. The vectors are always cleared before execution
2540     /// begins. If possible, reusing the vectors across calls can lead
2541     /// to better performance since they reuse the same backing buffer.
2542     ///
2543     /// # Examples
2544     ///
2545     /// ```
2546     /// use rayon::prelude::*;
2547     ///
2548     /// // any prior data will be cleared
2549     /// let mut left = vec![42; 10];
2550     /// let mut right = vec![-1; 10];
2551     ///
2552     /// (10..15).into_par_iter()
2553     ///     .enumerate()
2554     ///     .unzip_into_vecs(&mut left, &mut right);
2555     ///
2556     /// assert_eq!(left, [0, 1, 2, 3, 4]);
2557     /// assert_eq!(right, [10, 11, 12, 13, 14]);
2558     /// ```
2559     fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
2560     where
2561         Self: IndexedParallelIterator<Item = (A, B)>,
2562         A: Send,
2563         B: Send,
2564     {
2565         collect::unzip_into_vecs(self, left, right);
2566     }
2567 
2568     /// Iterates over tuples `(A, B)`, where the items `A` are from
2569     /// this iterator and `B` are from the iterator given as argument.
2570     /// Like the `zip` method on ordinary iterators, if the two
2571     /// iterators are of unequal length, you only get the items they
2572     /// have in common.
2573     ///
2574     /// # Examples
2575     ///
2576     /// ```
2577     /// use rayon::prelude::*;
2578     ///
2579     /// let result: Vec<_> = (1..4)
2580     ///     .into_par_iter()
2581     ///     .zip(vec!['a', 'b', 'c'])
2582     ///     .collect();
2583     ///
2584     /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);
2585     /// ```
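    ///
    /// For instance, with iterators of unequal length, only the common prefix
    /// is produced and the extra items are dropped:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let shorter = [1, 2];
    /// let longer = [10, 20, 30];
    ///
    /// let pairs: Vec<_> = shorter.par_iter().zip(longer.par_iter()).collect();
    ///
    /// assert_eq!(pairs, [(&1, &10), (&2, &20)]);
    /// ```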
2586     fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter>
2587     where
2588         Z: IntoParallelIterator,
2589         Z::Iter: IndexedParallelIterator,
2590     {
2591         Zip::new(self, zip_op.into_par_iter())
2592     }
2593 
2594     /// The same as `Zip`, but requires that both iterators have the same length.
2595     ///
2596     /// # Panics
2597     /// Will panic if `self` and `zip_op` are not the same length.
2598     ///
2599     /// ```should_panic
2600     /// use rayon::prelude::*;
2601     ///
2602     /// let one = [1u8];
2603     /// let two = [2u8, 2];
2604     /// let one_iter = one.par_iter();
2605     /// let two_iter = two.par_iter();
2606     ///
2607     /// // this will panic
2608     /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();
2609     ///
2610     /// // we should never get here
2611     /// assert_eq!(1, zipped.len());
2612     /// ```
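    ///
    /// With iterators of equal length, no panic occurs and `zip_eq` behaves
    /// like [`zip()`](#method.zip):
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let a = [1, 2, 3];
    /// let b = [4, 5, 6];
    ///
    /// let sums: Vec<i32> = a.par_iter().zip_eq(b.par_iter()).map(|(x, y)| x + y).collect();
    ///
    /// assert_eq!(sums, [5, 7, 9]);
    /// ```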
2613     #[track_caller]
2614     fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter>
2615     where
2616         Z: IntoParallelIterator,
2617         Z::Iter: IndexedParallelIterator,
2618     {
2619         let zip_op_iter = zip_op.into_par_iter();
2620         assert_eq!(
2621             self.len(),
2622             zip_op_iter.len(),
2623             "iterators must have the same length"
2624         );
2625         ZipEq::new(self, zip_op_iter)
2626     }
2627 
2628     /// Interleaves elements of this iterator and the other given
2629     /// iterator. Alternately yields elements from this iterator and
2630     /// the given iterator, until both are exhausted. If one iterator
2631     /// is exhausted before the other, the last elements are provided
2632     /// from the other.
2633     ///
2634     /// # Examples
2635     ///
2636     /// ```
2637     /// use rayon::prelude::*;
2638     /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
2639     /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
2640     /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);
2641     /// ```
2642     fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter>
2643     where
2644         I: IntoParallelIterator<Item = Self::Item>,
2645         I::Iter: IndexedParallelIterator<Item = Self::Item>,
2646     {
2647         Interleave::new(self, other.into_par_iter())
2648     }
2649 
2650     /// Interleaves elements of this iterator and the other given
2651     /// iterator, until one is exhausted.
2652     ///
2653     /// # Examples
2654     ///
2655     /// ```
2656     /// use rayon::prelude::*;
2657     /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
2658     /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
2659     /// assert_eq!(r, vec![1, 5, 2, 6, 3]);
2660     /// ```
2661     fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter>
2662     where
2663         I: IntoParallelIterator<Item = Self::Item>,
2664         I::Iter: IndexedParallelIterator<Item = Self::Item>,
2665     {
2666         InterleaveShortest::new(self, other.into_par_iter())
2667     }
2668 
2669     /// Splits an iterator up into fixed-size chunks.
2670     ///
2671     /// Returns an iterator that returns `Vec`s of the given number of elements.
2672     /// If the number of elements in the iterator is not divisible by `chunk_size`,
2673     /// the last chunk may be shorter than `chunk_size`.
2674     ///
2675     /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on
2676     /// slices, without having to allocate intermediate `Vec`s for the chunks.
2677     ///
2678     /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks
2679     /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut
2680     ///
2681     /// **Panics** if `chunk_size` is 0.
2682     ///
2683     /// # Examples
2684     ///
2685     /// ```
2686     /// use rayon::prelude::*;
2687     /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2688     /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
2689     /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);
2690     /// ```
2691     #[track_caller]
2692     fn chunks(self, chunk_size: usize) -> Chunks<Self> {
2693         assert!(chunk_size != 0, "chunk_size must not be zero");
2694         Chunks::new(self, chunk_size)
2695     }
2696 
2697     /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2698     /// each chunk.
2699     ///
2700     /// Returns an iterator that produces a folded result for each chunk of items
2701     /// produced by this iterator.
2702     ///
2703     /// This works essentially like:
2704     ///
2705     /// ```text
2706     /// iter.chunks(chunk_size)
2707     ///     .map(|chunk|
2708     ///         chunk.into_iter()
2709     ///             .fold(identity, fold_op)
2710     ///     )
2711     /// ```
2712     ///
2713     /// except there is no per-chunk allocation overhead.
2714     ///
2715     /// [`fold()`]: std::iter::Iterator#method.fold
2716     ///
2717     /// **Panics** if `chunk_size` is 0.
2718     ///
2719     /// # Examples
2720     ///
2721     /// ```
2722     /// use rayon::prelude::*;
2723     /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2724     /// let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
2725     /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2726     /// ```
2727     #[track_caller]
2728     fn fold_chunks<T, ID, F>(
2729         self,
2730         chunk_size: usize,
2731         identity: ID,
2732         fold_op: F,
2733     ) -> FoldChunks<Self, ID, F>
2734     where
2735         ID: Fn() -> T + Send + Sync,
2736         F: Fn(T, Self::Item) -> T + Send + Sync,
2737         T: Send,
2738     {
2739         assert!(chunk_size != 0, "chunk_size must not be zero");
2740         FoldChunks::new(self, chunk_size, identity, fold_op)
2741     }
2742 
2743     /// Splits an iterator into fixed-size chunks, performing a sequential [`fold()`] on
2744     /// each chunk.
2745     ///
2746     /// Returns an iterator that produces a folded result for each chunk of items
2747     /// produced by this iterator.
2748     ///
2749     /// This works essentially like `fold_chunks(chunk_size, || init.clone(), fold_op)`,
2750     /// except it doesn't require the `init` type to be `Sync`, nor any other form of
2751     /// added synchronization.
2752     ///
2753     /// [`fold()`]: std::iter::Iterator#method.fold
2754     ///
2755     /// **Panics** if `chunk_size` is 0.
2756     ///
2757     /// # Examples
2758     ///
2759     /// ```
2760     /// use rayon::prelude::*;
2761     /// let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
2762     /// let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
2763     /// assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);
2764     /// ```
2765     #[track_caller]
2766     fn fold_chunks_with<T, F>(
2767         self,
2768         chunk_size: usize,
2769         init: T,
2770         fold_op: F,
2771     ) -> FoldChunksWith<Self, T, F>
2772     where
2773         T: Send + Clone,
2774         F: Fn(T, Self::Item) -> T + Send + Sync,
2775     {
2776         assert!(chunk_size != 0, "chunk_size must not be zero");
2777         FoldChunksWith::new(self, chunk_size, init, fold_op)
2778     }
2779 
2780     /// Lexicographically compares the elements of this `ParallelIterator` with those of
2781     /// another.
2782     ///
2783     /// # Examples
2784     ///
2785     /// ```
2786     /// use rayon::prelude::*;
2787     /// use std::cmp::Ordering::*;
2788     ///
2789     /// let x = vec![1, 2, 3];
2790     /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
2791     /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
2792     /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);
2793     /// ```
2794     fn cmp<I>(self, other: I) -> Ordering
2795     where
2796         I: IntoParallelIterator<Item = Self::Item>,
2797         I::Iter: IndexedParallelIterator,
2798         Self::Item: Ord,
2799     {
2800         #[inline]
2801         fn ordering<T: Ord>((x, y): (T, T)) -> Ordering {
2802             Ord::cmp(&x, &y)
2803         }
2804 
2805         #[inline]
2806         fn inequal(&ord: &Ordering) -> bool {
2807             ord != Ordering::Equal
2808         }
2809 
2810         let other = other.into_par_iter();
2811         let ord_len = self.len().cmp(&other.len());
2812         self.zip(other)
2813             .map(ordering)
2814             .find_first(inequal)
2815             .unwrap_or(ord_len)
2816     }
2817 
2818     /// Lexicographically compares the elements of this `ParallelIterator` with those of
2819     /// another.
2820     ///
2821     /// # Examples
2822     ///
2823     /// ```
2824     /// use rayon::prelude::*;
2825     /// use std::cmp::Ordering::*;
2826     /// use std::f64::NAN;
2827     ///
2828     /// let x = vec![1.0, 2.0, 3.0];
2829     /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
2830     /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
2831     /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
2832     /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);
2833     /// ```
2834     fn partial_cmp<I>(self, other: I) -> Option<Ordering>
2835     where
2836         I: IntoParallelIterator,
2837         I::Iter: IndexedParallelIterator,
2838         Self::Item: PartialOrd<I::Item>,
2839     {
2840         #[inline]
2841         fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> {
2842             PartialOrd::partial_cmp(&x, &y)
2843         }
2844 
2845         #[inline]
2846         fn inequal(&ord: &Option<Ordering>) -> bool {
2847             ord != Some(Ordering::Equal)
2848         }
2849 
2850         let other = other.into_par_iter();
2851         let ord_len = self.len().cmp(&other.len());
2852         self.zip(other)
2853             .map(ordering)
2854             .find_first(inequal)
2855             .unwrap_or(Some(ord_len))
2856     }
2857 
2858     /// Determines if the elements of this `ParallelIterator`
2859     /// are equal to those of another.
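    ///
    /// # Examples
    ///
    /// A brief sketch in the style of the `cmp` example above:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().eq(&vec![1, 2, 3]));
    /// assert!(!x.par_iter().eq(&vec![1, 2, 4]));
    /// ```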
2860     fn eq<I>(self, other: I) -> bool
2861     where
2862         I: IntoParallelIterator,
2863         I::Iter: IndexedParallelIterator,
2864         Self::Item: PartialEq<I::Item>,
2865     {
2866         #[inline]
2867         fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool {
2868             PartialEq::eq(&x, &y)
2869         }
2870 
2871         let other = other.into_par_iter();
2872         self.len() == other.len() && self.zip(other).all(eq)
2873     }
2874 
2875     /// Determines if the elements of this `ParallelIterator`
2876     /// are unequal to those of another.
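    ///
    /// # Examples
    ///
    /// A brief sketch, the negation of `eq` above:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().ne(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().ne(&vec![1, 2, 3]));
    /// ```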
2877     fn ne<I>(self, other: I) -> bool
2878     where
2879         I: IntoParallelIterator,
2880         I::Iter: IndexedParallelIterator,
2881         Self::Item: PartialEq<I::Item>,
2882     {
2883         !self.eq(other)
2884     }
2885 
2886     /// Determines if the elements of this `ParallelIterator`
2887     /// are lexicographically less than those of another.
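    ///
    /// # Examples
    ///
    /// A brief sketch, equivalent to `self.partial_cmp(other) == Some(Less)`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().lt(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().lt(&vec![1, 2, 3]));
    /// ```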
2888     fn lt<I>(self, other: I) -> bool
2889     where
2890         I: IntoParallelIterator,
2891         I::Iter: IndexedParallelIterator,
2892         Self::Item: PartialOrd<I::Item>,
2893     {
2894         self.partial_cmp(other) == Some(Ordering::Less)
2895     }
2896 
2897     /// Determines if the elements of this `ParallelIterator`
2898     /// are less than or equal to those of another.
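    ///
    /// # Examples
    ///
    /// A brief sketch, true for both the `Less` and `Equal` cases:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().le(&vec![1, 2, 3]));
    /// assert!(x.par_iter().le(&vec![1, 2, 4]));
    /// assert!(!x.par_iter().le(&vec![1, 2, 0]));
    /// ```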
2899     fn le<I>(self, other: I) -> bool
2900     where
2901         I: IntoParallelIterator,
2902         I::Iter: IndexedParallelIterator,
2903         Self::Item: PartialOrd<I::Item>,
2904     {
2905         let ord = self.partial_cmp(other);
2906         ord == Some(Ordering::Equal) || ord == Some(Ordering::Less)
2907     }
2908 
2909     /// Determines if the elements of this `ParallelIterator`
2910     /// are lexicographically greater than those of another.
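    ///
    /// # Examples
    ///
    /// A brief sketch, equivalent to `self.partial_cmp(other) == Some(Greater)`:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().gt(&vec![1, 2, 0]));
    /// assert!(!x.par_iter().gt(&vec![1, 2, 3]));
    /// ```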
2911     fn gt<I>(self, other: I) -> bool
2912     where
2913         I: IntoParallelIterator,
2914         I::Iter: IndexedParallelIterator,
2915         Self::Item: PartialOrd<I::Item>,
2916     {
2917         self.partial_cmp(other) == Some(Ordering::Greater)
2918     }
2919 
2920     /// Determines if the elements of this `ParallelIterator`
2921     /// are greater than or equal to those of another.
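    ///
    /// # Examples
    ///
    /// A brief sketch, true for both the `Greater` and `Equal` cases:
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// let x = vec![1, 2, 3];
    /// assert!(x.par_iter().ge(&vec![1, 2, 3]));
    /// assert!(x.par_iter().ge(&vec![1, 2, 0]));
    /// assert!(!x.par_iter().ge(&vec![1, 2, 4]));
    /// ```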
2922     fn ge<I>(self, other: I) -> bool
2923     where
2924         I: IntoParallelIterator,
2925         I::Iter: IndexedParallelIterator,
2926         Self::Item: PartialOrd<I::Item>,
2927     {
2928         let ord = self.partial_cmp(other);
2929         ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater)
2930     }
2931 
2932     /// Yields an index along with each item.
2933     ///
2934     /// # Examples
2935     ///
2936     /// ```
2937     /// use rayon::prelude::*;
2938     ///
2939     /// let chars = vec!['a', 'b', 'c'];
2940     /// let result: Vec<_> = chars
2941     ///     .into_par_iter()
2942     ///     .enumerate()
2943     ///     .collect();
2944     ///
2945     /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);
2946     /// ```
2947     fn enumerate(self) -> Enumerate<Self> {
2948         Enumerate::new(self)
2949     }
2950 
2951     /// Creates an iterator that steps by the given amount.
2952     ///
2953     /// # Examples
2954     ///
2955     /// ```
2956     /// use rayon::prelude::*;
2957     ///
2958     /// let range = 3..10;
2959     /// let result: Vec<i32> = range
2960     ///     .into_par_iter()
2961     ///     .step_by(3)
2962     ///     .collect();
2963     ///
2964     /// assert_eq!(result, [3, 6, 9]);
2965     /// ```
2966     fn step_by(self, step: usize) -> StepBy<Self> {
2967         StepBy::new(self, step)
2968     }
2969 
2970     /// Creates an iterator that skips the first `n` elements.
2971     ///
2972     /// # Examples
2973     ///
2974     /// ```
2975     /// use rayon::prelude::*;
2976     ///
2977     /// let result: Vec<_> = (0..100)
2978     ///     .into_par_iter()
2979     ///     .skip(95)
2980     ///     .collect();
2981     ///
2982     /// assert_eq!(result, [95, 96, 97, 98, 99]);
2983     /// ```
2984     fn skip(self, n: usize) -> Skip<Self> {
2985         Skip::new(self, n)
2986     }
2987 
2988     /// Creates an iterator that yields the first `n` elements.
2989     ///
2990     /// # Examples
2991     ///
2992     /// ```
2993     /// use rayon::prelude::*;
2994     ///
2995     /// let result: Vec<_> = (0..100)
2996     ///     .into_par_iter()
2997     ///     .take(5)
2998     ///     .collect();
2999     ///
3000     /// assert_eq!(result, [0, 1, 2, 3, 4]);
3001     /// ```
3002     fn take(self, n: usize) -> Take<Self> {
3003         Take::new(self, n)
3004     }
3005 
3006     /// Searches for **some** item in the parallel iterator that
3007     /// matches the given predicate, and returns its index.  Like
3008     /// `ParallelIterator::find_any`, the parallel search will not
3009     /// necessarily find the **first** match, and once a match is
3010     /// found we'll attempt to stop processing any more.
3011     ///
3012     /// # Examples
3013     ///
3014     /// ```
3015     /// use rayon::prelude::*;
3016     ///
3017     /// let a = [1, 2, 3, 3];
3018     ///
3019     /// let i = a.par_iter().position_any(|&x| x == 3).expect("found");
3020     /// assert!(i == 2 || i == 3);
3021     ///
3022     /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None);
3023     /// ```
3024     fn position_any<P>(self, predicate: P) -> Option<usize>
3025     where
3026         P: Fn(Self::Item) -> bool + Sync + Send,
3027     {
3028         #[inline]
3029         fn check(&(_, p): &(usize, bool)) -> bool {
3030             p
3031         }
3032 
3033         let (i, _) = self.map(predicate).enumerate().find_any(check)?;
3034         Some(i)
3035     }
3036 
3037     /// Searches for the sequentially **first** item in the parallel iterator
3038     /// that matches the given predicate, and returns its index.
3039     ///
3040     /// Like `ParallelIterator::find_first`, once a match is found,
3041     /// all attempts to the right of the match will be stopped, while
3042     /// attempts to the left must continue in case an earlier match
3043     /// is found.
3044     ///
3045     /// Note that not all parallel iterators have a useful order, much like
3046     /// sequential `HashMap` iteration, so "first" may be nebulous.  If you
3047     /// just want the first match discovered anywhere in the iterator,
3048     /// `position_any` is a better choice.
3049     ///
3050     /// # Examples
3051     ///
3052     /// ```
3053     /// use rayon::prelude::*;
3054     ///
3055     /// let a = [1, 2, 3, 3];
3056     ///
3057     /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));
3058     ///
3059     /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None);
3060     /// ```
3061     fn position_first<P>(self, predicate: P) -> Option<usize>
3062     where
3063         P: Fn(Self::Item) -> bool + Sync + Send,
3064     {
3065         #[inline]
3066         fn check(&(_, p): &(usize, bool)) -> bool {
3067             p
3068         }
3069 
3070         let (i, _) = self.map(predicate).enumerate().find_first(check)?;
3071         Some(i)
3072     }
3073 
3074     /// Searches for the sequentially **last** item in the parallel iterator
3075     /// that matches the given predicate, and returns its index.
3076     ///
3077     /// Like `ParallelIterator::find_last`, once a match is found,
3078     /// all attempts to the left of the match will be stopped, while
3079     /// attempts to the right must continue in case a later match
3080     /// is found.
3081     ///
3082     /// Note that not all parallel iterators have a useful order, much like
3083     /// sequential `HashMap` iteration, so "last" may be nebulous.  When the
3084     /// order doesn't actually matter to you, `position_any` is a better
3085     /// choice.
3086     ///
3087     /// # Examples
3088     ///
3089     /// ```
3090     /// use rayon::prelude::*;
3091     ///
3092     /// let a = [1, 2, 3, 3];
3093     ///
3094     /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));
3095     ///
3096     /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None);
3097     /// ```
3098     fn position_last<P>(self, predicate: P) -> Option<usize>
3099     where
3100         P: Fn(Self::Item) -> bool + Sync + Send,
3101     {
3102         #[inline]
3103         fn check(&(_, p): &(usize, bool)) -> bool {
3104             p
3105         }
3106 
3107         let (i, _) = self.map(predicate).enumerate().find_last(check)?;
3108         Some(i)
3109     }
3110 
3111     #[doc(hidden)]
3112     #[deprecated(
3113         note = "parallel `position` does not search in order -- use `position_any`, \
3114                 `position_first`, or `position_last`"
3115     )]
3116     fn position<P>(self, predicate: P) -> Option<usize>
3117     where
3118         P: Fn(Self::Item) -> bool + Sync + Send,
3119     {
3120         self.position_any(predicate)
3121     }
3122 
3123     /// Searches for items in the parallel iterator that match the given
3124     /// predicate, and returns their indices.
3125     ///
3126     /// # Examples
3127     ///
3128     /// ```
3129     /// use rayon::prelude::*;
3130     ///
3131     /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
3132     ///
3133     /// // Find the positions of primes congruent to 1 modulo 6
3134     /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
3135     /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19
3136     ///
3137     /// // Find the positions of primes congruent to 5 modulo 6
3138     /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
3139     /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29
3140     /// ```
3141     fn positions<P>(self, predicate: P) -> Positions<Self, P>
3142     where
3143         P: Fn(Self::Item) -> bool + Sync + Send,
3144     {
3145         Positions::new(self, predicate)
3146     }
3147 
3148     /// Produces a new iterator with the elements of this iterator in
3149     /// reverse order.
3150     ///
3151     /// # Examples
3152     ///
3153     /// ```
3154     /// use rayon::prelude::*;
3155     ///
3156     /// let result: Vec<_> = (0..5)
3157     ///     .into_par_iter()
3158     ///     .rev()
3159     ///     .collect();
3160     ///
3161     /// assert_eq!(result, [4, 3, 2, 1, 0]);
3162     /// ```
3163     fn rev(self) -> Rev<Self> {
3164         Rev::new(self)
3165     }
3166 
3167     /// Sets the minimum length of iterators desired to process in each
3168     /// rayon job.  Rayon will not split any smaller than this length, but
3169     /// of course an iterator could already be smaller to begin with.
3170     ///
3171     /// Producers like `zip` and `interleave` will use the greater of the two
3172     /// minimums.
3173     /// Chained iterators and iterators inside `flat_map` may each use
3174     /// their own minimum length.
3175     ///
3176     /// # Examples
3177     ///
3178     /// ```
3179     /// use rayon::prelude::*;
3180     ///
3181     /// let min = (0..1_000_000)
3182     ///     .into_par_iter()
3183     ///     .with_min_len(1234)
3184     ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3185     ///     .min().unwrap();
3186     ///
3187     /// assert!(min >= 1234);
3188     /// ```
3189     fn with_min_len(self, min: usize) -> MinLen<Self> {
3190         MinLen::new(self, min)
3191     }
3192 
3193     /// Sets the maximum length of iterators desired to process in each
3194     /// rayon job.  Rayon will try to split at least below this length,
3195     /// unless that would put it below the length from `with_min_len()`.
3196     /// For example, given min=10 and max=15, a length of 16 will not be
3197     /// split any further.
3198     ///
3199     /// Producers like `zip` and `interleave` will use the lesser of the two
3200     /// maximums.
3201     /// Chained iterators and iterators inside `flat_map` may each use
3202     /// their own maximum length.
3203     ///
3204     /// # Examples
3205     ///
3206     /// ```
3207     /// use rayon::prelude::*;
3208     ///
3209     /// let max = (0..1_000_000)
3210     ///     .into_par_iter()
3211     ///     .with_max_len(1234)
3212     ///     .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
3213     ///     .max().unwrap();
3214     ///
3215     /// assert!(max <= 1234);
3216     /// ```
3217     fn with_max_len(self, max: usize) -> MaxLen<Self> {
3218         MaxLen::new(self, max)
3219     }
3220 
3221     /// Produces an exact count of how many items this iterator will
3222     /// produce, presuming no panic occurs.
3223     ///
3224     /// # Examples
3225     ///
3226     /// ```
3227     /// use rayon::prelude::*;
3228     ///
3229     /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
3230     /// assert_eq!(par_iter.len(), 10);
3231     ///
3232     /// let vec: Vec<_> = par_iter.collect();
3233     /// assert_eq!(vec.len(), 10);
3234     /// ```
3235     fn len(&self) -> usize;
3236 
3237     /// Internal method used to define the behavior of this parallel
3238     /// iterator. You should not need to call this directly.
3239     ///
3240     /// This method causes the iterator `self` to start producing
3241     /// items and to feed them to the consumer `consumer` one by one.
3242     /// It may split the consumer before doing so to create the
3243     /// opportunity to produce in parallel. If a split does happen, it
3244     /// will inform the consumer of the index where the split should
3245     /// occur (unlike `ParallelIterator::drive_unindexed()`).
3246     ///
3247     /// See the [README] for more details on the internals of parallel
3248     /// iterators.
3249     ///
3250     /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
3251     fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result;
3252 
3253     /// Internal method used to define the behavior of this parallel
3254     /// iterator. You should not need to call this directly.
3255     ///
3256     /// This method converts the iterator into a producer P and then
3257     /// invokes `callback.callback()` with P. Note that the type of
3258     /// this producer is not defined as part of the API, since
3259     /// `callback` must be defined generically for all producers. This
3260     /// allows the producer type to contain references; it also means
3261     /// that parallel iterators can adjust that type without causing a
3262     /// breaking change.
3263     ///
3264     /// See the [README] for more details on the internals of parallel
3265     /// iterators.
3266     ///
3267     /// [README]: https://github.com/rayon-rs/rayon/blob/main/src/iter/plumbing/README.md
3268     fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output;
3269 }
3270 
3271 /// `FromParallelIterator` implements the creation of a collection
3272 /// from a [`ParallelIterator`]. By implementing
3273 /// `FromParallelIterator` for a given type, you define how it will be
3274 /// created from an iterator.
3275 ///
3276 /// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method.
3277 ///
3278 /// [`ParallelIterator`]: trait.ParallelIterator.html
3279 /// [`collect()`]: trait.ParallelIterator.html#method.collect
3280 ///
3281 /// # Examples
3282 ///
3283 /// Implementing `FromParallelIterator` for your type:
3284 ///
3285 /// ```
3286 /// use rayon::prelude::*;
3287 /// use std::mem;
3288 ///
3289 /// struct BlackHole {
3290 ///     mass: usize,
3291 /// }
3292 ///
3293 /// impl<T: Send> FromParallelIterator<T> for BlackHole {
3294 ///     fn from_par_iter<I>(par_iter: I) -> Self
3295 ///         where I: IntoParallelIterator<Item = T>
3296 ///     {
3297 ///         let par_iter = par_iter.into_par_iter();
3298 ///         BlackHole {
3299 ///             mass: par_iter.count() * mem::size_of::<T>(),
3300 ///         }
3301 ///     }
3302 /// }
3303 ///
3304 /// let bh: BlackHole = (0i32..1000).into_par_iter().collect();
3305 /// assert_eq!(bh.mass, 4000);
3306 /// ```
3307 pub trait FromParallelIterator<T>
3308 where
3309     T: Send,
3310 {
3311     /// Creates an instance of the collection from the parallel iterator `par_iter`.
3312     ///
3313     /// If your collection is not naturally parallel, the easiest (and
3314     /// fastest) way to do this is often to collect `par_iter` into a
3315     /// [`LinkedList`] (via [`collect_vec_list`]) or another intermediate
3316     /// data structure and then sequentially extend your collection. However,
3317     /// a more 'native' technique is to use the [`par_iter.fold`] or
3318     /// [`par_iter.fold_with`] methods to create the collection.
3319     /// Alternatively, if your collection is 'natively' parallel, you
3320     /// can use `par_iter.for_each` to process each element in turn.
3321     ///
3322     /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html
3323     /// [`collect_vec_list`]: ParallelIterator::collect_vec_list
3324     /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold
3325     /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with
3326     /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each
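    ///
    /// # Examples
    ///
    /// A minimal sketch of the `collect_vec_list` approach described above, for a
    /// hypothetical `MyCollection` wrapper type (the type and field names are only
    /// illustrative):
    ///
    /// ```
    /// use rayon::prelude::*;
    ///
    /// struct MyCollection {
    ///     values: Vec<u32>,
    /// }
    ///
    /// impl FromParallelIterator<u32> for MyCollection {
    ///     fn from_par_iter<I>(par_iter: I) -> Self
    ///         where I: IntoParallelIterator<Item = u32>
    ///     {
    ///         // Collect in parallel into a LinkedList of Vecs, then flatten it
    ///         // sequentially into the final collection.
    ///         let list = par_iter.into_par_iter().collect_vec_list();
    ///         MyCollection {
    ///             values: list.into_iter().flatten().collect(),
    ///         }
    ///     }
    /// }
    ///
    /// let c: MyCollection = (0u32..100).into_par_iter().map(|i| i * 2).collect();
    /// assert_eq!(c.values.len(), 100);
    /// ```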
3327     fn from_par_iter<I>(par_iter: I) -> Self
3328     where
3329         I: IntoParallelIterator<Item = T>;
3330 }
3331 
3332 /// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`].
3333 ///
3334 /// [`ParallelIterator`]: trait.ParallelIterator.html
3335 ///
3336 /// # Examples
3337 ///
3338 /// Implementing `ParallelExtend` for your type:
3339 ///
3340 /// ```
3341 /// use rayon::prelude::*;
3342 /// use std::mem;
3343 ///
3344 /// struct BlackHole {
3345 ///     mass: usize,
3346 /// }
3347 ///
3348 /// impl<T: Send> ParallelExtend<T> for BlackHole {
3349 ///     fn par_extend<I>(&mut self, par_iter: I)
3350 ///         where I: IntoParallelIterator<Item = T>
3351 ///     {
3352 ///         let par_iter = par_iter.into_par_iter();
3353 ///         self.mass += par_iter.count() * mem::size_of::<T>();
3354 ///     }
3355 /// }
3356 ///
3357 /// let mut bh = BlackHole { mass: 0 };
3358 /// bh.par_extend(0i32..1000);
3359 /// assert_eq!(bh.mass, 4000);
3360 /// bh.par_extend(0i64..10);
3361 /// assert_eq!(bh.mass, 4080);
3362 /// ```
3363 pub trait ParallelExtend<T>
3364 where
3365     T: Send,
3366 {
3367     /// Extends an instance of the collection with the elements drawn
3368     /// from the parallel iterator `par_iter`.
3369     ///
3370     /// # Examples
3371     ///
3372     /// ```
3373     /// use rayon::prelude::*;
3374     ///
3375     /// let mut vec = vec![];
3376     /// vec.par_extend(0..5);
3377     /// vec.par_extend((0..5).into_par_iter().map(|i| i * i));
3378     /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]);
3379     /// ```
3380     fn par_extend<I>(&mut self, par_iter: I)
3381     where
3382         I: IntoParallelIterator<Item = T>;
3383 }
3384 
3385 /// `ParallelDrainFull` creates a parallel iterator that moves all items
3386 /// from a collection while retaining the original capacity.
3387 ///
3388 /// Types which are indexable typically implement [`ParallelDrainRange`]
3389 /// instead, where you can drain fully with `par_drain(..)`.
3390 ///
3391 /// [`ParallelDrainRange`]: trait.ParallelDrainRange.html
3392 pub trait ParallelDrainFull {
3393     /// The draining parallel iterator type that will be created.
3394     type Iter: ParallelIterator<Item = Self::Item>;
3395 
3396     /// The type of item that the parallel iterator will produce.
3397     /// This is usually the same as `IntoParallelIterator::Item`.
3398     type Item: Send;
3399 
3400     /// Returns a draining parallel iterator over an entire collection.
3401     ///
3402     /// When the iterator is dropped, all items are removed, even if the
3403     /// iterator was not fully consumed. If the iterator is leaked, for example
3404     /// using `std::mem::forget`, it is unspecified how many items are removed.
3405     ///
3406     /// # Examples
3407     ///
3408     /// ```
3409     /// use rayon::prelude::*;
3410     /// use std::collections::{BinaryHeap, HashSet};
3411     ///
3412     /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect();
3413     ///
3414     /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect();
3415     /// assert_eq!(
3416     ///     // heaps are drained in arbitrary order
3417     ///     heap.par_drain()
3418     ///         .inspect(|x| assert!(squares.contains(x)))
3419     ///         .count(),
3420     ///     squares.len(),
3421     /// );
3422     /// assert!(heap.is_empty());
3423     /// assert!(heap.capacity() >= squares.len());
3424     /// ```
3425     fn par_drain(self) -> Self::Iter;
3426 }
3427 
3428 /// `ParallelDrainRange` creates a parallel iterator that moves a range of items
3429 /// from a collection while retaining the original capacity.
3430 ///
3431 /// Types which are not indexable may implement [`ParallelDrainFull`] instead.
3432 ///
3433 /// [`ParallelDrainFull`]: trait.ParallelDrainFull.html
3434 pub trait ParallelDrainRange<Idx = usize> {
3435     /// The draining parallel iterator type that will be created.
3436     type Iter: ParallelIterator<Item = Self::Item>;
3437 
3438     /// The type of item that the parallel iterator will produce.
3439     /// This is usually the same as `IntoParallelIterator::Item`.
3440     type Item: Send;
3441 
3442     /// Returns a draining parallel iterator over a range of the collection.
3443     ///
3444     /// When the iterator is dropped, all items in the range are removed, even
3445     /// if the iterator was not fully consumed. If the iterator is leaked, for
3446     /// example using `std::mem::forget`, it is unspecified how many items are
3447     /// removed.
3448     ///
3449     /// # Examples
3450     ///
3451     /// ```
3452     /// use rayon::prelude::*;
3453     ///
3454     /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect();
3455     ///
3456     /// println!("RangeFull");
3457     /// let mut vec = squares.clone();
3458     /// assert!(vec.par_drain(..)
3459     ///            .eq(squares.par_iter().copied()));
3460     /// assert!(vec.is_empty());
3461     /// assert!(vec.capacity() >= squares.len());
3462     ///
3463     /// println!("RangeFrom");
3464     /// let mut vec = squares.clone();
3465     /// assert!(vec.par_drain(5..)
3466     ///            .eq(squares[5..].par_iter().copied()));
3467     /// assert_eq!(&vec[..], &squares[..5]);
3468     /// assert!(vec.capacity() >= squares.len());
3469     ///
3470     /// println!("RangeTo");
3471     /// let mut vec = squares.clone();
3472     /// assert!(vec.par_drain(..5)
3473     ///            .eq(squares[..5].par_iter().copied()));
3474     /// assert_eq!(&vec[..], &squares[5..]);
3475     /// assert!(vec.capacity() >= squares.len());
3476     ///
3477     /// println!("RangeToInclusive");
3478     /// let mut vec = squares.clone();
3479     /// assert!(vec.par_drain(..=5)
3480     ///            .eq(squares[..=5].par_iter().copied()));
3481     /// assert_eq!(&vec[..], &squares[6..]);
3482     /// assert!(vec.capacity() >= squares.len());
3483     ///
3484     /// println!("Range");
3485     /// let mut vec = squares.clone();
3486     /// assert!(vec.par_drain(3..7)
3487     ///            .eq(squares[3..7].par_iter().copied()));
3488     /// assert_eq!(&vec[..3], &squares[..3]);
3489     /// assert_eq!(&vec[3..], &squares[7..]);
3490     /// assert!(vec.capacity() >= squares.len());
3491     ///
3492     /// println!("RangeInclusive");
3493     /// let mut vec = squares.clone();
3494     /// assert!(vec.par_drain(3..=7)
3495     ///            .eq(squares[3..=7].par_iter().copied()));
3496     /// assert_eq!(&vec[..3], &squares[..3]);
3497     /// assert_eq!(&vec[3..], &squares[8..]);
3498     /// assert!(vec.capacity() >= squares.len());
3499     /// ```
3500     fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter;
3501 }
3502 
3503 /// We hide the `Try` trait in a private module, as it's only meant to be a
3504 /// stable clone of the standard library's `Try` trait, as yet unstable.
3505 mod private {
3506     use std::convert::Infallible;
3507     use std::ops::ControlFlow::{self, Break, Continue};
3508     use std::task::Poll;
3509 
3510     /// Clone of `std::ops::Try`.
3511     ///
3512     /// Implementing this trait is not permitted outside of `rayon`.
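    ///
    /// As a rough sketch of the protocol (illustrative only, since the trait is
    /// private): `Ok(v)` and `Some(v)` branch to `Continue(v)`, while `Err(e)` and
    /// `None` branch to `Break(residual)`, from which the original type can be
    /// rebuilt with `from_residual`:
    ///
    /// ```text
    /// match Try::branch(item) {
    ///     ControlFlow::Continue(v) => process(v),
    ///     ControlFlow::Break(r) => return Try::from_residual(r),
    /// }
    /// ```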
3513     pub trait Try {
3514         private_decl! {}
3515 
3516         type Output;
3517         type Residual;
3518 
3519         fn from_output(output: Self::Output) -> Self;
3520 
3521         fn from_residual(residual: Self::Residual) -> Self;
3522 
3523         fn branch(self) -> ControlFlow<Self::Residual, Self::Output>;
3524     }
3525 
3526     impl<B, C> Try for ControlFlow<B, C> {
3527         private_impl! {}
3528 
3529         type Output = C;
3530         type Residual = ControlFlow<B, Infallible>;
3531 
3532         fn from_output(output: Self::Output) -> Self {
3533             Continue(output)
3534         }
3535 
3536         fn from_residual(residual: Self::Residual) -> Self {
3537             match residual {
3538                 Break(b) => Break(b),
3539                 Continue(_) => unreachable!(),
3540             }
3541         }
3542 
3543         fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3544             match self {
3545                 Continue(c) => Continue(c),
3546                 Break(b) => Break(Break(b)),
3547             }
3548         }
3549     }
3550 
3551     impl<T> Try for Option<T> {
3552         private_impl! {}
3553 
3554         type Output = T;
3555         type Residual = Option<Infallible>;
3556 
3557         fn from_output(output: Self::Output) -> Self {
3558             Some(output)
3559         }
3560 
3561         fn from_residual(residual: Self::Residual) -> Self {
3562             match residual {
3563                 None => None,
3564                 Some(_) => unreachable!(),
3565             }
3566         }
3567 
3568         fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3569             match self {
3570                 Some(c) => Continue(c),
3571                 None => Break(None),
3572             }
3573         }
3574     }
3575 
3576     impl<T, E> Try for Result<T, E> {
3577         private_impl! {}
3578 
3579         type Output = T;
3580         type Residual = Result<Infallible, E>;
3581 
3582         fn from_output(output: Self::Output) -> Self {
3583             Ok(output)
3584         }
3585 
3586         fn from_residual(residual: Self::Residual) -> Self {
3587             match residual {
3588                 Err(e) => Err(e),
3589                 Ok(_) => unreachable!(),
3590             }
3591         }
3592 
3593         fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3594             match self {
3595                 Ok(c) => Continue(c),
3596                 Err(e) => Break(Err(e)),
3597             }
3598         }
3599     }
3600 
3601     impl<T, E> Try for Poll<Result<T, E>> {
3602         private_impl! {}
3603 
3604         type Output = Poll<T>;
3605         type Residual = Result<Infallible, E>;
3606 
3607         fn from_output(output: Self::Output) -> Self {
3608             output.map(Ok)
3609         }
3610 
3611         fn from_residual(residual: Self::Residual) -> Self {
3612             match residual {
3613                 Err(e) => Poll::Ready(Err(e)),
3614                 Ok(_) => unreachable!(),
3615             }
3616         }
3617 
3618         fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3619             match self {
3620                 Poll::Pending => Continue(Poll::Pending),
3621                 Poll::Ready(Ok(c)) => Continue(Poll::Ready(c)),
3622                 Poll::Ready(Err(e)) => Break(Err(e)),
3623             }
3624         }
3625     }
3626 
3627     impl<T, E> Try for Poll<Option<Result<T, E>>> {
3628         private_impl! {}
3629 
3630         type Output = Poll<Option<T>>;
3631         type Residual = Result<Infallible, E>;
3632 
3633         fn from_output(output: Self::Output) -> Self {
3634             match output {
3635                 Poll::Ready(o) => Poll::Ready(o.map(Ok)),
3636                 Poll::Pending => Poll::Pending,
3637             }
3638         }
3639 
3640         fn from_residual(residual: Self::Residual) -> Self {
3641             match residual {
3642                 Err(e) => Poll::Ready(Some(Err(e))),
3643                 Ok(_) => unreachable!(),
3644             }
3645         }
3646 
3647         fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
3648             match self {
3649                 Poll::Pending => Continue(Poll::Pending),
3650                 Poll::Ready(None) => Continue(Poll::Ready(None)),
3651                 Poll::Ready(Some(Ok(c))) => Continue(Poll::Ready(Some(c))),
3652                 Poll::Ready(Some(Err(e))) => Break(Err(e)),
3653             }
3654         }
3655     }
3656 }
3657