//! Traits for writing parallel programs using an iterator-style interface
//!
//! You will rarely need to interact with this module directly unless you have
//! need to name one of the iterator types.
//!
//! Parallel iterators make it easy to write iterator-like chains that
//! execute in parallel: typically all you have to do is convert the
//! first `.iter()` (or `iter_mut()`, `into_iter()`, etc) method into
//! `par_iter()` (or `par_iter_mut()`, `into_par_iter()`, etc). For
//! example, to compute the sum of the squares of a sequence of
//! integers, one might write:
//!
//! ```rust
//! use rayon::prelude::*;
//! fn sum_of_squares(input: &[i32]) -> i32 {
//!     input.par_iter()
//!          .map(|i| i * i)
//!          .sum()
//! }
//! ```
//!
//! Or, to increment all the integers in a slice, you could write:
//!
//! ```rust
//! use rayon::prelude::*;
//! fn increment_all(input: &mut [i32]) {
//!     input.par_iter_mut()
//!          .for_each(|p| *p += 1);
//! }
//! ```
//!
//! To use parallel iterators, first import the traits by adding
//! something like `use rayon::prelude::*` to your module. You can
//! then call `par_iter`, `par_iter_mut`, or `into_par_iter` to get a
//! parallel iterator. Like a [regular iterator][], parallel
//! iterators work by first constructing a computation and then
//! executing it.
//!
//! In addition to `par_iter()` and friends, some types offer other
//! ways to create (or consume) parallel iterators:
//!
//! - Slices (`&[T]`, `&mut [T]`) offer methods like `par_split` and
//!   `par_windows`, as well as various parallel sorting
//!   operations. See [the `ParallelSlice` trait] for the full list.
//! - Strings (`&str`) offer methods like `par_split` and `par_lines`.
//!   See [the `ParallelString` trait] for the full list.
//! - Various collections offer [`par_extend`], which grows a
//!   collection given a parallel iterator. (If you don't have a
//!   collection to extend, you can use [`collect()`] to create a new
//!   one from scratch.)
//!
//! [the `ParallelSlice` trait]: ../slice/trait.ParallelSlice.html
//! [the `ParallelString` trait]: ../str/trait.ParallelString.html
//! [`par_extend`]: trait.ParallelExtend.html
//! [`collect()`]: trait.ParallelIterator.html#method.collect
//!
//! To see the full range of methods available on parallel iterators,
//! check out the [`ParallelIterator`] and [`IndexedParallelIterator`]
//! traits.
//!
//! If you'd like to build a custom parallel iterator, or to write your own
//! combinator, then check out the [split] function and the [plumbing] module.
//!
//! [regular iterator]: http://doc.rust-lang.org/std/iter/trait.Iterator.html
//! [`ParallelIterator`]: trait.ParallelIterator.html
//! [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html
//! [split]: fn.split.html
//! [plumbing]: plumbing/index.html
//!
//! Note: Several of the `ParallelIterator` methods rely on a `Try` trait which
//! has been deliberately obscured from the public API. This trait is intended
//! to mirror the unstable `std::ops::Try` with implementations for `Option` and
//! `Result`, where `Some`/`Ok` values will let those iterators continue, but
//! `None`/`Err` values will exit early.
//!
//! A note about object safety: It is currently _not_ possible to wrap
//! a `ParallelIterator` (or any trait that depends on it) using a
//! `Box<dyn ParallelIterator>` or other kind of dynamic allocation,
//! because `ParallelIterator` is **not object-safe**.
//! (This keeps the implementation simpler and allows extra optimizations.)
81 82 use self::plumbing::*; 83 use self::private::Try; 84 pub use either::Either; 85 use std::cmp::{self, Ordering}; 86 use std::iter::{Product, Sum}; 87 use std::ops::{Fn, RangeBounds}; 88 89 pub mod plumbing; 90 91 #[cfg(test)] 92 mod test; 93 94 // There is a method to the madness here: 95 // 96 // - These modules are private but expose certain types to the end-user 97 // (e.g., `enumerate::Enumerate`) -- specifically, the types that appear in the 98 // public API surface of the `ParallelIterator` traits. 99 // - In **this** module, those public types are always used unprefixed, which forces 100 // us to add a `pub use` and helps identify if we missed anything. 101 // - In contrast, items that appear **only** in the body of a method, 102 // e.g. `find::find()`, are always used **prefixed**, so that they 103 // can be readily distinguished. 104 105 mod chain; 106 mod chunks; 107 mod cloned; 108 mod collect; 109 mod copied; 110 mod empty; 111 mod enumerate; 112 mod extend; 113 mod filter; 114 mod filter_map; 115 mod find; 116 mod find_first_last; 117 mod flat_map; 118 mod flat_map_iter; 119 mod flatten; 120 mod flatten_iter; 121 mod fold; 122 mod for_each; 123 mod from_par_iter; 124 mod inspect; 125 mod interleave; 126 mod interleave_shortest; 127 mod intersperse; 128 mod len; 129 mod map; 130 mod map_with; 131 mod multizip; 132 mod noop; 133 mod once; 134 mod panic_fuse; 135 mod par_bridge; 136 mod positions; 137 mod product; 138 mod reduce; 139 mod repeat; 140 mod rev; 141 mod skip; 142 mod splitter; 143 mod sum; 144 mod take; 145 mod try_fold; 146 mod try_reduce; 147 mod try_reduce_with; 148 mod unzip; 149 mod update; 150 mod while_some; 151 mod zip; 152 mod zip_eq; 153 154 pub use self::{ 155 chain::Chain, 156 chunks::Chunks, 157 cloned::Cloned, 158 copied::Copied, 159 empty::{empty, Empty}, 160 enumerate::Enumerate, 161 filter::Filter, 162 filter_map::FilterMap, 163 flat_map::FlatMap, 164 flat_map_iter::FlatMapIter, 165 flatten::Flatten, 166 
flatten_iter::FlattenIter, 167 fold::{Fold, FoldWith}, 168 inspect::Inspect, 169 interleave::Interleave, 170 interleave_shortest::InterleaveShortest, 171 intersperse::Intersperse, 172 len::{MaxLen, MinLen}, 173 map::Map, 174 map_with::{MapInit, MapWith}, 175 multizip::MultiZip, 176 once::{once, Once}, 177 panic_fuse::PanicFuse, 178 par_bridge::{IterBridge, ParallelBridge}, 179 positions::Positions, 180 repeat::{repeat, repeatn, Repeat, RepeatN}, 181 rev::Rev, 182 skip::Skip, 183 splitter::{split, Split}, 184 take::Take, 185 try_fold::{TryFold, TryFoldWith}, 186 update::Update, 187 while_some::WhileSome, 188 zip::Zip, 189 zip_eq::ZipEq, 190 }; 191 192 mod step_by; 193 #[cfg(step_by)] 194 pub use self::step_by::StepBy; 195 196 /// `IntoParallelIterator` implements the conversion to a [`ParallelIterator`]. 197 /// 198 /// By implementing `IntoParallelIterator` for a type, you define how it will 199 /// transformed into an iterator. This is a parallel version of the standard 200 /// library's [`std::iter::IntoIterator`] trait. 201 /// 202 /// [`ParallelIterator`]: trait.ParallelIterator.html 203 /// [`std::iter::IntoIterator`]: https://doc.rust-lang.org/std/iter/trait.IntoIterator.html 204 pub trait IntoParallelIterator { 205 /// The parallel iterator type that will be created. 206 type Iter: ParallelIterator<Item = Self::Item>; 207 208 /// The type of item that the parallel iterator will produce. 209 type Item: Send; 210 211 /// Converts `self` into a parallel iterator. 212 /// 213 /// # Examples 214 /// 215 /// ``` 216 /// use rayon::prelude::*; 217 /// 218 /// println!("counting in parallel:"); 219 /// (0..100).into_par_iter() 220 /// .for_each(|i| println!("{}", i)); 221 /// ``` 222 /// 223 /// This conversion is often implicit for arguments to methods like [`zip`]. 
224 /// 225 /// ``` 226 /// use rayon::prelude::*; 227 /// 228 /// let v: Vec<_> = (0..5).into_par_iter().zip(5..10).collect(); 229 /// assert_eq!(v, [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]); 230 /// ``` 231 /// 232 /// [`zip`]: trait.IndexedParallelIterator.html#method.zip into_par_iter(self) -> Self::Iter233 fn into_par_iter(self) -> Self::Iter; 234 } 235 236 /// `IntoParallelRefIterator` implements the conversion to a 237 /// [`ParallelIterator`], providing shared references to the data. 238 /// 239 /// This is a parallel version of the `iter()` method 240 /// defined by various collections. 241 /// 242 /// This trait is automatically implemented 243 /// `for I where &I: IntoParallelIterator`. In most cases, users 244 /// will want to implement [`IntoParallelIterator`] rather than implement 245 /// this trait directly. 246 /// 247 /// [`ParallelIterator`]: trait.ParallelIterator.html 248 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html 249 pub trait IntoParallelRefIterator<'data> { 250 /// The type of the parallel iterator that will be returned. 251 type Iter: ParallelIterator<Item = Self::Item>; 252 253 /// The type of item that the parallel iterator will produce. 254 /// This will typically be an `&'data T` reference type. 255 type Item: Send + 'data; 256 257 /// Converts `self` into a parallel iterator. 258 /// 259 /// # Examples 260 /// 261 /// ``` 262 /// use rayon::prelude::*; 263 /// 264 /// let v: Vec<_> = (0..100).collect(); 265 /// assert_eq!(v.par_iter().sum::<i32>(), 100 * 99 / 2); 266 /// 267 /// // `v.par_iter()` is shorthand for `(&v).into_par_iter()`, 268 /// // producing the exact same references. 
269 /// assert!(v.par_iter().zip(&v) 270 /// .all(|(a, b)| std::ptr::eq(a, b))); 271 /// ``` par_iter(&'data self) -> Self::Iter272 fn par_iter(&'data self) -> Self::Iter; 273 } 274 275 impl<'data, I: 'data + ?Sized> IntoParallelRefIterator<'data> for I 276 where 277 &'data I: IntoParallelIterator, 278 { 279 type Iter = <&'data I as IntoParallelIterator>::Iter; 280 type Item = <&'data I as IntoParallelIterator>::Item; 281 par_iter(&'data self) -> Self::Iter282 fn par_iter(&'data self) -> Self::Iter { 283 self.into_par_iter() 284 } 285 } 286 287 /// `IntoParallelRefMutIterator` implements the conversion to a 288 /// [`ParallelIterator`], providing mutable references to the data. 289 /// 290 /// This is a parallel version of the `iter_mut()` method 291 /// defined by various collections. 292 /// 293 /// This trait is automatically implemented 294 /// `for I where &mut I: IntoParallelIterator`. In most cases, users 295 /// will want to implement [`IntoParallelIterator`] rather than implement 296 /// this trait directly. 297 /// 298 /// [`ParallelIterator`]: trait.ParallelIterator.html 299 /// [`IntoParallelIterator`]: trait.IntoParallelIterator.html 300 pub trait IntoParallelRefMutIterator<'data> { 301 /// The type of iterator that will be created. 302 type Iter: ParallelIterator<Item = Self::Item>; 303 304 /// The type of item that will be produced; this is typically an 305 /// `&'data mut T` reference. 306 type Item: Send + 'data; 307 308 /// Creates the parallel iterator from `self`. 
309 /// 310 /// # Examples 311 /// 312 /// ``` 313 /// use rayon::prelude::*; 314 /// 315 /// let mut v = vec![0usize; 5]; 316 /// v.par_iter_mut().enumerate().for_each(|(i, x)| *x = i); 317 /// assert_eq!(v, [0, 1, 2, 3, 4]); 318 /// ``` par_iter_mut(&'data mut self) -> Self::Iter319 fn par_iter_mut(&'data mut self) -> Self::Iter; 320 } 321 322 impl<'data, I: 'data + ?Sized> IntoParallelRefMutIterator<'data> for I 323 where 324 &'data mut I: IntoParallelIterator, 325 { 326 type Iter = <&'data mut I as IntoParallelIterator>::Iter; 327 type Item = <&'data mut I as IntoParallelIterator>::Item; 328 par_iter_mut(&'data mut self) -> Self::Iter329 fn par_iter_mut(&'data mut self) -> Self::Iter { 330 self.into_par_iter() 331 } 332 } 333 334 /// Parallel version of the standard iterator trait. 335 /// 336 /// The combinators on this trait are available on **all** parallel 337 /// iterators. Additional methods can be found on the 338 /// [`IndexedParallelIterator`] trait: those methods are only 339 /// available for parallel iterators where the number of items is 340 /// known in advance (so, e.g., after invoking `filter`, those methods 341 /// become unavailable). 342 /// 343 /// For examples of using parallel iterators, see [the docs on the 344 /// `iter` module][iter]. 345 /// 346 /// [iter]: index.html 347 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 348 pub trait ParallelIterator: Sized + Send { 349 /// The type of item that this parallel iterator produces. 350 /// For example, if you use the [`for_each`] method, this is the type of 351 /// item that your closure will be invoked with. 352 /// 353 /// [`for_each`]: #method.for_each 354 type Item: Send; 355 356 /// Executes `OP` on each item produced by the iterator, in parallel. 
357 /// 358 /// # Examples 359 /// 360 /// ``` 361 /// use rayon::prelude::*; 362 /// 363 /// (0..100).into_par_iter().for_each(|x| println!("{:?}", x)); 364 /// ``` for_each<OP>(self, op: OP) where OP: Fn(Self::Item) + Sync + Send,365 fn for_each<OP>(self, op: OP) 366 where 367 OP: Fn(Self::Item) + Sync + Send, 368 { 369 for_each::for_each(self, &op) 370 } 371 372 /// Executes `OP` on the given `init` value with each item produced by 373 /// the iterator, in parallel. 374 /// 375 /// The `init` value will be cloned only as needed to be paired with 376 /// the group of items in each rayon job. It does not require the type 377 /// to be `Sync`. 378 /// 379 /// # Examples 380 /// 381 /// ``` 382 /// use std::sync::mpsc::channel; 383 /// use rayon::prelude::*; 384 /// 385 /// let (sender, receiver) = channel(); 386 /// 387 /// (0..5).into_par_iter().for_each_with(sender, |s, x| s.send(x).unwrap()); 388 /// 389 /// let mut res: Vec<_> = receiver.iter().collect(); 390 /// 391 /// res.sort(); 392 /// 393 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4]) 394 /// ``` for_each_with<OP, T>(self, init: T, op: OP) where OP: Fn(&mut T, Self::Item) + Sync + Send, T: Send + Clone,395 fn for_each_with<OP, T>(self, init: T, op: OP) 396 where 397 OP: Fn(&mut T, Self::Item) + Sync + Send, 398 T: Send + Clone, 399 { 400 self.map_with(init, op).collect() 401 } 402 403 /// Executes `OP` on a value returned by `init` with each item produced by 404 /// the iterator, in parallel. 405 /// 406 /// The `init` function will be called only as needed for a value to be 407 /// paired with the group of items in each rayon job. There is no 408 /// constraint on that returned type at all! 
409 /// 410 /// # Examples 411 /// 412 /// ``` 413 /// use rand::Rng; 414 /// use rayon::prelude::*; 415 /// 416 /// let mut v = vec![0u8; 1_000_000]; 417 /// 418 /// v.par_chunks_mut(1000) 419 /// .for_each_init( 420 /// || rand::thread_rng(), 421 /// |rng, chunk| rng.fill(chunk), 422 /// ); 423 /// 424 /// // There's a remote chance that this will fail... 425 /// for i in 0u8..=255 { 426 /// assert!(v.contains(&i)); 427 /// } 428 /// ``` for_each_init<OP, INIT, T>(self, init: INIT, op: OP) where OP: Fn(&mut T, Self::Item) + Sync + Send, INIT: Fn() -> T + Sync + Send,429 fn for_each_init<OP, INIT, T>(self, init: INIT, op: OP) 430 where 431 OP: Fn(&mut T, Self::Item) + Sync + Send, 432 INIT: Fn() -> T + Sync + Send, 433 { 434 self.map_init(init, op).collect() 435 } 436 437 /// Executes a fallible `OP` on each item produced by the iterator, in parallel. 438 /// 439 /// If the `OP` returns `Result::Err` or `Option::None`, we will attempt to 440 /// stop processing the rest of the items in the iterator as soon as 441 /// possible, and we will return that terminating value. Otherwise, we will 442 /// return an empty `Result::Ok(())` or `Option::Some(())`. If there are 443 /// multiple errors in parallel, it is not specified which will be returned. 444 /// 445 /// # Examples 446 /// 447 /// ``` 448 /// use rayon::prelude::*; 449 /// use std::io::{self, Write}; 450 /// 451 /// // This will stop iteration early if there's any write error, like 452 /// // having piped output get closed on the other end. 
453 /// (0..100).into_par_iter() 454 /// .try_for_each(|x| writeln!(io::stdout(), "{:?}", x)) 455 /// .expect("expected no write errors"); 456 /// ``` try_for_each<OP, R>(self, op: OP) -> R where OP: Fn(Self::Item) -> R + Sync + Send, R: Try<Ok = ()> + Send,457 fn try_for_each<OP, R>(self, op: OP) -> R 458 where 459 OP: Fn(Self::Item) -> R + Sync + Send, 460 R: Try<Ok = ()> + Send, 461 { 462 fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R { 463 R::from_ok(()) 464 } 465 466 self.map(op).try_reduce(<()>::default, ok) 467 } 468 469 /// Executes a fallible `OP` on the given `init` value with each item 470 /// produced by the iterator, in parallel. 471 /// 472 /// This combines the `init` semantics of [`for_each_with()`] and the 473 /// failure semantics of [`try_for_each()`]. 474 /// 475 /// [`for_each_with()`]: #method.for_each_with 476 /// [`try_for_each()`]: #method.try_for_each 477 /// 478 /// # Examples 479 /// 480 /// ``` 481 /// use std::sync::mpsc::channel; 482 /// use rayon::prelude::*; 483 /// 484 /// let (sender, receiver) = channel(); 485 /// 486 /// (0..5).into_par_iter() 487 /// .try_for_each_with(sender, |s, x| s.send(x)) 488 /// .expect("expected no send errors"); 489 /// 490 /// let mut res: Vec<_> = receiver.iter().collect(); 491 /// 492 /// res.sort(); 493 /// 494 /// assert_eq!(&res[..], &[0, 1, 2, 3, 4]) 495 /// ``` try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R where OP: Fn(&mut T, Self::Item) -> R + Sync + Send, T: Send + Clone, R: Try<Ok = ()> + Send,496 fn try_for_each_with<OP, T, R>(self, init: T, op: OP) -> R 497 where 498 OP: Fn(&mut T, Self::Item) -> R + Sync + Send, 499 T: Send + Clone, 500 R: Try<Ok = ()> + Send, 501 { 502 fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R { 503 R::from_ok(()) 504 } 505 506 self.map_with(init, op).try_reduce(<()>::default, ok) 507 } 508 509 /// Executes a fallible `OP` on a value returned by `init` with each item 510 /// produced by the iterator, in parallel. 
511 /// 512 /// This combines the `init` semantics of [`for_each_init()`] and the 513 /// failure semantics of [`try_for_each()`]. 514 /// 515 /// [`for_each_init()`]: #method.for_each_init 516 /// [`try_for_each()`]: #method.try_for_each 517 /// 518 /// # Examples 519 /// 520 /// ``` 521 /// use rand::Rng; 522 /// use rayon::prelude::*; 523 /// 524 /// let mut v = vec![0u8; 1_000_000]; 525 /// 526 /// v.par_chunks_mut(1000) 527 /// .try_for_each_init( 528 /// || rand::thread_rng(), 529 /// |rng, chunk| rng.try_fill(chunk), 530 /// ) 531 /// .expect("expected no rand errors"); 532 /// 533 /// // There's a remote chance that this will fail... 534 /// for i in 0u8..=255 { 535 /// assert!(v.contains(&i)); 536 /// } 537 /// ``` try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R where OP: Fn(&mut T, Self::Item) -> R + Sync + Send, INIT: Fn() -> T + Sync + Send, R: Try<Ok = ()> + Send,538 fn try_for_each_init<OP, INIT, T, R>(self, init: INIT, op: OP) -> R 539 where 540 OP: Fn(&mut T, Self::Item) -> R + Sync + Send, 541 INIT: Fn() -> T + Sync + Send, 542 R: Try<Ok = ()> + Send, 543 { 544 fn ok<R: Try<Ok = ()>>(_: (), _: ()) -> R { 545 R::from_ok(()) 546 } 547 548 self.map_init(init, op).try_reduce(<()>::default, ok) 549 } 550 551 /// Counts the number of items in this parallel iterator. 552 /// 553 /// # Examples 554 /// 555 /// ``` 556 /// use rayon::prelude::*; 557 /// 558 /// let count = (0..100).into_par_iter().count(); 559 /// 560 /// assert_eq!(count, 100); 561 /// ``` count(self) -> usize562 fn count(self) -> usize { 563 fn one<T>(_: T) -> usize { 564 1 565 } 566 567 self.map(one).sum() 568 } 569 570 /// Applies `map_op` to each item of this iterator, producing a new 571 /// iterator with the results. 
572 /// 573 /// # Examples 574 /// 575 /// ``` 576 /// use rayon::prelude::*; 577 /// 578 /// let mut par_iter = (0..5).into_par_iter().map(|x| x * 2); 579 /// 580 /// let doubles: Vec<_> = par_iter.collect(); 581 /// 582 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]); 583 /// ``` map<F, R>(self, map_op: F) -> Map<Self, F> where F: Fn(Self::Item) -> R + Sync + Send, R: Send,584 fn map<F, R>(self, map_op: F) -> Map<Self, F> 585 where 586 F: Fn(Self::Item) -> R + Sync + Send, 587 R: Send, 588 { 589 Map::new(self, map_op) 590 } 591 592 /// Applies `map_op` to the given `init` value with each item of this 593 /// iterator, producing a new iterator with the results. 594 /// 595 /// The `init` value will be cloned only as needed to be paired with 596 /// the group of items in each rayon job. It does not require the type 597 /// to be `Sync`. 598 /// 599 /// # Examples 600 /// 601 /// ``` 602 /// use std::sync::mpsc::channel; 603 /// use rayon::prelude::*; 604 /// 605 /// let (sender, receiver) = channel(); 606 /// 607 /// let a: Vec<_> = (0..5) 608 /// .into_par_iter() // iterating over i32 609 /// .map_with(sender, |s, x| { 610 /// s.send(x).unwrap(); // sending i32 values through the channel 611 /// x // returning i32 612 /// }) 613 /// .collect(); // collecting the returned values into a vector 614 /// 615 /// let mut b: Vec<_> = receiver.iter() // iterating over the values in the channel 616 /// .collect(); // and collecting them 617 /// b.sort(); 618 /// 619 /// assert_eq!(a, b); 620 /// ``` map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F> where F: Fn(&mut T, Self::Item) -> R + Sync + Send, T: Send + Clone, R: Send,621 fn map_with<F, T, R>(self, init: T, map_op: F) -> MapWith<Self, T, F> 622 where 623 F: Fn(&mut T, Self::Item) -> R + Sync + Send, 624 T: Send + Clone, 625 R: Send, 626 { 627 MapWith::new(self, init, map_op) 628 } 629 630 /// Applies `map_op` to a value returned by `init` with each item of this 631 /// iterator, producing a new 
iterator with the results. 632 /// 633 /// The `init` function will be called only as needed for a value to be 634 /// paired with the group of items in each rayon job. There is no 635 /// constraint on that returned type at all! 636 /// 637 /// # Examples 638 /// 639 /// ``` 640 /// use rand::Rng; 641 /// use rayon::prelude::*; 642 /// 643 /// let a: Vec<_> = (1i32..1_000_000) 644 /// .into_par_iter() 645 /// .map_init( 646 /// || rand::thread_rng(), // get the thread-local RNG 647 /// |rng, x| if rng.gen() { // randomly negate items 648 /// -x 649 /// } else { 650 /// x 651 /// }, 652 /// ).collect(); 653 /// 654 /// // There's a remote chance that this will fail... 655 /// assert!(a.iter().any(|&x| x < 0)); 656 /// assert!(a.iter().any(|&x| x > 0)); 657 /// ``` map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F> where F: Fn(&mut T, Self::Item) -> R + Sync + Send, INIT: Fn() -> T + Sync + Send, R: Send,658 fn map_init<F, INIT, T, R>(self, init: INIT, map_op: F) -> MapInit<Self, INIT, F> 659 where 660 F: Fn(&mut T, Self::Item) -> R + Sync + Send, 661 INIT: Fn() -> T + Sync + Send, 662 R: Send, 663 { 664 MapInit::new(self, init, map_op) 665 } 666 667 /// Creates an iterator which clones all of its elements. This may be 668 /// useful when you have an iterator over `&T`, but you need `T`, and 669 /// that type implements `Clone`. See also [`copied()`]. 
670 /// 671 /// [`copied()`]: #method.copied 672 /// 673 /// # Examples 674 /// 675 /// ``` 676 /// use rayon::prelude::*; 677 /// 678 /// let a = [1, 2, 3]; 679 /// 680 /// let v_cloned: Vec<_> = a.par_iter().cloned().collect(); 681 /// 682 /// // cloned is the same as .map(|&x| x), for integers 683 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect(); 684 /// 685 /// assert_eq!(v_cloned, vec![1, 2, 3]); 686 /// assert_eq!(v_map, vec![1, 2, 3]); 687 /// ``` cloned<'a, T>(self) -> Cloned<Self> where T: 'a + Clone + Send, Self: ParallelIterator<Item = &'a T>,688 fn cloned<'a, T>(self) -> Cloned<Self> 689 where 690 T: 'a + Clone + Send, 691 Self: ParallelIterator<Item = &'a T>, 692 { 693 Cloned::new(self) 694 } 695 696 /// Creates an iterator which copies all of its elements. This may be 697 /// useful when you have an iterator over `&T`, but you need `T`, and 698 /// that type implements `Copy`. See also [`cloned()`]. 699 /// 700 /// [`cloned()`]: #method.cloned 701 /// 702 /// # Examples 703 /// 704 /// ``` 705 /// use rayon::prelude::*; 706 /// 707 /// let a = [1, 2, 3]; 708 /// 709 /// let v_copied: Vec<_> = a.par_iter().copied().collect(); 710 /// 711 /// // copied is the same as .map(|&x| x), for integers 712 /// let v_map: Vec<_> = a.par_iter().map(|&x| x).collect(); 713 /// 714 /// assert_eq!(v_copied, vec![1, 2, 3]); 715 /// assert_eq!(v_map, vec![1, 2, 3]); 716 /// ``` copied<'a, T>(self) -> Copied<Self> where T: 'a + Copy + Send, Self: ParallelIterator<Item = &'a T>,717 fn copied<'a, T>(self) -> Copied<Self> 718 where 719 T: 'a + Copy + Send, 720 Self: ParallelIterator<Item = &'a T>, 721 { 722 Copied::new(self) 723 } 724 725 /// Applies `inspect_op` to a reference to each item of this iterator, 726 /// producing a new iterator passing through the original items. This is 727 /// often useful for debugging to see what's happening in iterator stages. 
728 /// 729 /// # Examples 730 /// 731 /// ``` 732 /// use rayon::prelude::*; 733 /// 734 /// let a = [1, 4, 2, 3]; 735 /// 736 /// // this iterator sequence is complex. 737 /// let sum = a.par_iter() 738 /// .cloned() 739 /// .filter(|&x| x % 2 == 0) 740 /// .reduce(|| 0, |sum, i| sum + i); 741 /// 742 /// println!("{}", sum); 743 /// 744 /// // let's add some inspect() calls to investigate what's happening 745 /// let sum = a.par_iter() 746 /// .cloned() 747 /// .inspect(|x| println!("about to filter: {}", x)) 748 /// .filter(|&x| x % 2 == 0) 749 /// .inspect(|x| println!("made it through filter: {}", x)) 750 /// .reduce(|| 0, |sum, i| sum + i); 751 /// 752 /// println!("{}", sum); 753 /// ``` inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP> where OP: Fn(&Self::Item) + Sync + Send,754 fn inspect<OP>(self, inspect_op: OP) -> Inspect<Self, OP> 755 where 756 OP: Fn(&Self::Item) + Sync + Send, 757 { 758 Inspect::new(self, inspect_op) 759 } 760 761 /// Mutates each item of this iterator before yielding it. 762 /// 763 /// # Examples 764 /// 765 /// ``` 766 /// use rayon::prelude::*; 767 /// 768 /// let par_iter = (0..5).into_par_iter().update(|x| {*x *= 2;}); 769 /// 770 /// let doubles: Vec<_> = par_iter.collect(); 771 /// 772 /// assert_eq!(&doubles[..], &[0, 2, 4, 6, 8]); 773 /// ``` update<F>(self, update_op: F) -> Update<Self, F> where F: Fn(&mut Self::Item) + Sync + Send,774 fn update<F>(self, update_op: F) -> Update<Self, F> 775 where 776 F: Fn(&mut Self::Item) + Sync + Send, 777 { 778 Update::new(self, update_op) 779 } 780 781 /// Applies `filter_op` to each item of this iterator, producing a new 782 /// iterator with only the items that gave `true` results. 
783 /// 784 /// # Examples 785 /// 786 /// ``` 787 /// use rayon::prelude::*; 788 /// 789 /// let mut par_iter = (0..10).into_par_iter().filter(|x| x % 2 == 0); 790 /// 791 /// let even_numbers: Vec<_> = par_iter.collect(); 792 /// 793 /// assert_eq!(&even_numbers[..], &[0, 2, 4, 6, 8]); 794 /// ``` filter<P>(self, filter_op: P) -> Filter<Self, P> where P: Fn(&Self::Item) -> bool + Sync + Send,795 fn filter<P>(self, filter_op: P) -> Filter<Self, P> 796 where 797 P: Fn(&Self::Item) -> bool + Sync + Send, 798 { 799 Filter::new(self, filter_op) 800 } 801 802 /// Applies `filter_op` to each item of this iterator to get an `Option`, 803 /// producing a new iterator with only the items from `Some` results. 804 /// 805 /// # Examples 806 /// 807 /// ``` 808 /// use rayon::prelude::*; 809 /// 810 /// let mut par_iter = (0..10).into_par_iter() 811 /// .filter_map(|x| { 812 /// if x % 2 == 0 { Some(x * 3) } 813 /// else { None } 814 /// }); 815 /// 816 /// let even_numbers: Vec<_> = par_iter.collect(); 817 /// 818 /// assert_eq!(&even_numbers[..], &[0, 6, 12, 18, 24]); 819 /// ``` filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P> where P: Fn(Self::Item) -> Option<R> + Sync + Send, R: Send,820 fn filter_map<P, R>(self, filter_op: P) -> FilterMap<Self, P> 821 where 822 P: Fn(Self::Item) -> Option<R> + Sync + Send, 823 R: Send, 824 { 825 FilterMap::new(self, filter_op) 826 } 827 828 /// Applies `map_op` to each item of this iterator to get nested parallel iterators, 829 /// producing a new parallel iterator that flattens these back into one. 830 /// 831 /// See also [`flat_map_iter`](#method.flat_map_iter). 
832 /// 833 /// # Examples 834 /// 835 /// ``` 836 /// use rayon::prelude::*; 837 /// 838 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]]; 839 /// 840 /// let par_iter = a.par_iter().cloned().flat_map(|a| a.to_vec()); 841 /// 842 /// let vec: Vec<_> = par_iter.collect(); 843 /// 844 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]); 845 /// ``` flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F> where F: Fn(Self::Item) -> PI + Sync + Send, PI: IntoParallelIterator,846 fn flat_map<F, PI>(self, map_op: F) -> FlatMap<Self, F> 847 where 848 F: Fn(Self::Item) -> PI + Sync + Send, 849 PI: IntoParallelIterator, 850 { 851 FlatMap::new(self, map_op) 852 } 853 854 /// Applies `map_op` to each item of this iterator to get nested serial iterators, 855 /// producing a new parallel iterator that flattens these back into one. 856 /// 857 /// # `flat_map_iter` versus `flat_map` 858 /// 859 /// These two methods are similar but behave slightly differently. With [`flat_map`], 860 /// each of the nested iterators must be a parallel iterator, and they will be further 861 /// split up with nested parallelism. With `flat_map_iter`, each nested iterator is a 862 /// sequential `Iterator`, and we only parallelize _between_ them, while the items 863 /// produced by each nested iterator are processed sequentially. 864 /// 865 /// When choosing between these methods, consider whether nested parallelism suits the 866 /// potential iterators at hand. If there's little computation involved, or its length 867 /// is much less than the outer parallel iterator, then it may perform better to avoid 868 /// the overhead of parallelism, just flattening sequentially with `flat_map_iter`. 869 /// If there is a lot of computation, potentially outweighing the outer parallel 870 /// iterator, then the nested parallelism of `flat_map` may be worthwhile. 
871 /// 872 /// [`flat_map`]: #method.flat_map 873 /// 874 /// # Examples 875 /// 876 /// ``` 877 /// use rayon::prelude::*; 878 /// use std::cell::RefCell; 879 /// 880 /// let a = [[1, 2], [3, 4], [5, 6], [7, 8]]; 881 /// 882 /// let par_iter = a.par_iter().flat_map_iter(|a| { 883 /// // The serial iterator doesn't have to be thread-safe, just its items. 884 /// let cell_iter = RefCell::new(a.iter().cloned()); 885 /// std::iter::from_fn(move || cell_iter.borrow_mut().next()) 886 /// }); 887 /// 888 /// let vec: Vec<_> = par_iter.collect(); 889 /// 890 /// assert_eq!(&vec[..], &[1, 2, 3, 4, 5, 6, 7, 8]); 891 /// ``` flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F> where F: Fn(Self::Item) -> SI + Sync + Send, SI: IntoIterator, SI::Item: Send,892 fn flat_map_iter<F, SI>(self, map_op: F) -> FlatMapIter<Self, F> 893 where 894 F: Fn(Self::Item) -> SI + Sync + Send, 895 SI: IntoIterator, 896 SI::Item: Send, 897 { 898 FlatMapIter::new(self, map_op) 899 } 900 901 /// An adaptor that flattens parallel-iterable `Item`s into one large iterator. 902 /// 903 /// See also [`flatten_iter`](#method.flatten_iter). 904 /// 905 /// # Examples 906 /// 907 /// ``` 908 /// use rayon::prelude::*; 909 /// 910 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]]; 911 /// let y: Vec<_> = x.into_par_iter().flatten().collect(); 912 /// 913 /// assert_eq!(y, vec![1, 2, 3, 4]); 914 /// ``` flatten(self) -> Flatten<Self> where Self::Item: IntoParallelIterator,915 fn flatten(self) -> Flatten<Self> 916 where 917 Self::Item: IntoParallelIterator, 918 { 919 Flatten::new(self) 920 } 921 922 /// An adaptor that flattens serial-iterable `Item`s into one large iterator. 923 /// 924 /// See also [`flatten`](#method.flatten) and the analagous comparison of 925 /// [`flat_map_iter` versus `flat_map`](#flat_map_iter-versus-flat_map). 
926 /// 927 /// # Examples 928 /// 929 /// ``` 930 /// use rayon::prelude::*; 931 /// 932 /// let x: Vec<Vec<_>> = vec![vec![1, 2], vec![3, 4]]; 933 /// let iters: Vec<_> = x.into_iter().map(Vec::into_iter).collect(); 934 /// let y: Vec<_> = iters.into_par_iter().flatten_iter().collect(); 935 /// 936 /// assert_eq!(y, vec![1, 2, 3, 4]); 937 /// ``` flatten_iter(self) -> FlattenIter<Self> where Self::Item: IntoIterator, <Self::Item as IntoIterator>::Item: Send,938 fn flatten_iter(self) -> FlattenIter<Self> 939 where 940 Self::Item: IntoIterator, 941 <Self::Item as IntoIterator>::Item: Send, 942 { 943 FlattenIter::new(self) 944 } 945 946 /// Reduces the items in the iterator into one item using `op`. 947 /// The argument `identity` should be a closure that can produce 948 /// "identity" value which may be inserted into the sequence as 949 /// needed to create opportunities for parallel execution. So, for 950 /// example, if you are doing a summation, then `identity()` ought 951 /// to produce something that represents the zero for your type 952 /// (but consider just calling `sum()` in that case). 953 /// 954 /// # Examples 955 /// 956 /// ``` 957 /// // Iterate over a sequence of pairs `(x0, y0), ..., (xN, yN)` 958 /// // and use reduce to compute one pair `(x0 + ... + xN, y0 + ... + yN)` 959 /// // where the first/second elements are summed separately. 960 /// use rayon::prelude::*; 961 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)] 962 /// .par_iter() // iterating over &(i32, i32) 963 /// .cloned() // iterating over (i32, i32) 964 /// .reduce(|| (0, 0), // the "identity" is 0 in both columns 965 /// |a, b| (a.0 + b.0, a.1 + b.1)); 966 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9)); 967 /// ``` 968 /// 969 /// **Note:** unlike a sequential `fold` operation, the order in 970 /// which `op` will be applied to reduce the result is not fully 971 /// specified. So `op` should be [associative] or else the results 972 /// will be non-deterministic. 
And of course `identity()` should 973 /// produce a true identity. 974 /// 975 /// [associative]: https://en.wikipedia.org/wiki/Associative_property reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send, ID: Fn() -> Self::Item + Sync + Send,976 fn reduce<OP, ID>(self, identity: ID, op: OP) -> Self::Item 977 where 978 OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send, 979 ID: Fn() -> Self::Item + Sync + Send, 980 { 981 reduce::reduce(self, identity, op) 982 } 983 984 /// Reduces the items in the iterator into one item using `op`. 985 /// If the iterator is empty, `None` is returned; otherwise, 986 /// `Some` is returned. 987 /// 988 /// This version of `reduce` is simple but somewhat less 989 /// efficient. If possible, it is better to call `reduce()`, which 990 /// requires an identity element. 991 /// 992 /// # Examples 993 /// 994 /// ``` 995 /// use rayon::prelude::*; 996 /// let sums = [(0, 1), (5, 6), (16, 2), (8, 9)] 997 /// .par_iter() // iterating over &(i32, i32) 998 /// .cloned() // iterating over (i32, i32) 999 /// .reduce_with(|a, b| (a.0 + b.0, a.1 + b.1)) 1000 /// .unwrap(); 1001 /// assert_eq!(sums, (0 + 5 + 16 + 8, 1 + 6 + 2 + 9)); 1002 /// ``` 1003 /// 1004 /// **Note:** unlike a sequential `fold` operation, the order in 1005 /// which `op` will be applied to reduce the result is not fully 1006 /// specified. So `op` should be [associative] or else the results 1007 /// will be non-deterministic. 
1008 /// 1009 /// [associative]: https://en.wikipedia.org/wiki/Associative_property reduce_with<OP>(self, op: OP) -> Option<Self::Item> where OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send,1010 fn reduce_with<OP>(self, op: OP) -> Option<Self::Item> 1011 where 1012 OP: Fn(Self::Item, Self::Item) -> Self::Item + Sync + Send, 1013 { 1014 fn opt_fold<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, T) -> Option<T> { 1015 move |opt_a, b| match opt_a { 1016 Some(a) => Some(op(a, b)), 1017 None => Some(b), 1018 } 1019 } 1020 1021 fn opt_reduce<T>(op: impl Fn(T, T) -> T) -> impl Fn(Option<T>, Option<T>) -> Option<T> { 1022 move |opt_a, opt_b| match (opt_a, opt_b) { 1023 (Some(a), Some(b)) => Some(op(a, b)), 1024 (Some(v), None) | (None, Some(v)) => Some(v), 1025 (None, None) => None, 1026 } 1027 } 1028 1029 self.fold(<_>::default, opt_fold(&op)) 1030 .reduce(<_>::default, opt_reduce(&op)) 1031 } 1032 1033 /// Reduces the items in the iterator into one item using a fallible `op`. 1034 /// The `identity` argument is used the same way as in [`reduce()`]. 1035 /// 1036 /// [`reduce()`]: #method.reduce 1037 /// 1038 /// If a `Result::Err` or `Option::None` item is found, or if `op` reduces 1039 /// to one, we will attempt to stop processing the rest of the items in the 1040 /// iterator as soon as possible, and we will return that terminating value. 1041 /// Otherwise, we will return the final reduced `Result::Ok(T)` or 1042 /// `Option::Some(T)`. If there are multiple errors in parallel, it is not 1043 /// specified which will be returned. 1044 /// 1045 /// # Examples 1046 /// 1047 /// ``` 1048 /// use rayon::prelude::*; 1049 /// 1050 /// // Compute the sum of squares, being careful about overflow. 1051 /// fn sum_squares<I: IntoParallelIterator<Item = i32>>(iter: I) -> Option<i32> { 1052 /// iter.into_par_iter() 1053 /// .map(|i| i.checked_mul(i)) // square each item, 1054 /// .try_reduce(|| 0, i32::checked_add) // and add them up! 
1055 /// } 1056 /// assert_eq!(sum_squares(0..5), Some(0 + 1 + 4 + 9 + 16)); 1057 /// 1058 /// // The sum might overflow 1059 /// assert_eq!(sum_squares(0..10_000), None); 1060 /// 1061 /// // Or the squares might overflow before it even reaches `try_reduce` 1062 /// assert_eq!(sum_squares(1_000_000..1_000_001), None); 1063 /// ``` try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item where OP: Fn(T, T) -> Self::Item + Sync + Send, ID: Fn() -> T + Sync + Send, Self::Item: Try<Ok = T>,1064 fn try_reduce<T, OP, ID>(self, identity: ID, op: OP) -> Self::Item 1065 where 1066 OP: Fn(T, T) -> Self::Item + Sync + Send, 1067 ID: Fn() -> T + Sync + Send, 1068 Self::Item: Try<Ok = T>, 1069 { 1070 try_reduce::try_reduce(self, identity, op) 1071 } 1072 1073 /// Reduces the items in the iterator into one item using a fallible `op`. 1074 /// 1075 /// Like [`reduce_with()`], if the iterator is empty, `None` is returned; 1076 /// otherwise, `Some` is returned. Beyond that, it behaves like 1077 /// [`try_reduce()`] for handling `Err`/`None`. 1078 /// 1079 /// [`reduce_with()`]: #method.reduce_with 1080 /// [`try_reduce()`]: #method.try_reduce 1081 /// 1082 /// For instance, with `Option` items, the return value may be: 1083 /// - `None`, the iterator was empty 1084 /// - `Some(None)`, we stopped after encountering `None`. 1085 /// - `Some(Some(x))`, the entire iterator reduced to `x`. 1086 /// 1087 /// With `Result` items, the nesting is more obvious: 1088 /// - `None`, the iterator was empty 1089 /// - `Some(Err(e))`, we stopped after encountering an error `e`. 1090 /// - `Some(Ok(x))`, the entire iterator reduced to `x`. 
1091 /// 1092 /// # Examples 1093 /// 1094 /// ``` 1095 /// use rayon::prelude::*; 1096 /// 1097 /// let files = ["/dev/null", "/does/not/exist"]; 1098 /// 1099 /// // Find the biggest file 1100 /// files.into_par_iter() 1101 /// .map(|path| std::fs::metadata(path).map(|m| (path, m.len()))) 1102 /// .try_reduce_with(|a, b| { 1103 /// Ok(if a.1 >= b.1 { a } else { b }) 1104 /// }) 1105 /// .expect("Some value, since the iterator is not empty") 1106 /// .expect_err("not found"); 1107 /// ``` try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item> where OP: Fn(T, T) -> Self::Item + Sync + Send, Self::Item: Try<Ok = T>,1108 fn try_reduce_with<T, OP>(self, op: OP) -> Option<Self::Item> 1109 where 1110 OP: Fn(T, T) -> Self::Item + Sync + Send, 1111 Self::Item: Try<Ok = T>, 1112 { 1113 try_reduce_with::try_reduce_with(self, op) 1114 } 1115 1116 /// Parallel fold is similar to sequential fold except that the 1117 /// sequence of items may be subdivided before it is 1118 /// folded. Consider a list of numbers like `22 3 77 89 46`. If 1119 /// you used sequential fold to add them (`fold(0, |a,b| a+b)`, 1120 /// you would wind up first adding 0 + 22, then 22 + 3, then 25 + 1121 /// 77, and so forth. The **parallel fold** works similarly except 1122 /// that it first breaks up your list into sublists, and hence 1123 /// instead of yielding up a single sum at the end, it yields up 1124 /// multiple sums. The number of results is nondeterministic, as 1125 /// is the point where the breaks occur. 
1126 /// 1127 /// So if did the same parallel fold (`fold(0, |a,b| a+b)`) on 1128 /// our example list, we might wind up with a sequence of two numbers, 1129 /// like so: 1130 /// 1131 /// ```notrust 1132 /// 22 3 77 89 46 1133 /// | | 1134 /// 102 135 1135 /// ``` 1136 /// 1137 /// Or perhaps these three numbers: 1138 /// 1139 /// ```notrust 1140 /// 22 3 77 89 46 1141 /// | | | 1142 /// 102 89 46 1143 /// ``` 1144 /// 1145 /// In general, Rayon will attempt to find good breaking points 1146 /// that keep all of your cores busy. 1147 /// 1148 /// ### Fold versus reduce 1149 /// 1150 /// The `fold()` and `reduce()` methods each take an identity element 1151 /// and a combining function, but they operate rather differently. 1152 /// 1153 /// `reduce()` requires that the identity function has the same 1154 /// type as the things you are iterating over, and it fully 1155 /// reduces the list of items into a single item. So, for example, 1156 /// imagine we are iterating over a list of bytes `bytes: [128_u8, 1157 /// 64_u8, 64_u8]`. If we used `bytes.reduce(|| 0_u8, |a: u8, b: 1158 /// u8| a + b)`, we would get an overflow. This is because `0`, 1159 /// `a`, and `b` here are all bytes, just like the numbers in the 1160 /// list (I wrote the types explicitly above, but those are the 1161 /// only types you can use). To avoid the overflow, we would need 1162 /// to do something like `bytes.map(|b| b as u32).reduce(|| 0, |a, 1163 /// b| a + b)`, in which case our result would be `256`. 1164 /// 1165 /// In contrast, with `fold()`, the identity function does not 1166 /// have to have the same type as the things you are iterating 1167 /// over, and you potentially get back many results. So, if we 1168 /// continue with the `bytes` example from the previous paragraph, 1169 /// we could do `bytes.fold(|| 0_u32, |a, b| a + (b as u32))` to 1170 /// convert our bytes into `u32`. And of course we might not get 1171 /// back a single sum. 
1172 /// 1173 /// There is a more subtle distinction as well, though it's 1174 /// actually implied by the above points. When you use `reduce()`, 1175 /// your reduction function is sometimes called with values that 1176 /// were never part of your original parallel iterator (for 1177 /// example, both the left and right might be a partial sum). With 1178 /// `fold()`, in contrast, the left value in the fold function is 1179 /// always the accumulator, and the right value is always from 1180 /// your original sequence. 1181 /// 1182 /// ### Fold vs Map/Reduce 1183 /// 1184 /// Fold makes sense if you have some operation where it is 1185 /// cheaper to create groups of elements at a time. For example, 1186 /// imagine collecting characters into a string. If you were going 1187 /// to use map/reduce, you might try this: 1188 /// 1189 /// ``` 1190 /// use rayon::prelude::*; 1191 /// 1192 /// let s = 1193 /// ['a', 'b', 'c', 'd', 'e'] 1194 /// .par_iter() 1195 /// .map(|c: &char| format!("{}", c)) 1196 /// .reduce(|| String::new(), 1197 /// |mut a: String, b: String| { a.push_str(&b); a }); 1198 /// 1199 /// assert_eq!(s, "abcde"); 1200 /// ``` 1201 /// 1202 /// Because reduce produces the same type of element as its input, 1203 /// you have to first map each character into a string, and then 1204 /// you can reduce them. This means we create one string per 1205 /// element in our iterator -- not so great. Using `fold`, we can 1206 /// do this instead: 1207 /// 1208 /// ``` 1209 /// use rayon::prelude::*; 1210 /// 1211 /// let s = 1212 /// ['a', 'b', 'c', 'd', 'e'] 1213 /// .par_iter() 1214 /// .fold(|| String::new(), 1215 /// |mut s: String, c: &char| { s.push(*c); s }) 1216 /// .reduce(|| String::new(), 1217 /// |mut a: String, b: String| { a.push_str(&b); a }); 1218 /// 1219 /// assert_eq!(s, "abcde"); 1220 /// ``` 1221 /// 1222 /// Now `fold` will process groups of our characters at a time, 1223 /// and we only make one string per group. 
We should wind up with 1224 /// some small-ish number of strings roughly proportional to the 1225 /// number of CPUs you have (it will ultimately depend on how busy 1226 /// your processors are). Note that we still need to do a reduce 1227 /// afterwards to combine those groups of strings into a single 1228 /// string. 1229 /// 1230 /// You could use a similar trick to save partial results (e.g., a 1231 /// cache) or something similar. 1232 /// 1233 /// ### Combining fold with other operations 1234 /// 1235 /// You can combine `fold` with `reduce` if you want to produce a 1236 /// single value. This is then roughly equivalent to a map/reduce 1237 /// combination in effect: 1238 /// 1239 /// ``` 1240 /// use rayon::prelude::*; 1241 /// 1242 /// let bytes = 0..22_u8; 1243 /// let sum = bytes.into_par_iter() 1244 /// .fold(|| 0_u32, |a: u32, b: u8| a + (b as u32)) 1245 /// .sum::<u32>(); 1246 /// 1247 /// assert_eq!(sum, (0..22).sum()); // compare to sequential 1248 /// ``` fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F> where F: Fn(T, Self::Item) -> T + Sync + Send, ID: Fn() -> T + Sync + Send, T: Send,1249 fn fold<T, ID, F>(self, identity: ID, fold_op: F) -> Fold<Self, ID, F> 1250 where 1251 F: Fn(T, Self::Item) -> T + Sync + Send, 1252 ID: Fn() -> T + Sync + Send, 1253 T: Send, 1254 { 1255 Fold::new(self, identity, fold_op) 1256 } 1257 1258 /// Applies `fold_op` to the given `init` value with each item of this 1259 /// iterator, finally producing the value for further use. 1260 /// 1261 /// This works essentially like `fold(|| init.clone(), fold_op)`, except 1262 /// it doesn't require the `init` type to be `Sync`, nor any other form 1263 /// of added synchronization. 
1264 /// 1265 /// # Examples 1266 /// 1267 /// ``` 1268 /// use rayon::prelude::*; 1269 /// 1270 /// let bytes = 0..22_u8; 1271 /// let sum = bytes.into_par_iter() 1272 /// .fold_with(0_u32, |a: u32, b: u8| a + (b as u32)) 1273 /// .sum::<u32>(); 1274 /// 1275 /// assert_eq!(sum, (0..22).sum()); // compare to sequential 1276 /// ``` fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F> where F: Fn(T, Self::Item) -> T + Sync + Send, T: Send + Clone,1277 fn fold_with<F, T>(self, init: T, fold_op: F) -> FoldWith<Self, T, F> 1278 where 1279 F: Fn(T, Self::Item) -> T + Sync + Send, 1280 T: Send + Clone, 1281 { 1282 FoldWith::new(self, init, fold_op) 1283 } 1284 1285 /// Performs a fallible parallel fold. 1286 /// 1287 /// This is a variation of [`fold()`] for operations which can fail with 1288 /// `Option::None` or `Result::Err`. The first such failure stops 1289 /// processing the local set of items, without affecting other folds in the 1290 /// iterator's subdivisions. 1291 /// 1292 /// Often, `try_fold()` will be followed by [`try_reduce()`] 1293 /// for a final reduction and global short-circuiting effect. 
1294 /// 1295 /// [`fold()`]: #method.fold 1296 /// [`try_reduce()`]: #method.try_reduce 1297 /// 1298 /// # Examples 1299 /// 1300 /// ``` 1301 /// use rayon::prelude::*; 1302 /// 1303 /// let bytes = 0..22_u8; 1304 /// let sum = bytes.into_par_iter() 1305 /// .try_fold(|| 0_u32, |a: u32, b: u8| a.checked_add(b as u32)) 1306 /// .try_reduce(|| 0, u32::checked_add); 1307 /// 1308 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential 1309 /// ``` try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F> where F: Fn(T, Self::Item) -> R + Sync + Send, ID: Fn() -> T + Sync + Send, R: Try<Ok = T> + Send,1310 fn try_fold<T, R, ID, F>(self, identity: ID, fold_op: F) -> TryFold<Self, R, ID, F> 1311 where 1312 F: Fn(T, Self::Item) -> R + Sync + Send, 1313 ID: Fn() -> T + Sync + Send, 1314 R: Try<Ok = T> + Send, 1315 { 1316 TryFold::new(self, identity, fold_op) 1317 } 1318 1319 /// Performs a fallible parallel fold with a cloneable `init` value. 1320 /// 1321 /// This combines the `init` semantics of [`fold_with()`] and the failure 1322 /// semantics of [`try_fold()`]. 
1323 /// 1324 /// [`fold_with()`]: #method.fold_with 1325 /// [`try_fold()`]: #method.try_fold 1326 /// 1327 /// ``` 1328 /// use rayon::prelude::*; 1329 /// 1330 /// let bytes = 0..22_u8; 1331 /// let sum = bytes.into_par_iter() 1332 /// .try_fold_with(0_u32, |a: u32, b: u8| a.checked_add(b as u32)) 1333 /// .try_reduce(|| 0, u32::checked_add); 1334 /// 1335 /// assert_eq!(sum, Some((0..22).sum())); // compare to sequential 1336 /// ``` try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F> where F: Fn(T, Self::Item) -> R + Sync + Send, R: Try<Ok = T> + Send, T: Clone + Send,1337 fn try_fold_with<F, T, R>(self, init: T, fold_op: F) -> TryFoldWith<Self, R, F> 1338 where 1339 F: Fn(T, Self::Item) -> R + Sync + Send, 1340 R: Try<Ok = T> + Send, 1341 T: Clone + Send, 1342 { 1343 TryFoldWith::new(self, init, fold_op) 1344 } 1345 1346 /// Sums up the items in the iterator. 1347 /// 1348 /// Note that the order in items will be reduced is not specified, 1349 /// so if the `+` operator is not truly [associative] \(as is the 1350 /// case for floating point numbers), then the results are not 1351 /// fully deterministic. 1352 /// 1353 /// [associative]: https://en.wikipedia.org/wiki/Associative_property 1354 /// 1355 /// Basically equivalent to `self.reduce(|| 0, |a, b| a + b)`, 1356 /// except that the type of `0` and the `+` operation may vary 1357 /// depending on the type of value being produced. 1358 /// 1359 /// # Examples 1360 /// 1361 /// ``` 1362 /// use rayon::prelude::*; 1363 /// 1364 /// let a = [1, 5, 7]; 1365 /// 1366 /// let sum: i32 = a.par_iter().sum(); 1367 /// 1368 /// assert_eq!(sum, 13); 1369 /// ``` sum<S>(self) -> S where S: Send + Sum<Self::Item> + Sum<S>,1370 fn sum<S>(self) -> S 1371 where 1372 S: Send + Sum<Self::Item> + Sum<S>, 1373 { 1374 sum::sum(self) 1375 } 1376 1377 /// Multiplies all the items in the iterator. 
1378 /// 1379 /// Note that the order in items will be reduced is not specified, 1380 /// so if the `*` operator is not truly [associative] \(as is the 1381 /// case for floating point numbers), then the results are not 1382 /// fully deterministic. 1383 /// 1384 /// [associative]: https://en.wikipedia.org/wiki/Associative_property 1385 /// 1386 /// Basically equivalent to `self.reduce(|| 1, |a, b| a * b)`, 1387 /// except that the type of `1` and the `*` operation may vary 1388 /// depending on the type of value being produced. 1389 /// 1390 /// # Examples 1391 /// 1392 /// ``` 1393 /// use rayon::prelude::*; 1394 /// 1395 /// fn factorial(n: u32) -> u32 { 1396 /// (1..n+1).into_par_iter().product() 1397 /// } 1398 /// 1399 /// assert_eq!(factorial(0), 1); 1400 /// assert_eq!(factorial(1), 1); 1401 /// assert_eq!(factorial(5), 120); 1402 /// ``` product<P>(self) -> P where P: Send + Product<Self::Item> + Product<P>,1403 fn product<P>(self) -> P 1404 where 1405 P: Send + Product<Self::Item> + Product<P>, 1406 { 1407 product::product(self) 1408 } 1409 1410 /// Computes the minimum of all the items in the iterator. If the 1411 /// iterator is empty, `None` is returned; otherwise, `Some(min)` 1412 /// is returned. 1413 /// 1414 /// Note that the order in which the items will be reduced is not 1415 /// specified, so if the `Ord` impl is not truly associative, then 1416 /// the results are not deterministic. 1417 /// 1418 /// Basically equivalent to `self.reduce_with(|a, b| cmp::min(a, b))`. 
1419 /// 1420 /// # Examples 1421 /// 1422 /// ``` 1423 /// use rayon::prelude::*; 1424 /// 1425 /// let a = [45, 74, 32]; 1426 /// 1427 /// assert_eq!(a.par_iter().min(), Some(&32)); 1428 /// 1429 /// let b: [i32; 0] = []; 1430 /// 1431 /// assert_eq!(b.par_iter().min(), None); 1432 /// ``` min(self) -> Option<Self::Item> where Self::Item: Ord,1433 fn min(self) -> Option<Self::Item> 1434 where 1435 Self::Item: Ord, 1436 { 1437 self.reduce_with(cmp::min) 1438 } 1439 1440 /// Computes the minimum of all the items in the iterator with respect to 1441 /// the given comparison function. If the iterator is empty, `None` is 1442 /// returned; otherwise, `Some(min)` is returned. 1443 /// 1444 /// Note that the order in which the items will be reduced is not 1445 /// specified, so if the comparison function is not associative, then 1446 /// the results are not deterministic. 1447 /// 1448 /// # Examples 1449 /// 1450 /// ``` 1451 /// use rayon::prelude::*; 1452 /// 1453 /// let a = [-3_i32, 77, 53, 240, -1]; 1454 /// 1455 /// assert_eq!(a.par_iter().min_by(|x, y| x.cmp(y)), Some(&-3)); 1456 /// ``` min_by<F>(self, f: F) -> Option<Self::Item> where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,1457 fn min_by<F>(self, f: F) -> Option<Self::Item> 1458 where 1459 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering, 1460 { 1461 fn min<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T { 1462 move |a, b| match f(&a, &b) { 1463 Ordering::Greater => b, 1464 _ => a, 1465 } 1466 } 1467 1468 self.reduce_with(min(f)) 1469 } 1470 1471 /// Computes the item that yields the minimum value for the given 1472 /// function. If the iterator is empty, `None` is returned; 1473 /// otherwise, `Some(item)` is returned. 1474 /// 1475 /// Note that the order in which the items will be reduced is not 1476 /// specified, so if the `Ord` impl is not truly associative, then 1477 /// the results are not deterministic. 
1478 /// 1479 /// # Examples 1480 /// 1481 /// ``` 1482 /// use rayon::prelude::*; 1483 /// 1484 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23]; 1485 /// 1486 /// assert_eq!(a.par_iter().min_by_key(|x| x.abs()), Some(&2)); 1487 /// ``` min_by_key<K, F>(self, f: F) -> Option<Self::Item> where K: Ord + Send, F: Sync + Send + Fn(&Self::Item) -> K,1488 fn min_by_key<K, F>(self, f: F) -> Option<Self::Item> 1489 where 1490 K: Ord + Send, 1491 F: Sync + Send + Fn(&Self::Item) -> K, 1492 { 1493 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) { 1494 move |x| (f(&x), x) 1495 } 1496 1497 fn min_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) { 1498 match (a.0).cmp(&b.0) { 1499 Ordering::Greater => b, 1500 _ => a, 1501 } 1502 } 1503 1504 let (_, x) = self.map(key(f)).reduce_with(min_key)?; 1505 Some(x) 1506 } 1507 1508 /// Computes the maximum of all the items in the iterator. If the 1509 /// iterator is empty, `None` is returned; otherwise, `Some(max)` 1510 /// is returned. 1511 /// 1512 /// Note that the order in which the items will be reduced is not 1513 /// specified, so if the `Ord` impl is not truly associative, then 1514 /// the results are not deterministic. 1515 /// 1516 /// Basically equivalent to `self.reduce_with(|a, b| cmp::max(a, b))`. 1517 /// 1518 /// # Examples 1519 /// 1520 /// ``` 1521 /// use rayon::prelude::*; 1522 /// 1523 /// let a = [45, 74, 32]; 1524 /// 1525 /// assert_eq!(a.par_iter().max(), Some(&74)); 1526 /// 1527 /// let b: [i32; 0] = []; 1528 /// 1529 /// assert_eq!(b.par_iter().max(), None); 1530 /// ``` max(self) -> Option<Self::Item> where Self::Item: Ord,1531 fn max(self) -> Option<Self::Item> 1532 where 1533 Self::Item: Ord, 1534 { 1535 self.reduce_with(cmp::max) 1536 } 1537 1538 /// Computes the maximum of all the items in the iterator with respect to 1539 /// the given comparison function. If the iterator is empty, `None` is 1540 /// returned; otherwise, `Some(min)` is returned. 
1541 /// 1542 /// Note that the order in which the items will be reduced is not 1543 /// specified, so if the comparison function is not associative, then 1544 /// the results are not deterministic. 1545 /// 1546 /// # Examples 1547 /// 1548 /// ``` 1549 /// use rayon::prelude::*; 1550 /// 1551 /// let a = [-3_i32, 77, 53, 240, -1]; 1552 /// 1553 /// assert_eq!(a.par_iter().max_by(|x, y| x.abs().cmp(&y.abs())), Some(&240)); 1554 /// ``` max_by<F>(self, f: F) -> Option<Self::Item> where F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering,1555 fn max_by<F>(self, f: F) -> Option<Self::Item> 1556 where 1557 F: Sync + Send + Fn(&Self::Item, &Self::Item) -> Ordering, 1558 { 1559 fn max<T>(f: impl Fn(&T, &T) -> Ordering) -> impl Fn(T, T) -> T { 1560 move |a, b| match f(&a, &b) { 1561 Ordering::Greater => a, 1562 _ => b, 1563 } 1564 } 1565 1566 self.reduce_with(max(f)) 1567 } 1568 1569 /// Computes the item that yields the maximum value for the given 1570 /// function. If the iterator is empty, `None` is returned; 1571 /// otherwise, `Some(item)` is returned. 1572 /// 1573 /// Note that the order in which the items will be reduced is not 1574 /// specified, so if the `Ord` impl is not truly associative, then 1575 /// the results are not deterministic. 
1576 /// 1577 /// # Examples 1578 /// 1579 /// ``` 1580 /// use rayon::prelude::*; 1581 /// 1582 /// let a = [-3_i32, 34, 2, 5, -10, -3, -23]; 1583 /// 1584 /// assert_eq!(a.par_iter().max_by_key(|x| x.abs()), Some(&34)); 1585 /// ``` max_by_key<K, F>(self, f: F) -> Option<Self::Item> where K: Ord + Send, F: Sync + Send + Fn(&Self::Item) -> K,1586 fn max_by_key<K, F>(self, f: F) -> Option<Self::Item> 1587 where 1588 K: Ord + Send, 1589 F: Sync + Send + Fn(&Self::Item) -> K, 1590 { 1591 fn key<T, K>(f: impl Fn(&T) -> K) -> impl Fn(T) -> (K, T) { 1592 move |x| (f(&x), x) 1593 } 1594 1595 fn max_key<T, K: Ord>(a: (K, T), b: (K, T)) -> (K, T) { 1596 match (a.0).cmp(&b.0) { 1597 Ordering::Greater => a, 1598 _ => b, 1599 } 1600 } 1601 1602 let (_, x) = self.map(key(f)).reduce_with(max_key)?; 1603 Some(x) 1604 } 1605 1606 /// Takes two iterators and creates a new iterator over both. 1607 /// 1608 /// # Examples 1609 /// 1610 /// ``` 1611 /// use rayon::prelude::*; 1612 /// 1613 /// let a = [0, 1, 2]; 1614 /// let b = [9, 8, 7]; 1615 /// 1616 /// let par_iter = a.par_iter().chain(b.par_iter()); 1617 /// 1618 /// let chained: Vec<_> = par_iter.cloned().collect(); 1619 /// 1620 /// assert_eq!(&chained[..], &[0, 1, 2, 9, 8, 7]); 1621 /// ``` chain<C>(self, chain: C) -> Chain<Self, C::Iter> where C: IntoParallelIterator<Item = Self::Item>,1622 fn chain<C>(self, chain: C) -> Chain<Self, C::Iter> 1623 where 1624 C: IntoParallelIterator<Item = Self::Item>, 1625 { 1626 Chain::new(self, chain.into_par_iter()) 1627 } 1628 1629 /// Searches for **some** item in the parallel iterator that 1630 /// matches the given predicate and returns it. This operation 1631 /// is similar to [`find` on sequential iterators][find] but 1632 /// the item returned may not be the **first** one in the parallel 1633 /// sequence which matches, since we search the entire sequence in parallel. 
1634 /// 1635 /// Once a match is found, we will attempt to stop processing 1636 /// the rest of the items in the iterator as soon as possible 1637 /// (just as `find` stops iterating once a match is found). 1638 /// 1639 /// [find]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.find 1640 /// 1641 /// # Examples 1642 /// 1643 /// ``` 1644 /// use rayon::prelude::*; 1645 /// 1646 /// let a = [1, 2, 3, 3]; 1647 /// 1648 /// assert_eq!(a.par_iter().find_any(|&&x| x == 3), Some(&3)); 1649 /// 1650 /// assert_eq!(a.par_iter().find_any(|&&x| x == 100), None); 1651 /// ``` find_any<P>(self, predicate: P) -> Option<Self::Item> where P: Fn(&Self::Item) -> bool + Sync + Send,1652 fn find_any<P>(self, predicate: P) -> Option<Self::Item> 1653 where 1654 P: Fn(&Self::Item) -> bool + Sync + Send, 1655 { 1656 find::find(self, predicate) 1657 } 1658 1659 /// Searches for the sequentially **first** item in the parallel iterator 1660 /// that matches the given predicate and returns it. 1661 /// 1662 /// Once a match is found, all attempts to the right of the match 1663 /// will be stopped, while attempts to the left must continue in case 1664 /// an earlier match is found. 1665 /// 1666 /// Note that not all parallel iterators have a useful order, much like 1667 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 1668 /// just want the first match that discovered anywhere in the iterator, 1669 /// `find_any` is a better choice. 
1670 /// 1671 /// # Examples 1672 /// 1673 /// ``` 1674 /// use rayon::prelude::*; 1675 /// 1676 /// let a = [1, 2, 3, 3]; 1677 /// 1678 /// assert_eq!(a.par_iter().find_first(|&&x| x == 3), Some(&3)); 1679 /// 1680 /// assert_eq!(a.par_iter().find_first(|&&x| x == 100), None); 1681 /// ``` find_first<P>(self, predicate: P) -> Option<Self::Item> where P: Fn(&Self::Item) -> bool + Sync + Send,1682 fn find_first<P>(self, predicate: P) -> Option<Self::Item> 1683 where 1684 P: Fn(&Self::Item) -> bool + Sync + Send, 1685 { 1686 find_first_last::find_first(self, predicate) 1687 } 1688 1689 /// Searches for the sequentially **last** item in the parallel iterator 1690 /// that matches the given predicate and returns it. 1691 /// 1692 /// Once a match is found, all attempts to the left of the match 1693 /// will be stopped, while attempts to the right must continue in case 1694 /// a later match is found. 1695 /// 1696 /// Note that not all parallel iterators have a useful order, much like 1697 /// sequential `HashMap` iteration, so "last" may be nebulous. When the 1698 /// order doesn't actually matter to you, `find_any` is a better choice. 1699 /// 1700 /// # Examples 1701 /// 1702 /// ``` 1703 /// use rayon::prelude::*; 1704 /// 1705 /// let a = [1, 2, 3, 3]; 1706 /// 1707 /// assert_eq!(a.par_iter().find_last(|&&x| x == 3), Some(&3)); 1708 /// 1709 /// assert_eq!(a.par_iter().find_last(|&&x| x == 100), None); 1710 /// ``` find_last<P>(self, predicate: P) -> Option<Self::Item> where P: Fn(&Self::Item) -> bool + Sync + Send,1711 fn find_last<P>(self, predicate: P) -> Option<Self::Item> 1712 where 1713 P: Fn(&Self::Item) -> bool + Sync + Send, 1714 { 1715 find_first_last::find_last(self, predicate) 1716 } 1717 1718 /// Applies the given predicate to the items in the parallel iterator 1719 /// and returns **any** non-None result of the map operation. 
1720 /// 1721 /// Once a non-None value is produced from the map operation, we will 1722 /// attempt to stop processing the rest of the items in the iterator 1723 /// as soon as possible. 1724 /// 1725 /// Note that this method only returns **some** item in the parallel 1726 /// iterator that is not None from the map predicate. The item returned 1727 /// may not be the **first** non-None value produced in the parallel 1728 /// sequence, since the entire sequence is mapped over in parallel. 1729 /// 1730 /// # Examples 1731 /// 1732 /// ``` 1733 /// use rayon::prelude::*; 1734 /// 1735 /// let c = ["lol", "NaN", "5", "5"]; 1736 /// 1737 /// let found_number = c.par_iter().find_map_any(|s| s.parse().ok()); 1738 /// 1739 /// assert_eq!(found_number, Some(5)); 1740 /// ``` find_map_any<P, R>(self, predicate: P) -> Option<R> where P: Fn(Self::Item) -> Option<R> + Sync + Send, R: Send,1741 fn find_map_any<P, R>(self, predicate: P) -> Option<R> 1742 where 1743 P: Fn(Self::Item) -> Option<R> + Sync + Send, 1744 R: Send, 1745 { 1746 fn yes<T>(_: &T) -> bool { 1747 true 1748 } 1749 self.filter_map(predicate).find_any(yes) 1750 } 1751 1752 /// Applies the given predicate to the items in the parallel iterator and 1753 /// returns the sequentially **first** non-None result of the map operation. 1754 /// 1755 /// Once a non-None value is produced from the map operation, all attempts 1756 /// to the right of the match will be stopped, while attempts to the left 1757 /// must continue in case an earlier match is found. 1758 /// 1759 /// Note that not all parallel iterators have a useful order, much like 1760 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 1761 /// just want the first non-None value discovered anywhere in the iterator, 1762 /// `find_map_any` is a better choice. 
1763 /// 1764 /// # Examples 1765 /// 1766 /// ``` 1767 /// use rayon::prelude::*; 1768 /// 1769 /// let c = ["lol", "NaN", "2", "5"]; 1770 /// 1771 /// let first_number = c.par_iter().find_map_first(|s| s.parse().ok()); 1772 /// 1773 /// assert_eq!(first_number, Some(2)); 1774 /// ``` find_map_first<P, R>(self, predicate: P) -> Option<R> where P: Fn(Self::Item) -> Option<R> + Sync + Send, R: Send,1775 fn find_map_first<P, R>(self, predicate: P) -> Option<R> 1776 where 1777 P: Fn(Self::Item) -> Option<R> + Sync + Send, 1778 R: Send, 1779 { 1780 fn yes<T>(_: &T) -> bool { 1781 true 1782 } 1783 self.filter_map(predicate).find_first(yes) 1784 } 1785 1786 /// Applies the given predicate to the items in the parallel iterator and 1787 /// returns the sequentially **last** non-None result of the map operation. 1788 /// 1789 /// Once a non-None value is produced from the map operation, all attempts 1790 /// to the left of the match will be stopped, while attempts to the right 1791 /// must continue in case a later match is found. 1792 /// 1793 /// Note that not all parallel iterators have a useful order, much like 1794 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 1795 /// just want the first non-None value discovered anywhere in the iterator, 1796 /// `find_map_any` is a better choice. 
1797 /// 1798 /// # Examples 1799 /// 1800 /// ``` 1801 /// use rayon::prelude::*; 1802 /// 1803 /// let c = ["lol", "NaN", "2", "5"]; 1804 /// 1805 /// let last_number = c.par_iter().find_map_last(|s| s.parse().ok()); 1806 /// 1807 /// assert_eq!(last_number, Some(5)); 1808 /// ``` find_map_last<P, R>(self, predicate: P) -> Option<R> where P: Fn(Self::Item) -> Option<R> + Sync + Send, R: Send,1809 fn find_map_last<P, R>(self, predicate: P) -> Option<R> 1810 where 1811 P: Fn(Self::Item) -> Option<R> + Sync + Send, 1812 R: Send, 1813 { 1814 fn yes<T>(_: &T) -> bool { 1815 true 1816 } 1817 self.filter_map(predicate).find_last(yes) 1818 } 1819 1820 #[doc(hidden)] 1821 #[deprecated(note = "parallel `find` does not search in order -- use `find_any`, \\ 1822 `find_first`, or `find_last`")] find<P>(self, predicate: P) -> Option<Self::Item> where P: Fn(&Self::Item) -> bool + Sync + Send,1823 fn find<P>(self, predicate: P) -> Option<Self::Item> 1824 where 1825 P: Fn(&Self::Item) -> bool + Sync + Send, 1826 { 1827 self.find_any(predicate) 1828 } 1829 1830 /// Searches for **some** item in the parallel iterator that 1831 /// matches the given predicate, and if so returns true. Once 1832 /// a match is found, we'll attempt to stop process the rest 1833 /// of the items. Proving that there's no match, returning false, 1834 /// does require visiting every item. 1835 /// 1836 /// # Examples 1837 /// 1838 /// ``` 1839 /// use rayon::prelude::*; 1840 /// 1841 /// let a = [0, 12, 3, 4, 0, 23, 0]; 1842 /// 1843 /// let is_valid = a.par_iter().any(|&x| x > 10); 1844 /// 1845 /// assert!(is_valid); 1846 /// ``` any<P>(self, predicate: P) -> bool where P: Fn(Self::Item) -> bool + Sync + Send,1847 fn any<P>(self, predicate: P) -> bool 1848 where 1849 P: Fn(Self::Item) -> bool + Sync + Send, 1850 { 1851 self.map(predicate).find_any(bool::clone).is_some() 1852 } 1853 1854 /// Tests that every item in the parallel iterator matches the given 1855 /// predicate, and if so returns true. 
If a counter-example is found, 1856 /// we'll attempt to stop processing more items, then return false. 1857 /// 1858 /// # Examples 1859 /// 1860 /// ``` 1861 /// use rayon::prelude::*; 1862 /// 1863 /// let a = [0, 12, 3, 4, 0, 23, 0]; 1864 /// 1865 /// let is_valid = a.par_iter().all(|&x| x > 10); 1866 /// 1867 /// assert!(!is_valid); 1868 /// ``` all<P>(self, predicate: P) -> bool where P: Fn(Self::Item) -> bool + Sync + Send,1869 fn all<P>(self, predicate: P) -> bool 1870 where 1871 P: Fn(Self::Item) -> bool + Sync + Send, 1872 { 1873 #[inline] 1874 fn is_false(x: &bool) -> bool { 1875 !x 1876 } 1877 1878 self.map(predicate).find_any(is_false).is_none() 1879 } 1880 1881 /// Creates an iterator over the `Some` items of this iterator, halting 1882 /// as soon as any `None` is found. 1883 /// 1884 /// # Examples 1885 /// 1886 /// ``` 1887 /// use rayon::prelude::*; 1888 /// use std::sync::atomic::{AtomicUsize, Ordering}; 1889 /// 1890 /// let counter = AtomicUsize::new(0); 1891 /// let value = (0_i32..2048) 1892 /// .into_par_iter() 1893 /// .map(|x| { 1894 /// counter.fetch_add(1, Ordering::SeqCst); 1895 /// if x < 1024 { Some(x) } else { None } 1896 /// }) 1897 /// .while_some() 1898 /// .max(); 1899 /// 1900 /// assert!(value < Some(1024)); 1901 /// assert!(counter.load(Ordering::SeqCst) < 2048); // should not have visited every single one 1902 /// ``` while_some<T>(self) -> WhileSome<Self> where Self: ParallelIterator<Item = Option<T>>, T: Send,1903 fn while_some<T>(self) -> WhileSome<Self> 1904 where 1905 Self: ParallelIterator<Item = Option<T>>, 1906 T: Send, 1907 { 1908 WhileSome::new(self) 1909 } 1910 1911 /// Wraps an iterator with a fuse in case of panics, to halt all threads 1912 /// as soon as possible. 1913 /// 1914 /// Panics within parallel iterators are always propagated to the caller, 1915 /// but they don't always halt the rest of the iterator right away, due to 1916 /// the internal semantics of [`join`]. 
This adaptor makes a greater effort 1917 /// to stop processing other items sooner, with the cost of additional 1918 /// synchronization overhead, which may also inhibit some optimizations. 1919 /// 1920 /// [`join`]: ../fn.join.html#panics 1921 /// 1922 /// # Examples 1923 /// 1924 /// If this code didn't use `panic_fuse()`, it would continue processing 1925 /// many more items in other threads (with long sleep delays) before the 1926 /// panic is finally propagated. 1927 /// 1928 /// ```should_panic 1929 /// use rayon::prelude::*; 1930 /// use std::{thread, time}; 1931 /// 1932 /// (0..1_000_000) 1933 /// .into_par_iter() 1934 /// .panic_fuse() 1935 /// .for_each(|i| { 1936 /// // simulate some work 1937 /// thread::sleep(time::Duration::from_secs(1)); 1938 /// assert!(i > 0); // oops! 1939 /// }); 1940 /// ``` panic_fuse(self) -> PanicFuse<Self>1941 fn panic_fuse(self) -> PanicFuse<Self> { 1942 PanicFuse::new(self) 1943 } 1944 1945 /// Creates a fresh collection containing all the elements produced 1946 /// by this parallel iterator. 1947 /// 1948 /// You may prefer [`collect_into_vec()`] implemented on 1949 /// [`IndexedParallelIterator`], if your underlying iterator also implements 1950 /// it. [`collect_into_vec()`] allocates efficiently with precise knowledge 1951 /// of how many elements the iterator contains, and even allows you to reuse 1952 /// an existing vector's backing store rather than allocating a fresh vector. 
1953 /// 1954 /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 1955 /// [`collect_into_vec()`]: 1956 /// trait.IndexedParallelIterator.html#method.collect_into_vec 1957 /// 1958 /// # Examples 1959 /// 1960 /// ``` 1961 /// use rayon::prelude::*; 1962 /// 1963 /// let sync_vec: Vec<_> = (0..100).into_iter().collect(); 1964 /// 1965 /// let async_vec: Vec<_> = (0..100).into_par_iter().collect(); 1966 /// 1967 /// assert_eq!(sync_vec, async_vec); 1968 /// ``` collect<C>(self) -> C where C: FromParallelIterator<Self::Item>,1969 fn collect<C>(self) -> C 1970 where 1971 C: FromParallelIterator<Self::Item>, 1972 { 1973 C::from_par_iter(self) 1974 } 1975 1976 /// Unzips the items of a parallel iterator into a pair of arbitrary 1977 /// `ParallelExtend` containers. 1978 /// 1979 /// You may prefer to use `unzip_into_vecs()`, which allocates more 1980 /// efficiently with precise knowledge of how many elements the 1981 /// iterator contains, and even allows you to reuse existing 1982 /// vectors' backing stores rather than allocating fresh vectors. 1983 /// 1984 /// # Examples 1985 /// 1986 /// ``` 1987 /// use rayon::prelude::*; 1988 /// 1989 /// let a = [(0, 1), (1, 2), (2, 3), (3, 4)]; 1990 /// 1991 /// let (left, right): (Vec<_>, Vec<_>) = a.par_iter().cloned().unzip(); 1992 /// 1993 /// assert_eq!(left, [0, 1, 2, 3]); 1994 /// assert_eq!(right, [1, 2, 3, 4]); 1995 /// ``` 1996 /// 1997 /// Nested pairs can be unzipped too. 
1998 /// 1999 /// ``` 2000 /// use rayon::prelude::*; 2001 /// 2002 /// let (values, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = (0..4).into_par_iter() 2003 ///     .map(|i| (i, (i * i, i * i * i))) 2004 ///     .unzip(); 2005 /// 2006 /// assert_eq!(values, [0, 1, 2, 3]); 2007 /// assert_eq!(squares, [0, 1, 4, 9]); 2008 /// assert_eq!(cubes, [0, 1, 8, 27]); 2009 /// ``` unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where Self: ParallelIterator<Item = (A, B)>, FromA: Default + Send + ParallelExtend<A>, FromB: Default + Send + ParallelExtend<B>, A: Send, B: Send,2010 fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) 2011 where 2012 Self: ParallelIterator<Item = (A, B)>, 2013 FromA: Default + Send + ParallelExtend<A>, 2014 FromB: Default + Send + ParallelExtend<B>, 2015 A: Send, 2016 B: Send, 2017 { 2018 unzip::unzip(self) 2019 } 2020 2021 /// Partitions the items of a parallel iterator into a pair of arbitrary 2022 /// `ParallelExtend` containers. Items for which the `predicate` returns 2023 /// true go into the first container, and the rest go into the second. 2024 /// 2025 /// Note: unlike the standard `Iterator::partition`, this allows distinct 2026 /// collection types for the left and right items. This is more flexible, 2027 /// but may require new type annotations when converting sequential code 2028 /// that used type inference assuming the two were the same. 
2029 /// 2030 /// # Examples 2031 /// 2032 /// ``` 2033 /// use rayon::prelude::*; 2034 /// 2035 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter().partition(|x| x % 2 == 0); 2036 /// 2037 /// assert_eq!(left, [0, 2, 4, 6]); 2038 /// assert_eq!(right, [1, 3, 5, 7]); 2039 /// ``` partition<A, B, P>(self, predicate: P) -> (A, B) where A: Default + Send + ParallelExtend<Self::Item>, B: Default + Send + ParallelExtend<Self::Item>, P: Fn(&Self::Item) -> bool + Sync + Send,2040 fn partition<A, B, P>(self, predicate: P) -> (A, B) 2041 where 2042 A: Default + Send + ParallelExtend<Self::Item>, 2043 B: Default + Send + ParallelExtend<Self::Item>, 2044 P: Fn(&Self::Item) -> bool + Sync + Send, 2045 { 2046 unzip::partition(self, predicate) 2047 } 2048 2049 /// Partitions and maps the items of a parallel iterator into a pair of 2050 /// arbitrary `ParallelExtend` containers. `Either::Left` items go into 2051 /// the first container, and `Either::Right` items go into the second. 2052 /// 2053 /// # Examples 2054 /// 2055 /// ``` 2056 /// use rayon::prelude::*; 2057 /// use rayon::iter::Either; 2058 /// 2059 /// let (left, right): (Vec<_>, Vec<_>) = (0..8).into_par_iter() 2060 /// .partition_map(|x| { 2061 /// if x % 2 == 0 { 2062 /// Either::Left(x * 4) 2063 /// } else { 2064 /// Either::Right(x * 3) 2065 /// } 2066 /// }); 2067 /// 2068 /// assert_eq!(left, [0, 8, 16, 24]); 2069 /// assert_eq!(right, [3, 9, 15, 21]); 2070 /// ``` 2071 /// 2072 /// Nested `Either` enums can be split as well. 
2073 /// 2074 /// ``` 2075 /// use rayon::prelude::*; 2076 /// use rayon::iter::Either::*; 2077 /// 2078 /// let ((fizzbuzz, fizz), (buzz, other)): ((Vec<_>, Vec<_>), (Vec<_>, Vec<_>)) = (1..20) 2079 /// .into_par_iter() 2080 /// .partition_map(|x| match (x % 3, x % 5) { 2081 /// (0, 0) => Left(Left(x)), 2082 /// (0, _) => Left(Right(x)), 2083 /// (_, 0) => Right(Left(x)), 2084 /// (_, _) => Right(Right(x)), 2085 /// }); 2086 /// 2087 /// assert_eq!(fizzbuzz, [15]); 2088 /// assert_eq!(fizz, [3, 6, 9, 12, 18]); 2089 /// assert_eq!(buzz, [5, 10]); 2090 /// assert_eq!(other, [1, 2, 4, 7, 8, 11, 13, 14, 16, 17, 19]); 2091 /// ``` partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B) where A: Default + Send + ParallelExtend<L>, B: Default + Send + ParallelExtend<R>, P: Fn(Self::Item) -> Either<L, R> + Sync + Send, L: Send, R: Send,2092 fn partition_map<A, B, P, L, R>(self, predicate: P) -> (A, B) 2093 where 2094 A: Default + Send + ParallelExtend<L>, 2095 B: Default + Send + ParallelExtend<R>, 2096 P: Fn(Self::Item) -> Either<L, R> + Sync + Send, 2097 L: Send, 2098 R: Send, 2099 { 2100 unzip::partition_map(self, predicate) 2101 } 2102 2103 /// Intersperses clones of an element between items of this iterator. 2104 /// 2105 /// # Examples 2106 /// 2107 /// ``` 2108 /// use rayon::prelude::*; 2109 /// 2110 /// let x = vec![1, 2, 3]; 2111 /// let r: Vec<_> = x.into_par_iter().intersperse(-1).collect(); 2112 /// 2113 /// assert_eq!(r, vec![1, -1, 2, -1, 3]); 2114 /// ``` intersperse(self, element: Self::Item) -> Intersperse<Self> where Self::Item: Clone,2115 fn intersperse(self, element: Self::Item) -> Intersperse<Self> 2116 where 2117 Self::Item: Clone, 2118 { 2119 Intersperse::new(self, element) 2120 } 2121 2122 /// Internal method used to define the behavior of this parallel 2123 /// iterator. You should not need to call this directly. 
2124 /// 2125 /// This method causes the iterator `self` to start producing 2126 /// items and to feed them to the consumer `consumer` one by one. 2127 /// It may split the consumer before doing so to create the 2128 /// opportunity to produce in parallel. 2129 /// 2130 /// See the [README] for more details on the internals of parallel 2131 /// iterators. 2132 /// 2133 /// [README]: README.md drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>2134 fn drive_unindexed<C>(self, consumer: C) -> C::Result 2135 where 2136 C: UnindexedConsumer<Self::Item>; 2137 2138 /// Internal method used to define the behavior of this parallel 2139 /// iterator. You should not need to call this directly. 2140 /// 2141 /// Returns the number of items produced by this iterator, if known 2142 /// statically. This can be used by consumers to trigger special fast 2143 /// paths. Therefore, if `Some(_)` is returned, this iterator must only 2144 /// use the (indexed) `Consumer` methods when driving a consumer, such 2145 /// as `split_at()`. Calling `UnindexedConsumer::split_off_left()` or 2146 /// other `UnindexedConsumer` methods -- or returning an inaccurate 2147 /// value -- may result in panics. 2148 /// 2149 /// This method is currently used to optimize `collect` for want 2150 /// of true Rust specialization; it may be removed when 2151 /// specialization is stable. opt_len(&self) -> Option<usize>2152 fn opt_len(&self) -> Option<usize> { 2153 None 2154 } 2155 } 2156 2157 impl<T: ParallelIterator> IntoParallelIterator for T { 2158 type Iter = T; 2159 type Item = T::Item; 2160 into_par_iter(self) -> T2161 fn into_par_iter(self) -> T { 2162 self 2163 } 2164 } 2165 2166 /// An iterator that supports "random access" to its data, meaning 2167 /// that you can split it at arbitrary indices and draw data from 2168 /// those points. 
2169 /// 2170 /// **Note:** Not implemented for `u64`, `i64`, `u128`, or `i128` ranges 2171 pub trait IndexedParallelIterator: ParallelIterator { 2172 /// Collects the results of the iterator into the specified 2173 /// vector. The vector is always truncated before execution 2174 /// begins. If possible, reusing the vector across calls can lead 2175 /// to better performance since it reuses the same backing buffer. 2176 /// 2177 /// # Examples 2178 /// 2179 /// ``` 2180 /// use rayon::prelude::*; 2181 /// 2182 /// // any prior data will be truncated 2183 /// let mut vec = vec![-1, -2, -3]; 2184 /// 2185 /// (0..5).into_par_iter() 2186 /// .collect_into_vec(&mut vec); 2187 /// 2188 /// assert_eq!(vec, [0, 1, 2, 3, 4]); 2189 /// ``` collect_into_vec(self, target: &mut Vec<Self::Item>)2190 fn collect_into_vec(self, target: &mut Vec<Self::Item>) { 2191 collect::collect_into_vec(self, target); 2192 } 2193 2194 /// Unzips the results of the iterator into the specified 2195 /// vectors. The vectors are always truncated before execution 2196 /// begins. If possible, reusing the vectors across calls can lead 2197 /// to better performance since they reuse the same backing buffer. 
2198 /// 2199 /// # Examples 2200 /// 2201 /// ``` 2202 /// use rayon::prelude::*; 2203 /// 2204 /// // any prior data will be truncated 2205 /// let mut left = vec![42; 10]; 2206 /// let mut right = vec![-1; 10]; 2207 /// 2208 /// (10..15).into_par_iter() 2209 /// .enumerate() 2210 /// .unzip_into_vecs(&mut left, &mut right); 2211 /// 2212 /// assert_eq!(left, [0, 1, 2, 3, 4]); 2213 /// assert_eq!(right, [10, 11, 12, 13, 14]); 2214 /// ``` unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>) where Self: IndexedParallelIterator<Item = (A, B)>, A: Send, B: Send,2215 fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>) 2216 where 2217 Self: IndexedParallelIterator<Item = (A, B)>, 2218 A: Send, 2219 B: Send, 2220 { 2221 collect::unzip_into_vecs(self, left, right); 2222 } 2223 2224 /// Iterates over tuples `(A, B)`, where the items `A` are from 2225 /// this iterator and `B` are from the iterator given as argument. 2226 /// Like the `zip` method on ordinary iterators, if the two 2227 /// iterators are of unequal length, you only get the items they 2228 /// have in common. 2229 /// 2230 /// # Examples 2231 /// 2232 /// ``` 2233 /// use rayon::prelude::*; 2234 /// 2235 /// let result: Vec<_> = (1..4) 2236 /// .into_par_iter() 2237 /// .zip(vec!['a', 'b', 'c']) 2238 /// .collect(); 2239 /// 2240 /// assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]); 2241 /// ``` zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter> where Z: IntoParallelIterator, Z::Iter: IndexedParallelIterator,2242 fn zip<Z>(self, zip_op: Z) -> Zip<Self, Z::Iter> 2243 where 2244 Z: IntoParallelIterator, 2245 Z::Iter: IndexedParallelIterator, 2246 { 2247 Zip::new(self, zip_op.into_par_iter()) 2248 } 2249 2250 /// The same as `Zip`, but requires that both iterators have the same length. 2251 /// 2252 /// # Panics 2253 /// Will panic if `self` and `zip_op` are not the same length. 
2254 /// 2255 /// ```should_panic 2256 /// use rayon::prelude::*; 2257 /// 2258 /// let one = [1u8]; 2259 /// let two = [2u8, 2]; 2260 /// let one_iter = one.par_iter(); 2261 /// let two_iter = two.par_iter(); 2262 /// 2263 /// // this will panic 2264 /// let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect(); 2265 /// 2266 /// // we should never get here 2267 /// assert_eq!(1, zipped.len()); 2268 /// ``` zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter> where Z: IntoParallelIterator, Z::Iter: IndexedParallelIterator,2269 fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, Z::Iter> 2270 where 2271 Z: IntoParallelIterator, 2272 Z::Iter: IndexedParallelIterator, 2273 { 2274 let zip_op_iter = zip_op.into_par_iter(); 2275 assert_eq!(self.len(), zip_op_iter.len()); 2276 ZipEq::new(self, zip_op_iter) 2277 } 2278 2279 /// Interleaves elements of this iterator and the other given 2280 /// iterator. Alternately yields elements from this iterator and 2281 /// the given iterator, until both are exhausted. If one iterator 2282 /// is exhausted before the other, the last elements are provided 2283 /// from the other. 2284 /// 2285 /// # Examples 2286 /// 2287 /// ``` 2288 /// use rayon::prelude::*; 2289 /// let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]); 2290 /// let r: Vec<i32> = x.into_par_iter().interleave(y).collect(); 2291 /// assert_eq!(r, vec![1, 3, 2, 4, 5, 6]); 2292 /// ``` interleave<I>(self, other: I) -> Interleave<Self, I::Iter> where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator<Item = Self::Item>,2293 fn interleave<I>(self, other: I) -> Interleave<Self, I::Iter> 2294 where 2295 I: IntoParallelIterator<Item = Self::Item>, 2296 I::Iter: IndexedParallelIterator<Item = Self::Item>, 2297 { 2298 Interleave::new(self, other.into_par_iter()) 2299 } 2300 2301 /// Interleaves elements of this iterator and the other given 2302 /// iterator, until one is exhausted. 
2303 /// 2304 /// # Examples 2305 /// 2306 /// ``` 2307 /// use rayon::prelude::*; 2308 /// let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]); 2309 /// let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect(); 2310 /// assert_eq!(r, vec![1, 5, 2, 6, 3]); 2311 /// ``` interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter> where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator<Item = Self::Item>,2312 fn interleave_shortest<I>(self, other: I) -> InterleaveShortest<Self, I::Iter> 2313 where 2314 I: IntoParallelIterator<Item = Self::Item>, 2315 I::Iter: IndexedParallelIterator<Item = Self::Item>, 2316 { 2317 InterleaveShortest::new(self, other.into_par_iter()) 2318 } 2319 2320 /// Splits an iterator up into fixed-size chunks. 2321 /// 2322 /// Returns an iterator that returns `Vec`s of the given number of elements. 2323 /// If the number of elements in the iterator is not divisible by `chunk_size`, 2324 /// the last chunk may be shorter than `chunk_size`. 2325 /// 2326 /// See also [`par_chunks()`] and [`par_chunks_mut()`] for similar behavior on 2327 /// slices, without having to allocate intermediate `Vec`s for the chunks. 2328 /// 2329 /// [`par_chunks()`]: ../slice/trait.ParallelSlice.html#method.par_chunks 2330 /// [`par_chunks_mut()`]: ../slice/trait.ParallelSliceMut.html#method.par_chunks_mut 2331 /// 2332 /// # Examples 2333 /// 2334 /// ``` 2335 /// use rayon::prelude::*; 2336 /// let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; 2337 /// let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect(); 2338 /// assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]); 2339 /// ``` chunks(self, chunk_size: usize) -> Chunks<Self>2340 fn chunks(self, chunk_size: usize) -> Chunks<Self> { 2341 assert!(chunk_size != 0, "chunk_size must not be zero"); 2342 Chunks::new(self, chunk_size) 2343 } 2344 2345 /// Lexicographically compares the elements of this `ParallelIterator` with those of 2346 /// another. 
2347 /// 2348 /// # Examples 2349 /// 2350 /// ``` 2351 /// use rayon::prelude::*; 2352 /// use std::cmp::Ordering::*; 2353 /// 2354 /// let x = vec![1, 2, 3]; 2355 /// assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less); 2356 /// assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal); 2357 /// assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater); 2358 /// ``` cmp<I>(self, other: I) -> Ordering where I: IntoParallelIterator<Item = Self::Item>, I::Iter: IndexedParallelIterator, Self::Item: Ord,2359 fn cmp<I>(self, other: I) -> Ordering 2360 where 2361 I: IntoParallelIterator<Item = Self::Item>, 2362 I::Iter: IndexedParallelIterator, 2363 Self::Item: Ord, 2364 { 2365 #[inline] 2366 fn ordering<T: Ord>((x, y): (T, T)) -> Ordering { 2367 Ord::cmp(&x, &y) 2368 } 2369 2370 #[inline] 2371 fn inequal(&ord: &Ordering) -> bool { 2372 ord != Ordering::Equal 2373 } 2374 2375 let other = other.into_par_iter(); 2376 let ord_len = self.len().cmp(&other.len()); 2377 self.zip(other) 2378 .map(ordering) 2379 .find_first(inequal) 2380 .unwrap_or(ord_len) 2381 } 2382 2383 /// Lexicographically compares the elements of this `ParallelIterator` with those of 2384 /// another. 
2385 /// 2386 /// # Examples 2387 /// 2388 /// ``` 2389 /// use rayon::prelude::*; 2390 /// use std::cmp::Ordering::*; 2391 /// use std::f64::NAN; 2392 /// 2393 /// let x = vec![1.0, 2.0, 3.0]; 2394 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less)); 2395 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal)); 2396 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater)); 2397 /// assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None); 2398 /// ``` partial_cmp<I>(self, other: I) -> Option<Ordering> where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2399 fn partial_cmp<I>(self, other: I) -> Option<Ordering> 2400 where 2401 I: IntoParallelIterator, 2402 I::Iter: IndexedParallelIterator, 2403 Self::Item: PartialOrd<I::Item>, 2404 { 2405 #[inline] 2406 fn ordering<T: PartialOrd<U>, U>((x, y): (T, U)) -> Option<Ordering> { 2407 PartialOrd::partial_cmp(&x, &y) 2408 } 2409 2410 #[inline] 2411 fn inequal(&ord: &Option<Ordering>) -> bool { 2412 ord != Some(Ordering::Equal) 2413 } 2414 2415 let other = other.into_par_iter(); 2416 let ord_len = self.len().cmp(&other.len()); 2417 self.zip(other) 2418 .map(ordering) 2419 .find_first(inequal) 2420 .unwrap_or(Some(ord_len)) 2421 } 2422 2423 /// Determines if the elements of this `ParallelIterator` 2424 /// are equal to those of another eq<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialEq<I::Item>,2425 fn eq<I>(self, other: I) -> bool 2426 where 2427 I: IntoParallelIterator, 2428 I::Iter: IndexedParallelIterator, 2429 Self::Item: PartialEq<I::Item>, 2430 { 2431 #[inline] 2432 fn eq<T: PartialEq<U>, U>((x, y): (T, U)) -> bool { 2433 PartialEq::eq(&x, &y) 2434 } 2435 2436 let other = other.into_par_iter(); 2437 self.len() == other.len() && self.zip(other).all(eq) 2438 } 2439 2440 /// Determines if the elements of this `ParallelIterator` 2441 /// are unequal to 
those of another ne<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialEq<I::Item>,2442 fn ne<I>(self, other: I) -> bool 2443 where 2444 I: IntoParallelIterator, 2445 I::Iter: IndexedParallelIterator, 2446 Self::Item: PartialEq<I::Item>, 2447 { 2448 !self.eq(other) 2449 } 2450 2451 /// Determines if the elements of this `ParallelIterator` 2452 /// are lexicographically less than those of another. lt<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2453 fn lt<I>(self, other: I) -> bool 2454 where 2455 I: IntoParallelIterator, 2456 I::Iter: IndexedParallelIterator, 2457 Self::Item: PartialOrd<I::Item>, 2458 { 2459 self.partial_cmp(other) == Some(Ordering::Less) 2460 } 2461 2462 /// Determines if the elements of this `ParallelIterator` 2463 /// are less or equal to those of another. le<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2464 fn le<I>(self, other: I) -> bool 2465 where 2466 I: IntoParallelIterator, 2467 I::Iter: IndexedParallelIterator, 2468 Self::Item: PartialOrd<I::Item>, 2469 { 2470 let ord = self.partial_cmp(other); 2471 ord == Some(Ordering::Equal) || ord == Some(Ordering::Less) 2472 } 2473 2474 /// Determines if the elements of this `ParallelIterator` 2475 /// are lexicographically greater than those of another. gt<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2476 fn gt<I>(self, other: I) -> bool 2477 where 2478 I: IntoParallelIterator, 2479 I::Iter: IndexedParallelIterator, 2480 Self::Item: PartialOrd<I::Item>, 2481 { 2482 self.partial_cmp(other) == Some(Ordering::Greater) 2483 } 2484 2485 /// Determines if the elements of this `ParallelIterator` 2486 /// are greater than or equal to those of another. 
ge<I>(self, other: I) -> bool where I: IntoParallelIterator, I::Iter: IndexedParallelIterator, Self::Item: PartialOrd<I::Item>,2487 fn ge<I>(self, other: I) -> bool 2488 where 2489 I: IntoParallelIterator, 2490 I::Iter: IndexedParallelIterator, 2491 Self::Item: PartialOrd<I::Item>, 2492 { 2493 let ord = self.partial_cmp(other); 2494 ord == Some(Ordering::Equal) || ord == Some(Ordering::Greater) 2495 } 2496 2497 /// Yields an index along with each item. 2498 /// 2499 /// # Examples 2500 /// 2501 /// ``` 2502 /// use rayon::prelude::*; 2503 /// 2504 /// let chars = vec!['a', 'b', 'c']; 2505 /// let result: Vec<_> = chars 2506 ///     .into_par_iter() 2507 ///     .enumerate() 2508 ///     .collect(); 2509 /// 2510 /// assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]); 2511 /// ``` enumerate(self) -> Enumerate<Self>2512 fn enumerate(self) -> Enumerate<Self> { 2513 Enumerate::new(self) 2514 } 2515 2516 /// Creates an iterator that steps by the given amount. 2517 /// 2518 /// # Examples 2519 /// 2520 /// ``` 2521 /// use rayon::prelude::*; 2522 /// 2523 /// let range = 3..10; 2524 /// let result: Vec<i32> = range 2525 ///     .into_par_iter() 2526 ///     .step_by(3) 2527 ///     .collect(); 2528 /// 2529 /// assert_eq!(result, [3, 6, 9]); 2530 /// ``` 2531 /// 2532 /// # Compatibility 2533 /// 2534 /// This method is only available on Rust 1.38 or greater. 2535 #[cfg(step_by)] step_by(self, step: usize) -> StepBy<Self>2536 fn step_by(self, step: usize) -> StepBy<Self> { 2537 StepBy::new(self, step) 2538 } 2539 2540 /// Creates an iterator that skips the first `n` elements. 
2541 /// 2542 /// # Examples 2543 /// 2544 /// ``` 2545 /// use rayon::prelude::*; 2546 /// 2547 /// let result: Vec<_> = (0..100) 2548 /// .into_par_iter() 2549 /// .skip(95) 2550 /// .collect(); 2551 /// 2552 /// assert_eq!(result, [95, 96, 97, 98, 99]); 2553 /// ``` skip(self, n: usize) -> Skip<Self>2554 fn skip(self, n: usize) -> Skip<Self> { 2555 Skip::new(self, n) 2556 } 2557 2558 /// Creates an iterator that yields the first `n` elements. 2559 /// 2560 /// # Examples 2561 /// 2562 /// ``` 2563 /// use rayon::prelude::*; 2564 /// 2565 /// let result: Vec<_> = (0..100) 2566 /// .into_par_iter() 2567 /// .take(5) 2568 /// .collect(); 2569 /// 2570 /// assert_eq!(result, [0, 1, 2, 3, 4]); 2571 /// ``` take(self, n: usize) -> Take<Self>2572 fn take(self, n: usize) -> Take<Self> { 2573 Take::new(self, n) 2574 } 2575 2576 /// Searches for **some** item in the parallel iterator that 2577 /// matches the given predicate, and returns its index. Like 2578 /// `ParallelIterator::find_any`, the parallel search will not 2579 /// necessarily find the **first** match, and once a match is 2580 /// found we'll attempt to stop processing any more. 
2581 /// 2582 /// # Examples 2583 /// 2584 /// ``` 2585 /// use rayon::prelude::*; 2586 /// 2587 /// let a = [1, 2, 3, 3]; 2588 /// 2589 /// let i = a.par_iter().position_any(|&x| x == 3).expect("found"); 2590 /// assert!(i == 2 || i == 3); 2591 /// 2592 /// assert_eq!(a.par_iter().position_any(|&x| x == 100), None); 2593 /// ``` position_any<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send,2594 fn position_any<P>(self, predicate: P) -> Option<usize> 2595 where 2596 P: Fn(Self::Item) -> bool + Sync + Send, 2597 { 2598 #[inline] 2599 fn check(&(_, p): &(usize, bool)) -> bool { 2600 p 2601 } 2602 2603 let (i, _) = self.map(predicate).enumerate().find_any(check)?; 2604 Some(i) 2605 } 2606 2607 /// Searches for the sequentially **first** item in the parallel iterator 2608 /// that matches the given predicate, and returns its index. 2609 /// 2610 /// Like `ParallelIterator::find_first`, once a match is found, 2611 /// all attempts to the right of the match will be stopped, while 2612 /// attempts to the left must continue in case an earlier match 2613 /// is found. 2614 /// 2615 /// Note that not all parallel iterators have a useful order, much like 2616 /// sequential `HashMap` iteration, so "first" may be nebulous. If you 2617 /// just want the first match discovered anywhere in the iterator, 2618 /// `position_any` is a better choice. 
2619 /// 2620 /// # Examples 2621 /// 2622 /// ``` 2623 /// use rayon::prelude::*; 2624 /// 2625 /// let a = [1, 2, 3, 3]; 2626 /// 2627 /// assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2)); 2628 /// 2629 /// assert_eq!(a.par_iter().position_first(|&x| x == 100), None); 2630 /// ``` position_first<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send,2631 fn position_first<P>(self, predicate: P) -> Option<usize> 2632 where 2633 P: Fn(Self::Item) -> bool + Sync + Send, 2634 { 2635 #[inline] 2636 fn check(&(_, p): &(usize, bool)) -> bool { 2637 p 2638 } 2639 2640 let (i, _) = self.map(predicate).enumerate().find_first(check)?; 2641 Some(i) 2642 } 2643 2644 /// Searches for the sequentially **last** item in the parallel iterator 2645 /// that matches the given predicate, and returns its index. 2646 /// 2647 /// Like `ParallelIterator::find_last`, once a match is found, 2648 /// all attempts to the left of the match will be stopped, while 2649 /// attempts to the right must continue in case a later match 2650 /// is found. 2651 /// 2652 /// Note that not all parallel iterators have a useful order, much like 2653 /// sequential `HashMap` iteration, so "last" may be nebulous. When the 2654 /// order doesn't actually matter to you, `position_any` is a better 2655 /// choice. 
2656 /// 2657 /// # Examples 2658 /// 2659 /// ``` 2660 /// use rayon::prelude::*; 2661 /// 2662 /// let a = [1, 2, 3, 3]; 2663 /// 2664 /// assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3)); 2665 /// 2666 /// assert_eq!(a.par_iter().position_last(|&x| x == 100), None); 2667 /// ``` position_last<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send,2668 fn position_last<P>(self, predicate: P) -> Option<usize> 2669 where 2670 P: Fn(Self::Item) -> bool + Sync + Send, 2671 { 2672 #[inline] 2673 fn check(&(_, p): &(usize, bool)) -> bool { 2674 p 2675 } 2676 2677 let (i, _) = self.map(predicate).enumerate().find_last(check)?; 2678 Some(i) 2679 } 2680 2681 #[doc(hidden)] 2682 #[deprecated( 2683 note = "parallel `position` does not search in order -- use `position_any`, \\ 2684 `position_first`, or `position_last`" 2685 )] position<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send,2686 fn position<P>(self, predicate: P) -> Option<usize> 2687 where 2688 P: Fn(Self::Item) -> bool + Sync + Send, 2689 { 2690 self.position_any(predicate) 2691 } 2692 2693 /// Searches for items in the parallel iterator that match the given 2694 /// predicate, and returns their indices. 
2695 /// 2696 /// # Examples 2697 /// 2698 /// ``` 2699 /// use rayon::prelude::*; 2700 /// 2701 /// let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29]; 2702 /// 2703 /// // Find the positions of primes congruent to 1 modulo 6 2704 /// let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect(); 2705 /// assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19 2706 /// 2707 /// // Find the positions of primes congruent to 5 modulo 6 2708 /// let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect(); 2709 /// assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29 2710 /// ``` positions<P>(self, predicate: P) -> Positions<Self, P> where P: Fn(Self::Item) -> bool + Sync + Send,2711 fn positions<P>(self, predicate: P) -> Positions<Self, P> 2712 where 2713 P: Fn(Self::Item) -> bool + Sync + Send, 2714 { 2715 Positions::new(self, predicate) 2716 } 2717 2718 /// Produces a new iterator with the elements of this iterator in 2719 /// reverse order. 2720 /// 2721 /// # Examples 2722 /// 2723 /// ``` 2724 /// use rayon::prelude::*; 2725 /// 2726 /// let result: Vec<_> = (0..5) 2727 /// .into_par_iter() 2728 /// .rev() 2729 /// .collect(); 2730 /// 2731 /// assert_eq!(result, [4, 3, 2, 1, 0]); 2732 /// ``` rev(self) -> Rev<Self>2733 fn rev(self) -> Rev<Self> { 2734 Rev::new(self) 2735 } 2736 2737 /// Sets the minimum length of iterators desired to process in each 2738 /// thread. Rayon will not split any smaller than this length, but 2739 /// of course an iterator could already be smaller to begin with. 2740 /// 2741 /// Producers like `zip` and `interleave` will use greater of the two 2742 /// minimums. 2743 /// Chained iterators and iterators inside `flat_map` may each use 2744 /// their own minimum length. 
2745 /// 2746 /// # Examples 2747 /// 2748 /// ``` 2749 /// use rayon::prelude::*; 2750 /// 2751 /// let min = (0..1_000_000) 2752 /// .into_par_iter() 2753 /// .with_min_len(1234) 2754 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment 2755 /// .min().unwrap(); 2756 /// 2757 /// assert!(min >= 1234); 2758 /// ``` with_min_len(self, min: usize) -> MinLen<Self>2759 fn with_min_len(self, min: usize) -> MinLen<Self> { 2760 MinLen::new(self, min) 2761 } 2762 2763 /// Sets the maximum length of iterators desired to process in each 2764 /// thread. Rayon will try to split at least below this length, 2765 /// unless that would put it below the length from `with_min_len()`. 2766 /// For example, given min=10 and max=15, a length of 16 will not be 2767 /// split any further. 2768 /// 2769 /// Producers like `zip` and `interleave` will use lesser of the two 2770 /// maximums. 2771 /// Chained iterators and iterators inside `flat_map` may each use 2772 /// their own maximum length. 2773 /// 2774 /// # Examples 2775 /// 2776 /// ``` 2777 /// use rayon::prelude::*; 2778 /// 2779 /// let max = (0..1_000_000) 2780 /// .into_par_iter() 2781 /// .with_max_len(1234) 2782 /// .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment 2783 /// .max().unwrap(); 2784 /// 2785 /// assert!(max <= 1234); 2786 /// ``` with_max_len(self, max: usize) -> MaxLen<Self>2787 fn with_max_len(self, max: usize) -> MaxLen<Self> { 2788 MaxLen::new(self, max) 2789 } 2790 2791 /// Produces an exact count of how many items this iterator will 2792 /// produce, presuming no panic occurs. 
2793 /// 2794 /// # Examples 2795 /// 2796 /// ``` 2797 /// use rayon::prelude::*; 2798 /// 2799 /// let par_iter = (0..100).into_par_iter().zip(vec![0; 10]); 2800 /// assert_eq!(par_iter.len(), 10); 2801 /// 2802 /// let vec: Vec<_> = par_iter.collect(); 2803 /// assert_eq!(vec.len(), 10); 2804 /// ``` len(&self) -> usize2805 fn len(&self) -> usize; 2806 2807 /// Internal method used to define the behavior of this parallel 2808 /// iterator. You should not need to call this directly. 2809 /// 2810 /// This method causes the iterator `self` to start producing 2811 /// items and to feed them to the consumer `consumer` one by one. 2812 /// It may split the consumer before doing so to create the 2813 /// opportunity to produce in parallel. If a split does happen, it 2814 /// will inform the consumer of the index where the split should 2815 /// occur (unlike `ParallelIterator::drive_unindexed()`). 2816 /// 2817 /// See the [README] for more details on the internals of parallel 2818 /// iterators. 2819 /// 2820 /// [README]: README.md drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result2821 fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result; 2822 2823 /// Internal method used to define the behavior of this parallel 2824 /// iterator. You should not need to call this directly. 2825 /// 2826 /// This method converts the iterator into a producer P and then 2827 /// invokes `callback.callback()` with P. Note that the type of 2828 /// this producer is not defined as part of the API, since 2829 /// `callback` must be defined generically for all producers. This 2830 /// allows the producer type to contain references; it also means 2831 /// that parallel iterators can adjust that type without causing a 2832 /// breaking change. 2833 /// 2834 /// See the [README] for more details on the internals of parallel 2835 /// iterators. 
2836 /// 2837 /// [README]: README.md with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output2838 fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output; 2839 } 2840 2841 /// `FromParallelIterator` implements the creation of a collection 2842 /// from a [`ParallelIterator`]. By implementing 2843 /// `FromParallelIterator` for a given type, you define how it will be 2844 /// created from an iterator. 2845 /// 2846 /// `FromParallelIterator` is used through [`ParallelIterator`]'s [`collect()`] method. 2847 /// 2848 /// [`ParallelIterator`]: trait.ParallelIterator.html 2849 /// [`collect()`]: trait.ParallelIterator.html#method.collect 2850 /// 2851 /// # Examples 2852 /// 2853 /// Implementing `FromParallelIterator` for your type: 2854 /// 2855 /// ``` 2856 /// use rayon::prelude::*; 2857 /// use std::mem; 2858 /// 2859 /// struct BlackHole { 2860 /// mass: usize, 2861 /// } 2862 /// 2863 /// impl<T: Send> FromParallelIterator<T> for BlackHole { 2864 /// fn from_par_iter<I>(par_iter: I) -> Self 2865 /// where I: IntoParallelIterator<Item = T> 2866 /// { 2867 /// let par_iter = par_iter.into_par_iter(); 2868 /// BlackHole { 2869 /// mass: par_iter.count() * mem::size_of::<T>(), 2870 /// } 2871 /// } 2872 /// } 2873 /// 2874 /// let bh: BlackHole = (0i32..1000).into_par_iter().collect(); 2875 /// assert_eq!(bh.mass, 4000); 2876 /// ``` 2877 pub trait FromParallelIterator<T> 2878 where 2879 T: Send, 2880 { 2881 /// Creates an instance of the collection from the parallel iterator `par_iter`. 2882 /// 2883 /// If your collection is not naturally parallel, the easiest (and 2884 /// fastest) way to do this is often to collect `par_iter` into a 2885 /// [`LinkedList`] or other intermediate data structure and then 2886 /// sequentially extend your collection. However, a more 'native' 2887 /// technique is to use the [`par_iter.fold`] or 2888 /// [`par_iter.fold_with`] methods to create the collection. 
2889 /// Alternatively, if your collection is 'natively' parallel, you 2890 /// can use `par_iter.for_each` to process each element in turn. 2891 /// 2892 /// [`LinkedList`]: https://doc.rust-lang.org/std/collections/struct.LinkedList.html 2893 /// [`par_iter.fold`]: trait.ParallelIterator.html#method.fold 2894 /// [`par_iter.fold_with`]: trait.ParallelIterator.html#method.fold_with 2895 /// [`par_iter.for_each`]: trait.ParallelIterator.html#method.for_each from_par_iter<I>(par_iter: I) -> Self where I: IntoParallelIterator<Item = T>2896 fn from_par_iter<I>(par_iter: I) -> Self 2897 where 2898 I: IntoParallelIterator<Item = T>; 2899 } 2900 2901 /// `ParallelExtend` extends an existing collection with items from a [`ParallelIterator`]. 2902 /// 2903 /// [`ParallelIterator`]: trait.ParallelIterator.html 2904 /// 2905 /// # Examples 2906 /// 2907 /// Implementing `ParallelExtend` for your type: 2908 /// 2909 /// ``` 2910 /// use rayon::prelude::*; 2911 /// use std::mem; 2912 /// 2913 /// struct BlackHole { 2914 /// mass: usize, 2915 /// } 2916 /// 2917 /// impl<T: Send> ParallelExtend<T> for BlackHole { 2918 /// fn par_extend<I>(&mut self, par_iter: I) 2919 /// where I: IntoParallelIterator<Item = T> 2920 /// { 2921 /// let par_iter = par_iter.into_par_iter(); 2922 /// self.mass += par_iter.count() * mem::size_of::<T>(); 2923 /// } 2924 /// } 2925 /// 2926 /// let mut bh = BlackHole { mass: 0 }; 2927 /// bh.par_extend(0i32..1000); 2928 /// assert_eq!(bh.mass, 4000); 2929 /// bh.par_extend(0i64..10); 2930 /// assert_eq!(bh.mass, 4080); 2931 /// ``` 2932 pub trait ParallelExtend<T> 2933 where 2934 T: Send, 2935 { 2936 /// Extends an instance of the collection with the elements drawn 2937 /// from the parallel iterator `par_iter`. 
2938 /// 2939 /// # Examples 2940 /// 2941 /// ``` 2942 /// use rayon::prelude::*; 2943 /// 2944 /// let mut vec = vec![]; 2945 /// vec.par_extend(0..5); 2946 /// vec.par_extend((0..5).into_par_iter().map(|i| i * i)); 2947 /// assert_eq!(vec, [0, 1, 2, 3, 4, 0, 1, 4, 9, 16]); 2948 /// ``` par_extend<I>(&mut self, par_iter: I) where I: IntoParallelIterator<Item = T>2949 fn par_extend<I>(&mut self, par_iter: I) 2950 where 2951 I: IntoParallelIterator<Item = T>; 2952 } 2953 2954 /// `ParallelDrainFull` creates a parallel iterator that moves all items 2955 /// from a collection while retaining the original capacity. 2956 /// 2957 /// Types which are indexable typically implement [`ParallelDrainRange`] 2958 /// instead, where you can drain fully with `par_drain(..)`. 2959 /// 2960 /// [`ParallelDrainRange`]: trait.ParallelDrainRange.html 2961 pub trait ParallelDrainFull { 2962 /// The draining parallel iterator type that will be created. 2963 type Iter: ParallelIterator<Item = Self::Item>; 2964 2965 /// The type of item that the parallel iterator will produce. 2966 /// This is usually the same as `IntoParallelIterator::Item`. 2967 type Item: Send; 2968 2969 /// Returns a draining parallel iterator over an entire collection. 2970 /// 2971 /// When the iterator is dropped, all items are removed, even if the 2972 /// iterator was not fully consumed. If the iterator is leaked, for example 2973 /// using `std::mem::forget`, it is unspecified how many items are removed. 
2974 /// 2975 /// # Examples 2976 /// 2977 /// ``` 2978 /// use rayon::prelude::*; 2979 /// use std::collections::{BinaryHeap, HashSet}; 2980 /// 2981 /// let squares: HashSet<i32> = (0..10).map(|x| x * x).collect(); 2982 /// 2983 /// let mut heap: BinaryHeap<_> = squares.iter().copied().collect(); 2984 /// assert_eq!( 2985 /// // heaps are drained in arbitrary order 2986 /// heap.par_drain() 2987 /// .inspect(|x| assert!(squares.contains(x))) 2988 /// .count(), 2989 /// squares.len(), 2990 /// ); 2991 /// assert!(heap.is_empty()); 2992 /// assert!(heap.capacity() >= squares.len()); 2993 /// ``` par_drain(self) -> Self::Iter2994 fn par_drain(self) -> Self::Iter; 2995 } 2996 2997 /// `ParallelDrainRange` creates a parallel iterator that moves a range of items 2998 /// from a collection while retaining the original capacity. 2999 /// 3000 /// Types which are not indexable may implement [`ParallelDrainFull`] instead. 3001 /// 3002 /// [`ParallelDrainFull`]: trait.ParallelDrainFull.html 3003 pub trait ParallelDrainRange<Idx = usize> { 3004 /// The draining parallel iterator type that will be created. 3005 type Iter: ParallelIterator<Item = Self::Item>; 3006 3007 /// The type of item that the parallel iterator will produce. 3008 /// This is usually the same as `IntoParallelIterator::Item`. 3009 type Item: Send; 3010 3011 /// Returns a draining parallel iterator over a range of the collection. 3012 /// 3013 /// When the iterator is dropped, all items in the range are removed, even 3014 /// if the iterator was not fully consumed. If the iterator is leaked, for 3015 /// example using `std::mem::forget`, it is unspecified how many items are 3016 /// removed. 3017 /// 3018 /// # Examples 3019 /// 3020 /// ``` 3021 /// use rayon::prelude::*; 3022 /// 3023 /// let squares: Vec<i32> = (0..10).map(|x| x * x).collect(); 3024 /// 3025 /// println!("RangeFull"); 3026 /// let mut vec = squares.clone(); 3027 /// assert!(vec.par_drain(..) 
3028 /// .eq(squares.par_iter().copied())); 3029 /// assert!(vec.is_empty()); 3030 /// assert!(vec.capacity() >= squares.len()); 3031 /// 3032 /// println!("RangeFrom"); 3033 /// let mut vec = squares.clone(); 3034 /// assert!(vec.par_drain(5..) 3035 /// .eq(squares[5..].par_iter().copied())); 3036 /// assert_eq!(&vec[..], &squares[..5]); 3037 /// assert!(vec.capacity() >= squares.len()); 3038 /// 3039 /// println!("RangeTo"); 3040 /// let mut vec = squares.clone(); 3041 /// assert!(vec.par_drain(..5) 3042 /// .eq(squares[..5].par_iter().copied())); 3043 /// assert_eq!(&vec[..], &squares[5..]); 3044 /// assert!(vec.capacity() >= squares.len()); 3045 /// 3046 /// println!("RangeToInclusive"); 3047 /// let mut vec = squares.clone(); 3048 /// assert!(vec.par_drain(..=5) 3049 /// .eq(squares[..=5].par_iter().copied())); 3050 /// assert_eq!(&vec[..], &squares[6..]); 3051 /// assert!(vec.capacity() >= squares.len()); 3052 /// 3053 /// println!("Range"); 3054 /// let mut vec = squares.clone(); 3055 /// assert!(vec.par_drain(3..7) 3056 /// .eq(squares[3..7].par_iter().copied())); 3057 /// assert_eq!(&vec[..3], &squares[..3]); 3058 /// assert_eq!(&vec[3..], &squares[7..]); 3059 /// assert!(vec.capacity() >= squares.len()); 3060 /// 3061 /// println!("RangeInclusive"); 3062 /// let mut vec = squares.clone(); 3063 /// assert!(vec.par_drain(3..=7) 3064 /// .eq(squares[3..=7].par_iter().copied())); 3065 /// assert_eq!(&vec[..3], &squares[..3]); 3066 /// assert_eq!(&vec[3..], &squares[8..]); 3067 /// assert!(vec.capacity() >= squares.len()); 3068 /// ``` par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter3069 fn par_drain<R: RangeBounds<Idx>>(self, range: R) -> Self::Iter; 3070 } 3071 3072 /// We hide the `Try` trait in a private module, as it's only meant to be a 3073 /// stable clone of the standard library's `Try` trait, as yet unstable. 3074 mod private { 3075 /// Clone of `std::ops::Try`. 
3076 /// 3077 /// Implementing this trait is not permitted outside of `rayon`. 3078 pub trait Try { 3079 private_decl! {} 3080 3081 type Ok; 3082 type Error; into_result(self) -> Result<Self::Ok, Self::Error>3083 fn into_result(self) -> Result<Self::Ok, Self::Error>; from_ok(v: Self::Ok) -> Self3084 fn from_ok(v: Self::Ok) -> Self; from_error(v: Self::Error) -> Self3085 fn from_error(v: Self::Error) -> Self; 3086 } 3087 3088 impl<T> Try for Option<T> { 3089 private_impl! {} 3090 3091 type Ok = T; 3092 type Error = (); 3093 into_result(self) -> Result<T, ()>3094 fn into_result(self) -> Result<T, ()> { 3095 self.ok_or(()) 3096 } from_ok(v: T) -> Self3097 fn from_ok(v: T) -> Self { 3098 Some(v) 3099 } from_error(_: ()) -> Self3100 fn from_error(_: ()) -> Self { 3101 None 3102 } 3103 } 3104 3105 impl<T, E> Try for Result<T, E> { 3106 private_impl! {} 3107 3108 type Ok = T; 3109 type Error = E; 3110 into_result(self) -> Result<T, E>3111 fn into_result(self) -> Result<T, E> { 3112 self 3113 } from_ok(v: T) -> Self3114 fn from_ok(v: T) -> Self { 3115 Ok(v) 3116 } from_error(v: E) -> Self3117 fn from_error(v: E) -> Self { 3118 Err(v) 3119 } 3120 } 3121 } 3122