• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //! A priority queue implemented with a binary heap.
2 //!
3 //! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
4 //! Checking the largest element is *O*(1). Converting a vector to a binary heap
5 //! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
6 //! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
7 //! in-place heapsort.
8 //!
9 //! # Examples
10 //!
11 //! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
12 //! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
13 //! It shows how to use [`BinaryHeap`] with custom types.
14 //!
15 //! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
16 //! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
17 //! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
18 //!
19 //! ```
20 //! use std::cmp::Ordering;
21 //! use std::collections::BinaryHeap;
22 //!
23 //! #[derive(Copy, Clone, Eq, PartialEq)]
24 //! struct State {
25 //!     cost: usize,
26 //!     position: usize,
27 //! }
28 //!
29 //! // The priority queue depends on `Ord`.
30 //! // Explicitly implement the trait so the queue becomes a min-heap
31 //! // instead of a max-heap.
32 //! impl Ord for State {
33 //!     fn cmp(&self, other: &Self) -> Ordering {
34 //!         // Notice that we flip the ordering on costs.
35 //!         // In case of a tie we compare positions - this step is necessary
36 //!         // to make implementations of `PartialEq` and `Ord` consistent.
37 //!         other.cost.cmp(&self.cost)
38 //!             .then_with(|| self.position.cmp(&other.position))
39 //!     }
40 //! }
41 //!
42 //! // `PartialOrd` needs to be implemented as well.
43 //! impl PartialOrd for State {
44 //!     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
45 //!         Some(self.cmp(other))
46 //!     }
47 //! }
48 //!
49 //! // Each node is represented as a `usize`, for a shorter implementation.
50 //! struct Edge {
51 //!     node: usize,
52 //!     cost: usize,
53 //! }
54 //!
55 //! // Dijkstra's shortest path algorithm.
56 //!
57 //! // Start at `start` and use `dist` to track the current shortest distance
58 //! // to each node. This implementation isn't memory-efficient as it may leave duplicate
59 //! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
60 //! // for a simpler implementation.
61 //! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
62 //!     // dist[node] = current shortest distance from `start` to `node`
63 //!     let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
64 //!
65 //!     let mut heap = BinaryHeap::new();
66 //!
67 //!     // We're at `start`, with a zero cost
68 //!     dist[start] = 0;
69 //!     heap.push(State { cost: 0, position: start });
70 //!
71 //!     // Examine the frontier with lower cost nodes first (min-heap)
72 //!     while let Some(State { cost, position }) = heap.pop() {
73 //!         // Alternatively we could have continued to find all shortest paths
74 //!         if position == goal { return Some(cost); }
75 //!
76 //!         // Important as we may have already found a better way
77 //!         if cost > dist[position] { continue; }
78 //!
79 //!         // For each node we can reach, see if we can find a way with
80 //!         // a lower cost going through this node
81 //!         for edge in &adj_list[position] {
82 //!             let next = State { cost: cost + edge.cost, position: edge.node };
83 //!
84 //!             // If so, add it to the frontier and continue
85 //!             if next.cost < dist[next.position] {
86 //!                 heap.push(next);
87 //!                 // Relaxation, we have now found a better way
88 //!                 dist[next.position] = next.cost;
89 //!             }
90 //!         }
91 //!     }
92 //!
93 //!     // Goal not reachable
94 //!     None
95 //! }
96 //!
97 //! fn main() {
98 //!     // This is the directed graph we're going to use.
99 //!     // The node numbers correspond to the different states,
100 //!     // and the edge weights symbolize the cost of moving
101 //!     // from one node to another.
102 //!     // Note that the edges are one-way.
103 //!     //
104 //!     //                  7
105 //!     //          +-----------------+
106 //!     //          |                 |
107 //!     //          v   1        2    |  2
108 //!     //          0 -----> 1 -----> 3 ---> 4
109 //!     //          |        ^        ^      ^
110 //!     //          |        | 1      |      |
111 //!     //          |        |        | 3    | 1
112 //!     //          +------> 2 -------+      |
113 //!     //           10      |               |
114 //!     //                   +---------------+
115 //!     //
116 //!     // The graph is represented as an adjacency list where each index,
117 //!     // corresponding to a node value, has a list of outgoing edges.
118 //!     // Chosen for its efficiency.
119 //!     let graph = vec![
120 //!         // Node 0
121 //!         vec![Edge { node: 2, cost: 10 },
122 //!              Edge { node: 1, cost: 1 }],
123 //!         // Node 1
124 //!         vec![Edge { node: 3, cost: 2 }],
125 //!         // Node 2
126 //!         vec![Edge { node: 1, cost: 1 },
127 //!              Edge { node: 3, cost: 3 },
128 //!              Edge { node: 4, cost: 1 }],
129 //!         // Node 3
130 //!         vec![Edge { node: 0, cost: 7 },
131 //!              Edge { node: 4, cost: 2 }],
132 //!         // Node 4
133 //!         vec![]];
134 //!
135 //!     assert_eq!(shortest_path(&graph, 0, 1), Some(1));
136 //!     assert_eq!(shortest_path(&graph, 0, 3), Some(3));
137 //!     assert_eq!(shortest_path(&graph, 3, 0), Some(7));
138 //!     assert_eq!(shortest_path(&graph, 0, 4), Some(5));
139 //!     assert_eq!(shortest_path(&graph, 4, 0), None);
140 //! }
141 //! ```
142 
143 #![allow(missing_docs)]
144 #![stable(feature = "rust1", since = "1.0.0")]
145 
146 use core::alloc::Allocator;
147 use core::fmt;
148 use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
149 use core::mem::{self, swap, ManuallyDrop};
150 use core::num::NonZeroUsize;
151 use core::ops::{Deref, DerefMut};
152 use core::ptr;
153 
154 use crate::alloc::Global;
155 use crate::collections::TryReserveError;
156 use crate::slice;
157 use crate::vec::{self, AsVecIntoIter, Vec};
158 
159 #[cfg(test)]
160 mod tests;
161 
162 /// A priority queue implemented with a binary heap.
163 ///
164 /// This will be a max-heap.
165 ///
166 /// It is a logic error for an item to be modified in such a way that the
167 /// item's ordering relative to any other item, as determined by the [`Ord`]
168 /// trait, changes while it is in the heap. This is normally only possible
169 /// through interior mutability, global state, I/O, or unsafe code. The
170 /// behavior resulting from such a logic error is not specified, but will
171 /// be encapsulated to the `BinaryHeap` that observed the logic error and not
172 /// result in undefined behavior. This could include panics, incorrect results,
173 /// aborts, memory leaks, and non-termination.
174 ///
175 /// As long as no elements change their relative order while being in the heap
176 /// as described above, the API of `BinaryHeap` guarantees that the heap
177 /// invariant remains intact i.e. its methods all behave as documented. For
178 /// example if a method is documented as iterating in sorted order, that's
179 /// guaranteed to work as long as elements in the heap have not changed order,
180 /// even in the presence of closures getting unwinded out of, iterators getting
181 /// leaked, and similar foolishness.
182 ///
183 /// # Examples
184 ///
185 /// ```
186 /// use std::collections::BinaryHeap;
187 ///
188 /// // Type inference lets us omit an explicit type signature (which
189 /// // would be `BinaryHeap<i32>` in this example).
190 /// let mut heap = BinaryHeap::new();
191 ///
192 /// // We can use peek to look at the next item in the heap. In this case,
193 /// // there's no items in there yet so we get None.
194 /// assert_eq!(heap.peek(), None);
195 ///
196 /// // Let's add some scores...
197 /// heap.push(1);
198 /// heap.push(5);
199 /// heap.push(2);
200 ///
201 /// // Now peek shows the most important item in the heap.
202 /// assert_eq!(heap.peek(), Some(&5));
203 ///
204 /// // We can check the length of a heap.
205 /// assert_eq!(heap.len(), 3);
206 ///
207 /// // We can iterate over the items in the heap, although they are returned in
208 /// // a random order.
209 /// for x in &heap {
210 ///     println!("{x}");
211 /// }
212 ///
213 /// // If we instead pop these scores, they should come back in order.
214 /// assert_eq!(heap.pop(), Some(5));
215 /// assert_eq!(heap.pop(), Some(2));
216 /// assert_eq!(heap.pop(), Some(1));
217 /// assert_eq!(heap.pop(), None);
218 ///
219 /// // We can clear the heap of any remaining items.
220 /// heap.clear();
221 ///
222 /// // The heap should now be empty.
223 /// assert!(heap.is_empty())
224 /// ```
225 ///
226 /// A `BinaryHeap` with a known list of items can be initialized from an array:
227 ///
228 /// ```
229 /// use std::collections::BinaryHeap;
230 ///
231 /// let heap = BinaryHeap::from([1, 5, 2]);
232 /// ```
233 ///
234 /// ## Min-heap
235 ///
236 /// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
237 /// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
238 /// value instead of the greatest one.
239 ///
240 /// ```
241 /// use std::collections::BinaryHeap;
242 /// use std::cmp::Reverse;
243 ///
244 /// let mut heap = BinaryHeap::new();
245 ///
246 /// // Wrap values in `Reverse`
247 /// heap.push(Reverse(1));
248 /// heap.push(Reverse(5));
249 /// heap.push(Reverse(2));
250 ///
251 /// // If we pop these scores now, they should come back in the reverse order.
252 /// assert_eq!(heap.pop(), Some(Reverse(1)));
253 /// assert_eq!(heap.pop(), Some(Reverse(2)));
254 /// assert_eq!(heap.pop(), Some(Reverse(5)));
255 /// assert_eq!(heap.pop(), None);
256 /// ```
257 ///
258 /// # Time complexity
259 ///
260 /// | [push]  | [pop]         | [peek]/[peek\_mut] |
261 /// |---------|---------------|--------------------|
262 /// | *O*(1)~ | *O*(log(*n*)) | *O*(1)             |
263 ///
264 /// The value for `push` is an expected cost; the method documentation gives a
265 /// more detailed analysis.
266 ///
267 /// [`core::cmp::Reverse`]: core::cmp::Reverse
268 /// [`Cell`]: core::cell::Cell
269 /// [`RefCell`]: core::cell::RefCell
270 /// [push]: BinaryHeap::push
271 /// [pop]: BinaryHeap::pop
272 /// [peek]: BinaryHeap::peek
273 /// [peek\_mut]: BinaryHeap::peek_mut
274 #[stable(feature = "rust1", since = "1.0.0")]
275 #[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
276 pub struct BinaryHeap<
277     T,
278     #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
279 > {
280     data: Vec<T, A>,
281 }
282 
283 /// Structure wrapping a mutable reference to the greatest item on a
284 /// `BinaryHeap`.
285 ///
286 /// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
287 /// its documentation for more.
288 ///
289 /// [`peek_mut`]: BinaryHeap::peek_mut
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub struct PeekMut<
    'a,
    T: 'a + Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // The heap being peeked; `PeekMut` is only ever created for non-empty heaps.
    heap: &'a mut BinaryHeap<T, A>,
    // If a set_len + sift_down are required, this is Some. If a &mut T has not
    // yet been exposed to peek_mut()'s caller, it's None.
    // (Some(len) records the true length while `DerefMut` temporarily shrinks
    // the backing Vec to length 1 — the "leak amplification" trick.)
    original_len: Option<NonZeroUsize>,
}
301 
302 #[stable(feature = "collection_debug", since = "1.17.0")]
303 impl<T: Ord + fmt::Debug, A: Allocator> fmt::Debug for PeekMut<'_, T, A> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result304     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
305         f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
306     }
307 }
308 
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> Drop for PeekMut<'_, T, A> {
    /// Restores the heap invariant if a `&mut T` was handed out via `DerefMut`
    /// (in which case `original_len` is `Some` and the Vec length was shrunk).
    fn drop(&mut self) {
        if let Some(original_len) = self.original_len {
            // SAFETY: That's how many elements were in the Vec at the time of
            // the PeekMut::deref_mut call, and therefore also at the time of
            // the BinaryHeap::peek_mut call. Since the PeekMut did not end up
            // getting leaked, we are now undoing the leak amplification that
            // the DerefMut prepared for.
            unsafe { self.heap.data.set_len(original_len.get()) };

            // The root may have been mutated through the &mut T, so restore
            // the heap property.
            // SAFETY: PeekMut is only instantiated for non-empty heaps.
            unsafe { self.heap.sift_down(0) };
        }
    }
}
325 
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> Deref for PeekMut<'_, T, A> {
    type Target = T;
    /// Immutable access never disturbs the heap, so no bookkeeping is needed.
    fn deref(&self) -> &T {
        debug_assert!(!self.heap.is_empty());
        // SAFETY: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked(0) }
    }
}
335 
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<T: Ord, A: Allocator> DerefMut for PeekMut<'_, T, A> {
    /// Hands out a mutable reference to the greatest element, first recording
    /// the true length in `original_len` and shrinking the Vec to length 1
    /// ("leak amplification") so a leaked `PeekMut` cannot leave a broken heap.
    fn deref_mut(&mut self) -> &mut T {
        debug_assert!(!self.heap.is_empty());

        let len = self.heap.len();
        if len > 1 {
            // Here we preemptively leak all the rest of the underlying vector
            // after the currently max element. If the caller mutates the &mut T
            // we're about to give them, and then leaks the PeekMut, all these
            // elements will remain leaked. If they don't leak the PeekMut, then
            // either Drop or PeekMut::pop will un-leak the vector elements.
            //
            // This technique is described throughout several other places in
            // the standard library as "leak amplification".
            unsafe {
                // SAFETY: len > 1 so len != 0.
                self.original_len = Some(NonZeroUsize::new_unchecked(len));
                // SAFETY: len > 1 so all this does for now is leak elements,
                // which is safe.
                self.heap.data.set_len(1);
            }
        }

        // SAFETY: PeekMut is only instantiated for non-empty heaps
        unsafe { self.heap.data.get_unchecked_mut(0) }
    }
}
364 
impl<'a, T: Ord, A: Allocator> PeekMut<'a, T, A> {
    /// Removes the peeked value from the heap and returns it.
    #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
    pub fn pop(mut this: PeekMut<'a, T, A>) -> T {
        // `take()` also disarms the Drop impl's restore path, since we restore
        // the length (and remove the root) right here.
        if let Some(original_len) = this.original_len.take() {
            // SAFETY: This is how many elements were in the Vec at the time of
            // the BinaryHeap::peek_mut call.
            unsafe { this.heap.data.set_len(original_len.get()) };

            // Unlike in Drop, here we don't also need to do a sift_down even if
            // the caller could've mutated the element. It is removed from the
            // heap on the next line and pop() is not sensitive to its value.
        }
        this.heap.pop().unwrap()
    }
}
381 
382 #[stable(feature = "rust1", since = "1.0.0")]
383 impl<T: Clone, A: Allocator + Clone> Clone for BinaryHeap<T, A> {
clone(&self) -> Self384     fn clone(&self) -> Self {
385         BinaryHeap { data: self.data.clone() }
386     }
387 
clone_from(&mut self, source: &Self)388     fn clone_from(&mut self, source: &Self) {
389         self.data.clone_from(&source.data);
390     }
391 }
392 
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Default for BinaryHeap<T> {
    /// Creates an empty `BinaryHeap<T>`.
    ///
    /// Equivalent to [`BinaryHeap::new`]; does not allocate.
    #[inline]
    fn default() -> BinaryHeap<T> {
        BinaryHeap::new()
    }
}
401 
402 #[stable(feature = "binaryheap_debug", since = "1.4.0")]
403 impl<T: fmt::Debug, A: Allocator> fmt::Debug for BinaryHeap<T, A> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result404     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
405         f.debug_list().entries(self.iter()).finish()
406     }
407 }
408 
/// Drop guard that restores the heap invariant for `heap`'s elements from
/// index `rebuild_from` onward when it goes out of scope — including during
/// unwinding, so a panicking caller cannot leave the heap in a broken state.
struct RebuildOnDrop<
    'a,
    T: Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    heap: &'a mut BinaryHeap<T, A>,
    // First index whose element may violate the heap invariant; everything
    // before it is assumed to still form a proper heap.
    rebuild_from: usize,
}
417 
impl<T: Ord, A: Allocator> Drop for RebuildOnDrop<'_, T, A> {
    /// Re-establishes the heap property for the tail starting at `rebuild_from`.
    fn drop(&mut self) {
        self.heap.rebuild_tail(self.rebuild_from);
    }
}
423 
impl<T: Ord> BinaryHeap<T> {
    /// Creates an empty `BinaryHeap` as a max-heap.
    ///
    /// Does not allocate; an empty vector trivially satisfies the heap invariant.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn new() -> BinaryHeap<T> {
        BinaryHeap { data: vec![] }
    }

    /// Creates an empty `BinaryHeap` with at least the specified capacity.
    ///
    /// The binary heap will be able to hold at least `capacity` elements without
    /// reallocating. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(10);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
        BinaryHeap { data: Vec::with_capacity(capacity) }
    }
}
463 
464 impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
    /// Creates an empty `BinaryHeap` as a max-heap, using `A` as allocator.
    ///
    /// Does not allocate.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new_in(System);
    /// heap.push(4);
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[must_use]
    pub fn new_in(alloc: A) -> BinaryHeap<T, A> {
        BinaryHeap { data: Vec::new_in(alloc) }
    }
484 
    /// Creates an empty `BinaryHeap` with at least the specified capacity, using `A` as allocator.
    ///
    /// The binary heap will be able to hold at least `capacity` elements without
    /// reallocating. This method is allowed to allocate for more elements than
    /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(allocator_api)]
    ///
    /// use std::alloc::System;
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity_in(10, System);
    /// heap.push(4);
    /// ```
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[must_use]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> BinaryHeap<T, A> {
        BinaryHeap { data: Vec::with_capacity_in(capacity, alloc) }
    }
508 
    /// Returns a mutable reference to the greatest item in the binary heap, or
    /// `None` if it is empty.
    ///
    /// Note: If the `PeekMut` value is leaked, some heap elements might get
    /// leaked along with it, but the remaining elements will remain a valid
    /// heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// assert!(heap.peek_mut().is_none());
    ///
    /// heap.push(1);
    /// heap.push(5);
    /// heap.push(2);
    /// {
    ///     let mut val = heap.peek_mut().unwrap();
    ///     *val = 0;
    /// }
    /// assert_eq!(heap.peek(), Some(&2));
    /// ```
    ///
    /// # Time complexity
    ///
    /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
    /// otherwise it's *O*(1).
    #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
    pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, A>> {
        // `original_len: None` marks that no `&mut T` has been handed out yet;
        // `PeekMut::deref_mut` fills it in if mutation becomes possible.
        if self.is_empty() { None } else { Some(PeekMut { heap: self, original_len: None }) }
    }
543 
    /// Removes the greatest item from the binary heap and returns it, or `None` if it
    /// is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.pop(), Some(3));
    /// assert_eq!(heap.pop(), Some(1));
    /// assert_eq!(heap.pop(), None);
    /// ```
    ///
    /// # Time complexity
    ///
    /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn pop(&mut self) -> Option<T> {
        self.data.pop().map(|mut item| {
            if !self.is_empty() {
                // Swap the removed last element into the root slot, then
                // restore the heap property by sifting it down.
                swap(&mut item, &mut self.data[0]);
                // SAFETY: !self.is_empty() means that self.len() > 0
                unsafe { self.sift_down_to_bottom(0) };
            }
            item
        })
    }
574 
    /// Pushes an item onto the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert_eq!(heap.len(), 3);
    /// assert_eq!(heap.peek(), Some(&5));
    /// ```
    ///
    /// # Time complexity
    ///
    /// The expected cost of `push`, averaged over every possible ordering of
    /// the elements being pushed, and over a sufficiently large number of
    /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
    /// elements that are *not* already in any sorted pattern.
    ///
    /// The time complexity degrades if elements are pushed in predominantly
    /// ascending order. In the worst case, elements are pushed in ascending
    /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
    /// containing *n* elements.
    ///
    /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
    /// occurs when capacity is exhausted and needs a resize. The resize cost
    /// has been amortized in the previous figures.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn push(&mut self, item: T) {
        // Append at the end, then sift the new element up to its position.
        let old_len = self.len();
        self.data.push(item);
        // SAFETY: Since we pushed a new item it means that
        //  old_len = self.len() - 1 < self.len()
        unsafe { self.sift_up(0, old_len) };
    }
615 
    /// Consumes the `BinaryHeap` and returns a vector in sorted
    /// (ascending) order.
    ///
    /// This is the classic in-place heapsort: repeatedly swap the maximum
    /// (index 0) to the end of the shrinking prefix, then sift the new root
    /// down within that prefix.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
    /// heap.push(6);
    /// heap.push(3);
    ///
    /// let vec = heap.into_sorted_vec();
    /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_sorted_vec(mut self) -> Vec<T, A> {
        let mut end = self.len();
        while end > 1 {
            end -= 1;
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
            //  so it's always a valid index to access.
            //  It is safe to access index 0 (i.e. `ptr`), because
            //  1 <= end < self.len(), which means self.len() >= 2.
            unsafe {
                let ptr = self.data.as_mut_ptr();
                ptr::swap(ptr, ptr.add(end));
            }
            // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
            //  0 < 1 <= end <= self.len() - 1 < self.len()
            //  Which means 0 < end and end < self.len().
            unsafe { self.sift_down_range(0, end) };
        }
        self.into_vec()
    }
654 
    // The implementations of sift_up and sift_down use unsafe blocks in
    // order to move an element out of the vector (leaving behind a
    // hole), shift along the others and move the removed element back into the
    // vector at the final location of the hole.
    // The `Hole` type is used to represent this, and make sure
    // the hole is filled back at the end of its scope, even on panic.
    // Using a hole reduces the constant factor compared to using swaps,
    // which involves twice as many moves.

    /// Moves the element at `pos` up toward the root (but not past `start`)
    /// until its parent is no smaller, and returns the element's final position.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
        // Take out the value at `pos` and create a hole.
        // SAFETY: The caller guarantees that pos < self.len()
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };

        while hole.pos() > start {
            let parent = (hole.pos() - 1) / 2;

            // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
            //  and so hole.pos() - 1 can't underflow.
            //  This guarantees that parent < hole.pos() so
            //  it's a valid index and also != hole.pos().
            if hole.element() <= unsafe { hole.get(parent) } {
                break;
            }

            // SAFETY: Same as above
            unsafe { hole.move_to(parent) };
        }

        hole.pos()
    }
689 
    /// Take an element at `pos` and move it down the heap,
    /// while its children are larger.
    ///
    /// Only the prefix `[0, end)` of the backing vector is treated as the heap.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < end <= self.len()`.
    unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
        // SAFETY: The caller guarantees that pos < end <= self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        // The bound `end.saturating_sub(2)` ensures both children exist; the
        // single-child case is handled after the loop.
        while child <= end.saturating_sub(2) {
            // compare with the greater of the two children
            // SAFETY: child < end - 1 < self.len() and
            //  child + 1 < end <= self.len(), so they're valid indexes.
            //  child == 2 * hole.pos() + 1 != hole.pos() and
            //  child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // if we are already in order, stop.
            // SAFETY: child is now either the old child or the old child+1
            //  We already proven that both are < self.len() and != hole.pos()
            if hole.element() >= unsafe { hole.get(child) } {
                return;
            }

            // SAFETY: same as above.
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        // SAFETY: && short circuit, which means that in the
        //  second condition it's already true that child == end - 1 < self.len().
        if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
            // SAFETY: child is already proven to be a valid index and
            //  child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
    }
732 
    /// Take an element at `pos` and move it down the heap, while its children
    /// are larger. Convenience wrapper over `sift_down_range` using the whole
    /// heap as the range.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down(&mut self, pos: usize) {
        let len = self.len();
        // SAFETY: pos < len is guaranteed by the caller and
        //  obviously len = self.len() <= self.len().
        unsafe { self.sift_down_range(pos, len) };
    }
742 
    /// Take an element at `pos` and move it all the way down the heap,
    /// then sift it up to its position.
    ///
    /// Note: This is faster when the element is known to be large / should
    /// be closer to the bottom.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `pos < self.len()`.
    unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
        let end = self.len();
        let start = pos;

        // SAFETY: The caller guarantees that pos < self.len().
        let mut hole = unsafe { Hole::new(&mut self.data, pos) };
        let mut child = 2 * hole.pos() + 1;

        // Loop invariant: child == 2 * hole.pos() + 1.
        // Unlike sift_down_range, descend unconditionally to a leaf; the
        // element is placed by the final sift_up below.
        while child <= end.saturating_sub(2) {
            // SAFETY: child < end - 1 < self.len() and
            //  child + 1 < end <= self.len(), so they're valid indexes.
            //  child == 2 * hole.pos() + 1 != hole.pos() and
            //  child + 1 == 2 * hole.pos() + 2 != hole.pos().
            // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
            //  if T is a ZST
            child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;

            // SAFETY: Same as above
            unsafe { hole.move_to(child) };
            child = 2 * hole.pos() + 1;
        }

        if child == end - 1 {
            // SAFETY: child == end - 1 < self.len(), so it's a valid index
            //  and child == 2 * hole.pos() + 1 != hole.pos().
            unsafe { hole.move_to(child) };
        }
        pos = hole.pos();
        // Fill the hole before re-borrowing `self` for sift_up.
        drop(hole);

        // SAFETY: pos is the position in the hole and was already proven
        //  to be a valid index.
        unsafe { self.sift_up(start, pos) };
    }
787 
    /// Rebuild assuming data[0..start] is still a proper heap.
    fn rebuild_tail(&mut self, start: usize) {
        // Nothing was appended; the heap property already holds.
        if start == self.len() {
            return;
        }

        let tail_len = self.len() - start;

        // floor(log2(x)). Precondition: x >= 1 (see note below).
        #[inline(always)]
        fn log2_fast(x: usize) -> usize {
            (usize::BITS - x.leading_zeros() - 1) as usize
        }

        // `rebuild` takes O(self.len()) operations
        // and about 2 * self.len() comparisons in the worst case
        // while repeating `sift_up` takes O(tail_len * log(start)) operations
        // and about 1 * tail_len * log_2(start) comparisons in the worst case,
        // assuming start >= tail_len. For larger heaps, the crossover point
        // no longer follows this reasoning and was determined empirically.
        //
        // Note: when `start < tail_len` (which covers `start == 0`) we always
        // rebuild, so `log2_fast` is never called with 0.
        let better_to_rebuild = if start < tail_len {
            true
        } else if self.len() <= 2048 {
            2 * self.len() < tail_len * log2_fast(start)
        } else {
            2 * self.len() < tail_len * 11
        };

        if better_to_rebuild {
            self.rebuild();
        } else {
            // Sift each appended element up into the existing heap.
            for i in start..self.len() {
                // SAFETY: The index `i` is always less than self.len().
                unsafe { self.sift_up(0, i) };
            }
        }
    }
824 
rebuild(&mut self)825     fn rebuild(&mut self) {
826         let mut n = self.len() / 2;
827         while n > 0 {
828             n -= 1;
829             // SAFETY: n starts from self.len() / 2 and goes down to 0.
830             //  The only case when !(n < self.len()) is if
831             //  self.len() == 0, but it's ruled out by the loop condition.
832             unsafe { self.sift_down(n) };
833         }
834     }
835 
836     /// Moves all the elements of `other` into `self`, leaving `other` empty.
837     ///
838     /// # Examples
839     ///
840     /// Basic usage:
841     ///
842     /// ```
843     /// use std::collections::BinaryHeap;
844     ///
845     /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
846     /// let mut b = BinaryHeap::from([-20, 5, 43]);
847     ///
848     /// a.append(&mut b);
849     ///
850     /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
851     /// assert!(b.is_empty());
852     /// ```
853     #[stable(feature = "binary_heap_append", since = "1.11.0")]
append(&mut self, other: &mut Self)854     pub fn append(&mut self, other: &mut Self) {
855         if self.len() < other.len() {
856             swap(self, other);
857         }
858 
859         let start = self.data.len();
860 
861         self.data.append(&mut other.data);
862 
863         self.rebuild_tail(start);
864     }
865 
    /// Clears the binary heap, returning an iterator over the removed elements
    /// in heap order. If the iterator is dropped before being fully consumed,
    /// it drops the remaining elements in heap order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// Note:
    /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
    ///   You should use the latter for most cases.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_drain_sorted)]
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
    /// assert_eq!(heap.len(), 5);
    ///
    /// drop(heap.drain_sorted()); // removes all elements in heap order
    /// assert_eq!(heap.len(), 0);
    /// ```
    #[inline]
    #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
    pub fn drain_sorted(&mut self) -> DrainSorted<'_, T, A> {
        // The iterator removes elements lazily; `DrainSorted`'s `Drop` impl
        // clears whatever remains.
        DrainSorted { inner: self }
    }
896 
    /// Retains only the elements specified by the predicate.
    ///
    /// In other words, remove all elements `e` for which `f(&e)` returns
    /// `false`. The elements are visited in unsorted (and unspecified) order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
    ///
    /// heap.retain(|x| x % 2 == 0); // only keep even numbers
    ///
    /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
    /// ```
    #[stable(feature = "binary_heap_retain", since = "1.70.0")]
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&T) -> bool,
    {
        // `rebuild_from` will be updated to the first touched element below, and the rebuild
        // will only be done for the tail. The guard also restores the heap
        // property even if `f` panics.
        let mut guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
        let mut i = 0;

        guard.heap.data.retain(|e| {
            let keep = f(e);
            // Record the first removal position: everything from there on
            // shifts left and may violate the heap property.
            if !keep && i < guard.rebuild_from {
                guard.rebuild_from = i;
            }
            i += 1;
            keep
        });
    }
934 }
935 
936 impl<T, A: Allocator> BinaryHeap<T, A> {
    /// Returns an iterator visiting all values in the underlying vector, in
    /// arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4]);
    ///
    /// // Print 1, 2, 3, 4 in arbitrary order
    /// for x in heap.iter() {
    ///     println!("{x}");
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn iter(&self) -> Iter<'_, T> {
        // Wraps the backing vector's slice iterator (storage order).
        Iter { iter: self.data.iter() }
    }

    /// Returns an iterator which retrieves elements in heap order.
    /// This method consumes the original heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_into_iter_sorted)]
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
    /// ```
    #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
    pub fn into_iter_sorted(self) -> IntoIterSorted<T, A> {
        // The iterator pops from the owned heap on each `next` call.
        IntoIterSorted { inner: self }
    }
976 
977     /// Returns the greatest item in the binary heap, or `None` if it is empty.
978     ///
979     /// # Examples
980     ///
981     /// Basic usage:
982     ///
983     /// ```
984     /// use std::collections::BinaryHeap;
985     /// let mut heap = BinaryHeap::new();
986     /// assert_eq!(heap.peek(), None);
987     ///
988     /// heap.push(1);
989     /// heap.push(5);
990     /// heap.push(2);
991     /// assert_eq!(heap.peek(), Some(&5));
992     ///
993     /// ```
994     ///
995     /// # Time complexity
996     ///
997     /// Cost is *O*(1) in the worst case.
998     #[must_use]
999     #[stable(feature = "rust1", since = "1.0.0")]
peek(&self) -> Option<&T>1000     pub fn peek(&self) -> Option<&T> {
1001         self.data.get(0)
1002     }
1003 
    /// Returns the number of elements the binary heap can hold without reallocating.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::with_capacity(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn capacity(&self) -> usize {
        // Capacity is tracked entirely by the backing `Vec`.
        self.data.capacity()
    }
1021 
    /// Reserves the minimum capacity for at least `additional` elements more than
    /// the current length. Unlike [`reserve`], this will not
    /// deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `reserve_exact`, capacity will be greater than or equal to
    /// `self.len() + additional`. Does nothing if the capacity is already
    /// sufficient.
    ///
    /// [`reserve`]: BinaryHeap::reserve
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows [`usize`].
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve_exact(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve_exact(&mut self, additional: usize) {
        self.data.reserve_exact(additional);
    }

    /// Reserves capacity for at least `additional` elements more than the
    /// current length. The allocator may reserve more space to speculatively
    /// avoid frequent allocations. After calling `reserve`,
    /// capacity will be greater than or equal to `self.len() + additional`.
    /// Does nothing if capacity is already sufficient.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows [`usize`].
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    /// heap.reserve(100);
    /// assert!(heap.capacity() >= 100);
    /// heap.push(4);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn reserve(&mut self, additional: usize) {
        self.data.reserve(additional);
    }
1078 
    /// Tries to reserve the minimum capacity for at least `additional` elements
    /// more than the current length. Unlike [`try_reserve`], this will not
    /// deliberately over-allocate to speculatively avoid frequent allocations.
    /// After calling `try_reserve_exact`, capacity will be greater than or
    /// equal to `self.len() + additional` if it returns `Ok(())`.
    /// Does nothing if the capacity is already sufficient.
    ///
    /// Note that the allocator may give the collection more space than it
    /// requests. Therefore, capacity can not be relied upon to be precisely
    /// minimal. Prefer [`try_reserve`] if future insertions are expected.
    ///
    /// [`try_reserve`]: BinaryHeap::try_reserve
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve_exact(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[stable(feature = "try_reserve_2", since = "1.63.0")]
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Errors propagate directly from the backing `Vec`.
        self.data.try_reserve_exact(additional)
    }

    /// Tries to reserve capacity for at least `additional` elements more than the
    /// current length. The allocator may reserve more space to speculatively
    /// avoid frequent allocations. After calling `try_reserve`, capacity will be
    /// greater than or equal to `self.len() + additional` if it returns
    /// `Ok(())`. Does nothing if capacity is already sufficient. This method
    /// preserves the contents even if an error occurs.
    ///
    /// # Errors
    ///
    /// If the capacity overflows, or the allocator reports a failure, then an error
    /// is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// use std::collections::TryReserveError;
    ///
    /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
    ///     let mut heap = BinaryHeap::new();
    ///
    ///     // Pre-reserve the memory, exiting if we can't
    ///     heap.try_reserve(data.len())?;
    ///
    ///     // Now we know this can't OOM in the middle of our complex work
    ///     heap.extend(data.iter());
    ///
    ///     Ok(heap.pop())
    /// }
    /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
    /// ```
    #[stable(feature = "try_reserve_2", since = "1.63.0")]
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Errors propagate directly from the backing `Vec`.
        self.data.try_reserve(additional)
    }
1156 
    /// Discards as much additional capacity as possible.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to_fit();
    /// assert!(heap.capacity() == 0);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn shrink_to_fit(&mut self) {
        // Shrinking never touches element order, so the heap property holds.
        self.data.shrink_to_fit();
    }

    /// Discards capacity with a lower bound.
    ///
    /// The capacity will remain at least as large as both the length
    /// and the supplied value.
    ///
    /// If the current capacity is less than the lower limit, this is a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
    ///
    /// assert!(heap.capacity() >= 100);
    /// heap.shrink_to(10);
    /// assert!(heap.capacity() >= 10);
    /// ```
    #[inline]
    #[stable(feature = "shrink_to", since = "1.56.0")]
    pub fn shrink_to(&mut self, min_capacity: usize) {
        self.data.shrink_to(min_capacity)
    }
1198 
    /// Returns a slice of all values in the underlying vector, in arbitrary
    /// order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// #![feature(binary_heap_as_slice)]
    /// use std::collections::BinaryHeap;
    /// use std::io::{self, Write};
    ///
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    ///
    /// io::sink().write(heap.as_slice()).unwrap();
    /// ```
    #[must_use]
    #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
    pub fn as_slice(&self) -> &[T] {
        self.data.as_slice()
    }

    /// Consumes the `BinaryHeap` and returns the underlying vector
    /// in arbitrary order.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
    /// let vec = heap.into_vec();
    ///
    /// // Will print in some order
    /// for x in vec {
    ///     println!("{x}");
    /// }
    /// ```
    #[must_use = "`self` will be dropped if the result is not used"]
    #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
    pub fn into_vec(self) -> Vec<T, A> {
        // Conversion is provided by the `From<BinaryHeap<T, A>> for Vec<T, A>` impl.
        self.into()
    }

    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn allocator(&self) -> &A {
        self.data.allocator()
    }
1250 
    /// Returns the length of the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let heap = BinaryHeap::from([1, 3]);
    ///
    /// assert_eq!(heap.len(), 2);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn len(&self) -> usize {
        // The heap's length is exactly the backing vector's length.
        self.data.len()
    }

    /// Checks if the binary heap is empty.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::new();
    ///
    /// assert!(heap.is_empty());
    ///
    /// heap.push(3);
    /// heap.push(5);
    /// heap.push(1);
    ///
    /// assert!(!heap.is_empty());
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
1292 
    /// Clears the binary heap, returning an iterator over the removed elements
    /// in arbitrary order. If the iterator is dropped before being fully
    /// consumed, it drops the remaining elements in arbitrary order.
    ///
    /// The returned iterator keeps a mutable borrow on the heap to optimize
    /// its implementation.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// for x in heap.drain() {
    ///     println!("{x}");
    /// }
    ///
    /// assert!(heap.is_empty());
    /// ```
    #[inline]
    #[stable(feature = "drain", since = "1.6.0")]
    pub fn drain(&mut self) -> Drain<'_, T, A> {
        // Delegates to `Vec::drain` over the full range (storage order).
        Drain { iter: self.data.drain(..) }
    }

    /// Drops all items from the binary heap.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::collections::BinaryHeap;
    /// let mut heap = BinaryHeap::from([1, 3]);
    ///
    /// assert!(!heap.is_empty());
    ///
    /// heap.clear();
    /// 
    /// assert!(heap.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn clear(&mut self) {
        // Dropping the returned `Drain` iterator removes and drops every element.
        self.drain();
    }
1342 }
1343 
/// Hole represents a hole in a slice i.e., an index without valid value
/// (because it was moved from or duplicated).
/// In drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
struct Hole<'a, T: 'a> {
    /// The slice containing the hole.
    data: &'a mut [T],
    /// The value read out of `data[pos]`; written back on drop.
    elt: ManuallyDrop<T>,
    /// The current index of the hole within `data`.
    pos: usize,
}

impl<'a, T> Hole<'a, T> {
    /// Create a new `Hole` at index `pos`.
    ///
    /// Unsafe because pos must be within the data slice.
    #[inline]
    unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
        debug_assert!(pos < data.len());
        // SAFETY: pos should be inside the slice
        let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
        Hole { data, elt: ManuallyDrop::new(elt), pos }
    }

    /// Returns the current position of the hole.
    #[inline]
    fn pos(&self) -> usize {
        self.pos
    }

    /// Returns a reference to the element removed.
    #[inline]
    fn element(&self) -> &T {
        &self.elt
    }

    /// Returns a reference to the element at `index`.
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn get(&self, index: usize) -> &T {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        unsafe { self.data.get_unchecked(index) }
    }

    /// Move hole to new location
    ///
    /// Unsafe because index must be within the data slice and not equal to pos.
    #[inline]
    unsafe fn move_to(&mut self, index: usize) {
        debug_assert!(index != self.pos);
        debug_assert!(index < self.data.len());
        unsafe {
            let ptr = self.data.as_mut_ptr();
            let index_ptr: *const _ = ptr.add(index);
            let hole_ptr = ptr.add(self.pos);
            // Move the element at `index` into the hole; `index` becomes the
            // new hole. Non-overlapping because index != self.pos.
            ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
        }
        self.pos = index;
    }
}

impl<T> Drop for Hole<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // fill the hole again
        // SAFETY: `pos` is kept in bounds by `new` and `move_to`; `elt` is
        //  wrapped in `ManuallyDrop`, so moving it back cannot double-drop.
        unsafe {
            let pos = self.pos;
            ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
        }
    }
}
1414 
/// An iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::iter()`]. See its
/// documentation for more.
///
/// [`iter`]: BinaryHeap::iter
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    // Iterates the heap's backing vector in storage (arbitrary) order.
    iter: slice::Iter<'a, T>,
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Shows the not-yet-yielded elements.
        f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
    }
}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
    fn clone(&self) -> Self {
        Iter { iter: self.iter.clone() }
    }
}
1441 
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;

    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }

    #[inline]
    fn last(self) -> Option<&'a T> {
        // Delegate to the slice iterator's specialized `last`.
        self.iter.last()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        self.iter.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

// Delegates to `slice::Iter`, which never resumes after returning `None`.
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
1479 
/// An owning iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::into_iter()`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BinaryHeap::into_iter
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // Consumes the heap's backing vector in storage (arbitrary) order.
    iter: vec::IntoIter<T, A>,
}

impl<T, A: Allocator> IntoIter<T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.iter.allocator()
    }
}
1502 
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Shows the not-yet-yielded elements.
        f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

// Delegates to `vec::IntoIter`, which never resumes after returning `None`.
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
1542 
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for IntoIter<T> {
    /// Creates an empty `binary_heap::IntoIter`.
    ///
    /// ```
    /// # use std::collections::binary_heap;
    /// let iter: binary_heap::IntoIter<u8> = Default::default();
    /// assert_eq!(iter.len(), 0);
    /// ```
    fn default() -> Self {
        // An empty `vec::IntoIter` yields nothing.
        IntoIter { iter: Default::default() }
    }
}
1556 
// In addition to the SAFETY invariants of the following three unsafe traits
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
    type Source = IntoIter<T, A>;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut Self::Source {
        // `IntoIter` is its own source: it wraps a `vec::IntoIter` directly.
        self
    }
}

#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {}

unsafe impl<I> AsVecIntoIter for IntoIter<I> {
    type Item = I;

    fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
        // Exposes the wrapped `vec::IntoIter` for in-place collection.
        &mut self.iter
    }
}
1581 
/// An owning iterator that removes and returns the elements of a
/// `BinaryHeap` in heap order.
///
/// This `struct` is created by [`BinaryHeap::into_iter_sorted()`]. See its
/// documentation for more.
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
pub struct IntoIterSorted<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // The remaining (still unconsumed) heap.
    inner: BinaryHeap<T, A>,
}

impl<T, A: Allocator> IntoIterSorted<T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.inner.allocator()
    }
}
1599 
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> Iterator for IntoIterSorted<T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        // Each `pop` removes the current top of the heap.
        self.inner.pop()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The heap's length is known exactly, so the hint is exact.
        let exact = self.inner.len();
        (exact, Some(exact))
    }
}

#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> ExactSizeIterator for IntoIterSorted<T, A> {}

// Once the heap is empty, `pop` keeps returning `None`, so the iterator is fused.
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> FusedIterator for IntoIterSorted<T, A> {}

// SAFETY: `size_hint` above is exact (both bounds equal the heap's length).
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord, A: Allocator> TrustedLen for IntoIterSorted<T, A> {}
1624 
/// A draining iterator over the elements of a `BinaryHeap`,
/// yielded in arbitrary order.
///
/// This `struct` is created by [`BinaryHeap::drain()`]. See its
/// documentation for more.
///
/// [`drain`]: BinaryHeap::drain
#[stable(feature = "drain", since = "1.6.0")]
#[derive(Debug)]
pub struct Drain<
    'a,
    T: 'a,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // Drains the heap's backing vector in storage (arbitrary) order.
    iter: vec::Drain<'a, T, A>,
}

impl<T, A: Allocator> Drain<'_, T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        self.iter.allocator()
    }
}
1648 
1649 #[stable(feature = "drain", since = "1.6.0")]
1650 impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
1651     type Item = T;
1652 
1653     #[inline]
next(&mut self) -> Option<T>1654     fn next(&mut self) -> Option<T> {
1655         self.iter.next()
1656     }
1657 
1658     #[inline]
size_hint(&self) -> (usize, Option<usize>)1659     fn size_hint(&self) -> (usize, Option<usize>) {
1660         self.iter.size_hint()
1661     }
1662 }
1663 
1664 #[stable(feature = "drain", since = "1.6.0")]
1665 impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
1666     #[inline]
next_back(&mut self) -> Option<T>1667     fn next_back(&mut self) -> Option<T> {
1668         self.iter.next_back()
1669     }
1670 }
1671 
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
    // Forward `is_empty` (unstable) rather than relying on the default
    // `len() == 0`, mirroring the inner `vec::Drain`.
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
1678 
// Marker impl: fused because the inner `vec::Drain` is itself fused.
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
1681 
/// A draining iterator over the elements of a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
/// documentation for more.
///
/// [`drain_sorted`]: BinaryHeap::drain_sorted
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
#[derive(Debug)]
pub struct DrainSorted<
    'a,
    T: Ord,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    // Exclusive borrow of the heap being drained. Elements are removed via
    // `pop` (see the `Iterator` and `Drop` impls below), so — unlike `Drain`
    // — they come out in heap order.
    inner: &'a mut BinaryHeap<T, A>,
}
1697 
impl<'a, T: Ord, A: Allocator> DrainSorted<'a, T, A> {
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    pub fn allocator(&self) -> &A {
        // Delegate to the borrowed heap.
        self.inner.allocator()
    }
}
1705 
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<'a, T: Ord, A: Allocator> Drop for DrainSorted<'a, T, A> {
    /// Removes heap elements in heap order.
    fn drop(&mut self) {
        // Panic-safety guard: if dropping an `item` below unwinds, the guard
        // is dropped during unwinding and pops (and thereby drops) every
        // remaining element, so the heap is still left empty.
        struct DropGuard<'r, 'a, T: Ord, A: Allocator>(&'r mut DrainSorted<'a, T, A>);

        impl<'r, 'a, T: Ord, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
            fn drop(&mut self) {
                while self.0.inner.pop().is_some() {}
            }
        }

        // Pop one element at a time; arm the guard only around the `drop`
        // call, and disarm it with `mem::forget` when the drop succeeds.
        while let Some(item) = self.inner.pop() {
            let guard = DropGuard(self);
            drop(item);
            mem::forget(guard);
        }
    }
}
1725 
1726 #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1727 impl<T: Ord, A: Allocator> Iterator for DrainSorted<'_, T, A> {
1728     type Item = T;
1729 
1730     #[inline]
next(&mut self) -> Option<T>1731     fn next(&mut self) -> Option<T> {
1732         self.inner.pop()
1733     }
1734 
1735     #[inline]
size_hint(&self) -> (usize, Option<usize>)1736     fn size_hint(&self) -> (usize, Option<usize>) {
1737         let exact = self.inner.len();
1738         (exact, Some(exact))
1739     }
1740 }
1741 
// Marker impl: justified because `size_hint` above returns the exact heap
// length for both bounds.
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> ExactSizeIterator for DrainSorted<'_, T, A> {}
1744 
// Marker impl: `next` delegates to `pop`, which keeps returning `None`
// once the heap is empty, so the iterator is trivially fused.
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> FusedIterator for DrainSorted<'_, T, A> {}
1747 
// SAFETY: `size_hint` reports the exact heap length for both bounds (see
// the `Iterator` impl above), which is what `TrustedLen` requires.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T: Ord, A: Allocator> TrustedLen for DrainSorted<'_, T, A> {}
1750 
1751 #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1752 impl<T: Ord, A: Allocator> From<Vec<T, A>> for BinaryHeap<T, A> {
1753     /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
1754     ///
1755     /// This conversion happens in-place, and has *O*(*n*) time complexity.
from(vec: Vec<T, A>) -> BinaryHeap<T, A>1756     fn from(vec: Vec<T, A>) -> BinaryHeap<T, A> {
1757         let mut heap = BinaryHeap { data: vec };
1758         heap.rebuild();
1759         heap
1760     }
1761 }
1762 
1763 #[stable(feature = "std_collections_from_array", since = "1.56.0")]
1764 impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
1765     /// ```
1766     /// use std::collections::BinaryHeap;
1767     ///
1768     /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
1769     /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
1770     /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
1771     ///     assert_eq!(a, b);
1772     /// }
1773     /// ```
from(arr: [T; N]) -> Self1774     fn from(arr: [T; N]) -> Self {
1775         Self::from_iter(arr)
1776     }
1777 }
1778 
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
impl<T, A: Allocator> From<BinaryHeap<T, A>> for Vec<T, A> {
    /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
    ///
    /// This conversion requires no data movement or allocation, and has
    /// constant time complexity.
    fn from(heap: BinaryHeap<T, A>) -> Vec<T, A> {
        // Just hand back the backing vector; elements stay in the heap's
        // internal order, not sorted order.
        heap.data
    }
}
1789 
1790 #[stable(feature = "rust1", since = "1.0.0")]
1791 impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T>1792     fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
1793         BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
1794     }
1795 }
1796 
1797 #[stable(feature = "rust1", since = "1.0.0")]
1798 impl<T, A: Allocator> IntoIterator for BinaryHeap<T, A> {
1799     type Item = T;
1800     type IntoIter = IntoIter<T, A>;
1801 
1802     /// Creates a consuming iterator, that is, one that moves each value out of
1803     /// the binary heap in arbitrary order. The binary heap cannot be used
1804     /// after calling this.
1805     ///
1806     /// # Examples
1807     ///
1808     /// Basic usage:
1809     ///
1810     /// ```
1811     /// use std::collections::BinaryHeap;
1812     /// let heap = BinaryHeap::from([1, 2, 3, 4]);
1813     ///
1814     /// // Print 1, 2, 3, 4 in arbitrary order
1815     /// for x in heap.into_iter() {
1816     ///     // x has type i32, not &i32
1817     ///     println!("{x}");
1818     /// }
1819     /// ```
into_iter(self) -> IntoIter<T, A>1820     fn into_iter(self) -> IntoIter<T, A> {
1821         IntoIter { iter: self.data.into_iter() }
1822     }
1823 }
1824 
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, A: Allocator> IntoIterator for &'a BinaryHeap<T, A> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;

    /// Returns a borrowing iterator, enabling `for x in &heap` loops.
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}
1834 
1835 #[stable(feature = "rust1", since = "1.0.0")]
1836 impl<T: Ord, A: Allocator> Extend<T> for BinaryHeap<T, A> {
1837     #[inline]
extend<I: IntoIterator<Item = T>>(&mut self, iter: I)1838     fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
1839         let guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
1840         guard.heap.data.extend(iter);
1841     }
1842 
1843     #[inline]
extend_one(&mut self, item: T)1844     fn extend_one(&mut self, item: T) {
1845         self.push(item);
1846     }
1847 
1848     #[inline]
extend_reserve(&mut self, additional: usize)1849     fn extend_reserve(&mut self, additional: usize) {
1850         self.reserve(additional);
1851     }
1852 }
1853 
1854 #[stable(feature = "extend_ref", since = "1.2.0")]
1855 impl<'a, T: 'a + Ord + Copy, A: Allocator> Extend<&'a T> for BinaryHeap<T, A> {
extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I)1856     fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
1857         self.extend(iter.into_iter().cloned());
1858     }
1859 
1860     #[inline]
extend_one(&mut self, &item: &'a T)1861     fn extend_one(&mut self, &item: &'a T) {
1862         self.push(item);
1863     }
1864 
1865     #[inline]
extend_reserve(&mut self, additional: usize)1866     fn extend_reserve(&mut self, additional: usize) {
1867         self.reserve(additional);
1868     }
1869 }
1870