1 | //! A priority queue implemented with a binary heap. |
2 | //! |
3 | //! Insertion and popping the largest element have *O*(log(*n*)) time complexity. |
4 | //! Checking the largest element is *O*(1). Converting a vector to a binary heap |
5 | //! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be |
6 | //! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*)) |
7 | //! in-place heapsort. |
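//!
//! For example, the two in-place conversions can be combined into a simple
//! heapsort (a minimal sketch using only the public API):
//!
//! ```
//! use std::collections::BinaryHeap;
//!
//! // `From<Vec<T>>` heapifies the vector in-place in O(n).
//! let heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
//!
//! // `into_sorted_vec` then sorts in-place in O(n * log(n)).
//! assert_eq!(heap.into_sorted_vec(), [1, 1, 3, 4, 5]);
//! ```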
8 | //! |
9 | //! # Examples |
10 | //! |
11 | //! This is a larger example that implements [Dijkstra's algorithm][dijkstra] |
12 | //! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph]. |
13 | //! It shows how to use [`BinaryHeap`] with custom types. |
14 | //! |
15 | //! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm |
16 | //! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem |
17 | //! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph |
18 | //! |
19 | //! ``` |
20 | //! use std::cmp::Ordering; |
21 | //! use std::collections::BinaryHeap; |
22 | //! |
23 | //! #[derive(Copy, Clone, Eq, PartialEq)] |
24 | //! struct State { |
25 | //! cost: usize, |
26 | //! position: usize, |
27 | //! } |
28 | //! |
29 | //! // The priority queue depends on `Ord`. |
30 | //! // Explicitly implement the trait so the queue becomes a min-heap |
31 | //! // instead of a max-heap. |
32 | //! impl Ord for State { |
33 | //! fn cmp(&self, other: &Self) -> Ordering { |
//! // Notice that we flip the ordering on costs.
35 | //! // In case of a tie we compare positions - this step is necessary |
36 | //! // to make implementations of `PartialEq` and `Ord` consistent. |
37 | //! other.cost.cmp(&self.cost) |
38 | //! .then_with(|| self.position.cmp(&other.position)) |
39 | //! } |
40 | //! } |
41 | //! |
42 | //! // `PartialOrd` needs to be implemented as well. |
43 | //! impl PartialOrd for State { |
44 | //! fn partial_cmp(&self, other: &Self) -> Option<Ordering> { |
45 | //! Some(self.cmp(other)) |
46 | //! } |
47 | //! } |
48 | //! |
49 | //! // Each node is represented as a `usize`, for a shorter implementation. |
50 | //! struct Edge { |
51 | //! node: usize, |
52 | //! cost: usize, |
53 | //! } |
54 | //! |
55 | //! // Dijkstra's shortest path algorithm. |
56 | //! |
57 | //! // Start at `start` and use `dist` to track the current shortest distance |
58 | //! // to each node. This implementation isn't memory-efficient as it may leave duplicate |
59 | //! // nodes in the queue. It also uses `usize::MAX` as a sentinel value, |
60 | //! // for a simpler implementation. |
61 | //! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> { |
62 | //! // dist[node] = current shortest distance from `start` to `node` |
63 | //! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect(); |
64 | //! |
65 | //! let mut heap = BinaryHeap::new(); |
66 | //! |
67 | //! // We're at `start`, with a zero cost |
68 | //! dist[start] = 0; |
69 | //! heap.push(State { cost: 0, position: start }); |
70 | //! |
71 | //! // Examine the frontier with lower cost nodes first (min-heap) |
72 | //! while let Some(State { cost, position }) = heap.pop() { |
73 | //! // Alternatively we could have continued to find all shortest paths |
74 | //! if position == goal { return Some(cost); } |
75 | //! |
76 | //! // Important as we may have already found a better way |
77 | //! if cost > dist[position] { continue; } |
78 | //! |
79 | //! // For each node we can reach, see if we can find a way with |
80 | //! // a lower cost going through this node |
81 | //! for edge in &adj_list[position] { |
82 | //! let next = State { cost: cost + edge.cost, position: edge.node }; |
83 | //! |
84 | //! // If so, add it to the frontier and continue |
85 | //! if next.cost < dist[next.position] { |
86 | //! heap.push(next); |
87 | //! // Relaxation, we have now found a better way |
88 | //! dist[next.position] = next.cost; |
89 | //! } |
90 | //! } |
91 | //! } |
92 | //! |
93 | //! // Goal not reachable |
94 | //! None |
95 | //! } |
96 | //! |
97 | //! fn main() { |
98 | //! // This is the directed graph we're going to use. |
99 | //! // The node numbers correspond to the different states, |
100 | //! // and the edge weights symbolize the cost of moving |
101 | //! // from one node to another. |
102 | //! // Note that the edges are one-way. |
103 | //! // |
104 | //! // 7 |
105 | //! // +-----------------+ |
106 | //! // | | |
107 | //! // v 1 2 | 2 |
108 | //! // 0 -----> 1 -----> 3 ---> 4 |
109 | //! // | ^ ^ ^ |
110 | //! // | | 1 | | |
111 | //! // | | | 3 | 1 |
112 | //! // +------> 2 -------+ | |
113 | //! // 10 | | |
114 | //! // +---------------+ |
115 | //! // |
116 | //! // The graph is represented as an adjacency list where each index, |
117 | //! // corresponding to a node value, has a list of outgoing edges. |
118 | //! // Chosen for its efficiency. |
119 | //! let graph = vec![ |
120 | //! // Node 0 |
121 | //! vec![Edge { node: 2, cost: 10 }, |
122 | //! Edge { node: 1, cost: 1 }], |
123 | //! // Node 1 |
124 | //! vec![Edge { node: 3, cost: 2 }], |
125 | //! // Node 2 |
126 | //! vec![Edge { node: 1, cost: 1 }, |
127 | //! Edge { node: 3, cost: 3 }, |
128 | //! Edge { node: 4, cost: 1 }], |
129 | //! // Node 3 |
130 | //! vec![Edge { node: 0, cost: 7 }, |
131 | //! Edge { node: 4, cost: 2 }], |
132 | //! // Node 4 |
133 | //! vec![]]; |
134 | //! |
135 | //! assert_eq!(shortest_path(&graph, 0, 1), Some(1)); |
136 | //! assert_eq!(shortest_path(&graph, 0, 3), Some(3)); |
137 | //! assert_eq!(shortest_path(&graph, 3, 0), Some(7)); |
138 | //! assert_eq!(shortest_path(&graph, 0, 4), Some(5)); |
139 | //! assert_eq!(shortest_path(&graph, 4, 0), None); |
140 | //! } |
141 | //! ``` |
142 | |
#![allow(missing_docs)]
#![stable(feature = "rust1", since = "1.0.0")]
145 | |
146 | use core::alloc::Allocator; |
147 | use core::fmt; |
148 | use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen}; |
149 | use core::mem::{self, swap, ManuallyDrop}; |
150 | use core::num::NonZeroUsize; |
151 | use core::ops::{Deref, DerefMut}; |
152 | use core::ptr; |
153 | |
154 | use crate::alloc::Global; |
155 | use crate::collections::TryReserveError; |
156 | use crate::slice; |
157 | use crate::vec::{self, AsVecIntoIter, Vec}; |
158 | |
#[cfg(test)]
160 | mod tests; |
161 | |
162 | /// A priority queue implemented with a binary heap. |
163 | /// |
164 | /// This will be a max-heap. |
165 | /// |
166 | /// It is a logic error for an item to be modified in such a way that the |
167 | /// item's ordering relative to any other item, as determined by the [`Ord`] |
168 | /// trait, changes while it is in the heap. This is normally only possible |
/// through interior mutability (e.g. [`Cell`] or [`RefCell`]), global state, I/O, or unsafe code. The
170 | /// behavior resulting from such a logic error is not specified, but will |
171 | /// be encapsulated to the `BinaryHeap` that observed the logic error and not |
172 | /// result in undefined behavior. This could include panics, incorrect results, |
173 | /// aborts, memory leaks, and non-termination. |
174 | /// |
175 | /// As long as no elements change their relative order while being in the heap |
176 | /// as described above, the API of `BinaryHeap` guarantees that the heap |
/// invariant remains intact, i.e. its methods all behave as documented. For
178 | /// example if a method is documented as iterating in sorted order, that's |
179 | /// guaranteed to work as long as elements in the heap have not changed order, |
/// even in the presence of closures getting unwound out of, iterators getting
181 | /// leaked, and similar foolishness. |
182 | /// |
183 | /// # Examples |
184 | /// |
185 | /// ``` |
186 | /// use std::collections::BinaryHeap; |
187 | /// |
188 | /// // Type inference lets us omit an explicit type signature (which |
189 | /// // would be `BinaryHeap<i32>` in this example). |
190 | /// let mut heap = BinaryHeap::new(); |
191 | /// |
192 | /// // We can use peek to look at the next item in the heap. In this case, |
/// // there are no items in there yet, so we get None.
194 | /// assert_eq!(heap.peek(), None); |
195 | /// |
196 | /// // Let's add some scores... |
197 | /// heap.push(1); |
198 | /// heap.push(5); |
199 | /// heap.push(2); |
200 | /// |
201 | /// // Now peek shows the most important item in the heap. |
202 | /// assert_eq!(heap.peek(), Some(&5)); |
203 | /// |
204 | /// // We can check the length of a heap. |
205 | /// assert_eq!(heap.len(), 3); |
206 | /// |
207 | /// // We can iterate over the items in the heap, although they are returned in |
208 | /// // a random order. |
209 | /// for x in &heap { |
210 | /// println!("{x}" ); |
211 | /// } |
212 | /// |
213 | /// // If we instead pop these scores, they should come back in order. |
214 | /// assert_eq!(heap.pop(), Some(5)); |
215 | /// assert_eq!(heap.pop(), Some(2)); |
216 | /// assert_eq!(heap.pop(), Some(1)); |
217 | /// assert_eq!(heap.pop(), None); |
218 | /// |
219 | /// // We can clear the heap of any remaining items. |
220 | /// heap.clear(); |
221 | /// |
222 | /// // The heap should now be empty. |
223 | /// assert!(heap.is_empty()) |
224 | /// ``` |
225 | /// |
226 | /// A `BinaryHeap` with a known list of items can be initialized from an array: |
227 | /// |
228 | /// ``` |
229 | /// use std::collections::BinaryHeap; |
230 | /// |
231 | /// let heap = BinaryHeap::from([1, 5, 2]); |
232 | /// ``` |
233 | /// |
234 | /// ## Min-heap |
235 | /// |
236 | /// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to |
237 | /// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest |
238 | /// value instead of the greatest one. |
239 | /// |
240 | /// ``` |
241 | /// use std::collections::BinaryHeap; |
242 | /// use std::cmp::Reverse; |
243 | /// |
244 | /// let mut heap = BinaryHeap::new(); |
245 | /// |
246 | /// // Wrap values in `Reverse` |
247 | /// heap.push(Reverse(1)); |
248 | /// heap.push(Reverse(5)); |
249 | /// heap.push(Reverse(2)); |
250 | /// |
251 | /// // If we pop these scores now, they should come back in the reverse order. |
252 | /// assert_eq!(heap.pop(), Some(Reverse(1))); |
253 | /// assert_eq!(heap.pop(), Some(Reverse(2))); |
254 | /// assert_eq!(heap.pop(), Some(Reverse(5))); |
255 | /// assert_eq!(heap.pop(), None); |
256 | /// ``` |
257 | /// |
258 | /// # Time complexity |
259 | /// |
260 | /// | [push] | [pop] | [peek]/[peek\_mut] | |
261 | /// |---------|---------------|--------------------| |
262 | /// | *O*(1)~ | *O*(log(*n*)) | *O*(1) | |
263 | /// |
264 | /// The value for `push` is an expected cost; the method documentation gives a |
265 | /// more detailed analysis. |
266 | /// |
267 | /// [`core::cmp::Reverse`]: core::cmp::Reverse |
268 | /// [`Cell`]: core::cell::Cell |
269 | /// [`RefCell`]: core::cell::RefCell |
270 | /// [push]: BinaryHeap::push |
271 | /// [pop]: BinaryHeap::pop |
272 | /// [peek]: BinaryHeap::peek |
273 | /// [peek\_mut]: BinaryHeap::peek_mut |
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
276 | pub struct BinaryHeap< |
277 | T, |
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
279 | > { |
280 | data: Vec<T, A>, |
281 | } |
282 | |
283 | /// Structure wrapping a mutable reference to the greatest item on a |
284 | /// `BinaryHeap`. |
285 | /// |
286 | /// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See |
287 | /// its documentation for more. |
288 | /// |
289 | /// [`peek_mut`]: BinaryHeap::peek_mut |
290 | #[stable (feature = "binary_heap_peek_mut" , since = "1.12.0" )] |
291 | pub struct PeekMut< |
292 | 'a, |
293 | T: 'a + Ord, |
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
295 | > { |
296 | heap: &'a mut BinaryHeap<T, A>, |
297 | // If a set_len + sift_down are required, this is Some. If a &mut T has not |
298 | // yet been exposed to peek_mut()'s caller, it's None. |
299 | original_len: Option<NonZeroUsize>, |
300 | } |
301 | |
302 | #[stable (feature = "collection_debug" , since = "1.17.0" )] |
303 | impl<T: Ord + fmt::Debug, A: Allocator> fmt::Debug for PeekMut<'_, T, A> { |
304 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
305 | f.debug_tuple(name:"PeekMut" ).field(&self.heap.data[0]).finish() |
306 | } |
307 | } |
308 | |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
310 | impl<T: Ord, A: Allocator> Drop for PeekMut<'_, T, A> { |
311 | fn drop(&mut self) { |
if let Some(original_len) = self.original_len {
313 | // SAFETY: That's how many elements were in the Vec at the time of |
314 | // the PeekMut::deref_mut call, and therefore also at the time of |
315 | // the BinaryHeap::peek_mut call. Since the PeekMut did not end up |
316 | // getting leaked, we are now undoing the leak amplification that |
317 | // the DerefMut prepared for. |
unsafe { self.heap.data.set_len(original_len.get()) };
319 | |
320 | // SAFETY: PeekMut is only instantiated for non-empty heaps. |
unsafe { self.heap.sift_down(0) };
322 | } |
323 | } |
324 | } |
325 | |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
327 | impl<T: Ord, A: Allocator> Deref for PeekMut<'_, T, A> { |
328 | type Target = T; |
329 | fn deref(&self) -> &T { |
330 | debug_assert!(!self.heap.is_empty()); |
331 | // SAFE: PeekMut is only instantiated for non-empty heaps |
unsafe { self.heap.data.get_unchecked(0) }
333 | } |
334 | } |
335 | |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
337 | impl<T: Ord, A: Allocator> DerefMut for PeekMut<'_, T, A> { |
338 | fn deref_mut(&mut self) -> &mut T { |
339 | debug_assert!(!self.heap.is_empty()); |
340 | |
341 | let len = self.heap.len(); |
342 | if len > 1 { |
343 | // Here we preemptively leak all the rest of the underlying vector |
344 | // after the currently max element. If the caller mutates the &mut T |
345 | // we're about to give them, and then leaks the PeekMut, all these |
346 | // elements will remain leaked. If they don't leak the PeekMut, then |
347 | // either Drop or PeekMut::pop will un-leak the vector elements. |
348 | // |
// This technique is described in several other places in
// the standard library as "leak amplification".
351 | unsafe { |
352 | // SAFETY: len > 1 so len != 0. |
353 | self.original_len = Some(NonZeroUsize::new_unchecked(len)); |
354 | // SAFETY: len > 1 so all this does for now is leak elements, |
355 | // which is safe. |
356 | self.heap.data.set_len(1); |
357 | } |
358 | } |
359 | |
360 | // SAFE: PeekMut is only instantiated for non-empty heaps |
361 | unsafe { self.heap.data.get_unchecked_mut(0) } |
362 | } |
363 | } |
364 | |
365 | impl<'a, T: Ord, A: Allocator> PeekMut<'a, T, A> { |
366 | /// Removes the peeked value from the heap and returns it. |
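    ///
    /// # Examples
    ///
    /// A minimal sketch (the heap contents here are illustrative):
    ///
    /// ```
    /// use std::collections::binary_heap::PeekMut;
    /// use std::collections::BinaryHeap;
    ///
    /// let mut heap = BinaryHeap::from([1, 5, 2]);
    ///
    /// // `pop` takes the guard by value, so it is called as an
    /// // associated function rather than as a method.
    /// let value = PeekMut::pop(heap.peek_mut().unwrap());
    ///
    /// assert_eq!(value, 5);
    /// assert_eq!(heap.peek(), Some(&2));
    /// ```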
367 | #[stable (feature = "binary_heap_peek_mut_pop" , since = "1.18.0" )] |
368 | pub fn pop(mut this: PeekMut<'a, T, A>) -> T { |
if let Some(original_len) = this.original_len.take() {
370 | // SAFETY: This is how many elements were in the Vec at the time of |
371 | // the BinaryHeap::peek_mut call. |
unsafe { this.heap.data.set_len(original_len.get()) };
373 | |
374 | // Unlike in Drop, here we don't also need to do a sift_down even if |
375 | // the caller could've mutated the element. It is removed from the |
376 | // heap on the next line and pop() is not sensitive to its value. |
377 | } |
378 | this.heap.pop().unwrap() |
379 | } |
380 | } |
381 | |
#[stable(feature = "rust1", since = "1.0.0")]
383 | impl<T: Clone, A: Allocator + Clone> Clone for BinaryHeap<T, A> { |
384 | fn clone(&self) -> Self { |
385 | BinaryHeap { data: self.data.clone() } |
386 | } |
387 | |
388 | fn clone_from(&mut self, source: &Self) { |
389 | self.data.clone_from(&source.data); |
390 | } |
391 | } |
392 | |
#[stable(feature = "rust1", since = "1.0.0")]
394 | impl<T: Ord> Default for BinaryHeap<T> { |
395 | /// Creates an empty `BinaryHeap<T>`. |
#[inline]
397 | fn default() -> BinaryHeap<T> { |
398 | BinaryHeap::new() |
399 | } |
400 | } |
401 | |
402 | #[stable (feature = "binaryheap_debug" , since = "1.4.0" )] |
403 | impl<T: fmt::Debug, A: Allocator> fmt::Debug for BinaryHeap<T, A> { |
404 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
405 | f.debug_list().entries(self.iter()).finish() |
406 | } |
407 | } |
408 | |
409 | struct RebuildOnDrop< |
410 | 'a, |
411 | T: Ord, |
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
413 | > { |
414 | heap: &'a mut BinaryHeap<T, A>, |
415 | rebuild_from: usize, |
416 | } |
417 | |
418 | impl<T: Ord, A: Allocator> Drop for RebuildOnDrop<'_, T, A> { |
419 | fn drop(&mut self) { |
420 | self.heap.rebuild_tail(self.rebuild_from); |
421 | } |
422 | } |
423 | |
424 | impl<T: Ord> BinaryHeap<T> { |
425 | /// Creates an empty `BinaryHeap` as a max-heap. |
426 | /// |
427 | /// # Examples |
428 | /// |
429 | /// Basic usage: |
430 | /// |
431 | /// ``` |
432 | /// use std::collections::BinaryHeap; |
433 | /// let mut heap = BinaryHeap::new(); |
434 | /// heap.push(4); |
435 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_binary_heap_constructor", issue = "112353")]
#[must_use]
439 | pub const fn new() -> BinaryHeap<T> { |
440 | BinaryHeap { data: vec![] } |
441 | } |
442 | |
443 | /// Creates an empty `BinaryHeap` with at least the specified capacity. |
444 | /// |
445 | /// The binary heap will be able to hold at least `capacity` elements without |
446 | /// reallocating. This method is allowed to allocate for more elements than |
447 | /// `capacity`. If `capacity` is 0, the binary heap will not allocate. |
448 | /// |
449 | /// # Examples |
450 | /// |
451 | /// Basic usage: |
452 | /// |
453 | /// ``` |
454 | /// use std::collections::BinaryHeap; |
455 | /// let mut heap = BinaryHeap::with_capacity(10); |
456 | /// heap.push(4); |
457 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
460 | pub fn with_capacity(capacity: usize) -> BinaryHeap<T> { |
461 | BinaryHeap { data: Vec::with_capacity(capacity) } |
462 | } |
463 | } |
464 | |
465 | impl<T: Ord, A: Allocator> BinaryHeap<T, A> { |
466 | /// Creates an empty `BinaryHeap` as a max-heap, using `A` as allocator. |
467 | /// |
468 | /// # Examples |
469 | /// |
470 | /// Basic usage: |
471 | /// |
472 | /// ``` |
473 | /// #![feature(allocator_api)] |
474 | /// |
475 | /// use std::alloc::System; |
476 | /// use std::collections::BinaryHeap; |
477 | /// let mut heap = BinaryHeap::new_in(System); |
478 | /// heap.push(4); |
479 | /// ``` |
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_binary_heap_constructor", issue = "112353")]
#[must_use]
483 | pub const fn new_in(alloc: A) -> BinaryHeap<T, A> { |
484 | BinaryHeap { data: Vec::new_in(alloc) } |
485 | } |
486 | |
487 | /// Creates an empty `BinaryHeap` with at least the specified capacity, using `A` as allocator. |
488 | /// |
489 | /// The binary heap will be able to hold at least `capacity` elements without |
490 | /// reallocating. This method is allowed to allocate for more elements than |
491 | /// `capacity`. If `capacity` is 0, the binary heap will not allocate. |
492 | /// |
493 | /// # Examples |
494 | /// |
495 | /// Basic usage: |
496 | /// |
497 | /// ``` |
498 | /// #![feature(allocator_api)] |
499 | /// |
500 | /// use std::alloc::System; |
501 | /// use std::collections::BinaryHeap; |
502 | /// let mut heap = BinaryHeap::with_capacity_in(10, System); |
503 | /// heap.push(4); |
504 | /// ``` |
#[unstable(feature = "allocator_api", issue = "32838")]
#[must_use]
507 | pub fn with_capacity_in(capacity: usize, alloc: A) -> BinaryHeap<T, A> { |
508 | BinaryHeap { data: Vec::with_capacity_in(capacity, alloc) } |
509 | } |
510 | |
511 | /// Returns a mutable reference to the greatest item in the binary heap, or |
512 | /// `None` if it is empty. |
513 | /// |
514 | /// Note: If the `PeekMut` value is leaked, some heap elements might get |
515 | /// leaked along with it, but the remaining elements will remain a valid |
516 | /// heap. |
517 | /// |
518 | /// # Examples |
519 | /// |
520 | /// Basic usage: |
521 | /// |
522 | /// ``` |
523 | /// use std::collections::BinaryHeap; |
524 | /// let mut heap = BinaryHeap::new(); |
525 | /// assert!(heap.peek_mut().is_none()); |
526 | /// |
527 | /// heap.push(1); |
528 | /// heap.push(5); |
529 | /// heap.push(2); |
530 | /// { |
531 | /// let mut val = heap.peek_mut().unwrap(); |
532 | /// *val = 0; |
533 | /// } |
534 | /// assert_eq!(heap.peek(), Some(&2)); |
535 | /// ``` |
536 | /// |
537 | /// # Time complexity |
538 | /// |
539 | /// If the item is modified then the worst case time complexity is *O*(log(*n*)), |
540 | /// otherwise it's *O*(1). |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
542 | pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, A>> { |
543 | if self.is_empty() { None } else { Some(PeekMut { heap: self, original_len: None }) } |
544 | } |
545 | |
546 | /// Removes the greatest item from the binary heap and returns it, or `None` if it |
547 | /// is empty. |
548 | /// |
549 | /// # Examples |
550 | /// |
551 | /// Basic usage: |
552 | /// |
553 | /// ``` |
554 | /// use std::collections::BinaryHeap; |
555 | /// let mut heap = BinaryHeap::from([1, 3]); |
556 | /// |
557 | /// assert_eq!(heap.pop(), Some(3)); |
558 | /// assert_eq!(heap.pop(), Some(1)); |
559 | /// assert_eq!(heap.pop(), None); |
560 | /// ``` |
561 | /// |
562 | /// # Time complexity |
563 | /// |
564 | /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)). |
#[stable(feature = "rust1", since = "1.0.0")]
566 | pub fn pop(&mut self) -> Option<T> { |
567 | self.data.pop().map(|mut item| { |
568 | if !self.is_empty() { |
569 | swap(&mut item, &mut self.data[0]); |
570 | // SAFETY: !self.is_empty() means that self.len() > 0 |
571 | unsafe { self.sift_down_to_bottom(0) }; |
572 | } |
573 | item |
574 | }) |
575 | } |
576 | |
577 | /// Pushes an item onto the binary heap. |
578 | /// |
579 | /// # Examples |
580 | /// |
581 | /// Basic usage: |
582 | /// |
583 | /// ``` |
584 | /// use std::collections::BinaryHeap; |
585 | /// let mut heap = BinaryHeap::new(); |
586 | /// heap.push(3); |
587 | /// heap.push(5); |
588 | /// heap.push(1); |
589 | /// |
590 | /// assert_eq!(heap.len(), 3); |
591 | /// assert_eq!(heap.peek(), Some(&5)); |
592 | /// ``` |
593 | /// |
594 | /// # Time complexity |
595 | /// |
596 | /// The expected cost of `push`, averaged over every possible ordering of |
597 | /// the elements being pushed, and over a sufficiently large number of |
598 | /// pushes, is *O*(1). This is the most meaningful cost metric when pushing |
599 | /// elements that are *not* already in any sorted pattern. |
600 | /// |
601 | /// The time complexity degrades if elements are pushed in predominantly |
602 | /// ascending order. In the worst case, elements are pushed in ascending |
603 | /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap |
604 | /// containing *n* elements. |
605 | /// |
606 | /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case |
607 | /// occurs when capacity is exhausted and needs a resize. The resize cost |
608 | /// has been amortized in the previous figures. |
#[stable(feature = "rust1", since = "1.0.0")]
610 | pub fn push(&mut self, item: T) { |
611 | let old_len = self.len(); |
612 | self.data.push(item); |
613 | // SAFETY: Since we pushed a new item it means that |
614 | // old_len = self.len() - 1 < self.len() |
615 | unsafe { self.sift_up(0, old_len) }; |
616 | } |
617 | |
618 | /// Consumes the `BinaryHeap` and returns a vector in sorted |
619 | /// (ascending) order. |
620 | /// |
621 | /// # Examples |
622 | /// |
623 | /// Basic usage: |
624 | /// |
625 | /// ``` |
626 | /// use std::collections::BinaryHeap; |
627 | /// |
628 | /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]); |
629 | /// heap.push(6); |
630 | /// heap.push(3); |
631 | /// |
632 | /// let vec = heap.into_sorted_vec(); |
633 | /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]); |
634 | /// ``` |
635 | #[must_use = "`self` will be dropped if the result is not used" ] |
636 | #[stable (feature = "binary_heap_extras_15" , since = "1.5.0" )] |
637 | pub fn into_sorted_vec(mut self) -> Vec<T, A> { |
638 | let mut end = self.len(); |
639 | while end > 1 { |
640 | end -= 1; |
641 | // SAFETY: `end` goes from `self.len() - 1` to 1 (both included), |
642 | // so it's always a valid index to access. |
643 | // It is safe to access index 0 (i.e. `ptr`), because |
644 | // 1 <= end < self.len(), which means self.len() >= 2. |
645 | unsafe { |
646 | let ptr = self.data.as_mut_ptr(); |
647 | ptr::swap(ptr, ptr.add(end)); |
648 | } |
649 | // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so: |
650 | // 0 < 1 <= end <= self.len() - 1 < self.len() |
651 | // Which means 0 < end and end < self.len(). |
652 | unsafe { self.sift_down_range(0, end) }; |
653 | } |
654 | self.into_vec() |
655 | } |
656 | |
657 | // The implementations of sift_up and sift_down use unsafe blocks in |
658 | // order to move an element out of the vector (leaving behind a |
659 | // hole), shift along the others and move the removed element back into the |
660 | // vector at the final location of the hole. |
661 | // The `Hole` type is used to represent this, and make sure |
662 | // the hole is filled back at the end of its scope, even on panic. |
663 | // Using a hole reduces the constant factor compared to using swaps, |
664 | // which involves twice as many moves. |
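//
// For example, sifting up a 7 pushed onto [9, 5, 6, 1]: a hole opens at
// index 4 with 7 held aside, the parent 5 (index 1) moves down into the
// hole, the parent 9 (index 0) is not smaller so sifting stops, and 7
// fills the hole at index 1. That is one move per level instead of one
// swap (two moves) per level.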
665 | |
666 | /// # Safety |
667 | /// |
668 | /// The caller must guarantee that `pos < self.len()`. |
669 | unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize { |
670 | // Take out the value at `pos` and create a hole. |
671 | // SAFETY: The caller guarantees that pos < self.len() |
672 | let mut hole = unsafe { Hole::new(&mut self.data, pos) }; |
673 | |
674 | while hole.pos() > start { |
675 | let parent = (hole.pos() - 1) / 2; |
676 | |
677 | // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0 |
678 | // and so hole.pos() - 1 can't underflow. |
679 | // This guarantees that parent < hole.pos() so |
680 | // it's a valid index and also != hole.pos(). |
681 | if hole.element() <= unsafe { hole.get(parent) } { |
682 | break; |
683 | } |
684 | |
685 | // SAFETY: Same as above |
686 | unsafe { hole.move_to(parent) }; |
687 | } |
688 | |
689 | hole.pos() |
690 | } |
691 | |
692 | /// Take an element at `pos` and move it down the heap, |
693 | /// while its children are larger. |
694 | /// |
695 | /// # Safety |
696 | /// |
697 | /// The caller must guarantee that `pos < end <= self.len()`. |
698 | unsafe fn sift_down_range(&mut self, pos: usize, end: usize) { |
699 | // SAFETY: The caller guarantees that pos < end <= self.len(). |
700 | let mut hole = unsafe { Hole::new(&mut self.data, pos) }; |
701 | let mut child = 2 * hole.pos() + 1; |
702 | |
703 | // Loop invariant: child == 2 * hole.pos() + 1. |
704 | while child <= end.saturating_sub(2) { |
705 | // compare with the greater of the two children |
706 | // SAFETY: child < end - 1 < self.len() and |
707 | // child + 1 < end <= self.len(), so they're valid indexes. |
708 | // child == 2 * hole.pos() + 1 != hole.pos() and |
709 | // child + 1 == 2 * hole.pos() + 2 != hole.pos(). |
710 | // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow |
711 | // if T is a ZST |
712 | child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize; |
713 | |
714 | // if we are already in order, stop. |
715 | // SAFETY: child is now either the old child or the old child+1 |
// We have already proven that both are < self.len() and != hole.pos().
717 | if hole.element() >= unsafe { hole.get(child) } { |
718 | return; |
719 | } |
720 | |
721 | // SAFETY: same as above. |
722 | unsafe { hole.move_to(child) }; |
723 | child = 2 * hole.pos() + 1; |
724 | } |
725 | |
726 | // SAFETY: && short circuit, which means that in the |
727 | // second condition it's already true that child == end - 1 < self.len(). |
728 | if child == end - 1 && hole.element() < unsafe { hole.get(child) } { |
729 | // SAFETY: child is already proven to be a valid index and |
730 | // child == 2 * hole.pos() + 1 != hole.pos(). |
731 | unsafe { hole.move_to(child) }; |
732 | } |
733 | } |
734 | |
735 | /// # Safety |
736 | /// |
737 | /// The caller must guarantee that `pos < self.len()`. |
738 | unsafe fn sift_down(&mut self, pos: usize) { |
739 | let len = self.len(); |
740 | // SAFETY: pos < len is guaranteed by the caller and |
741 | // obviously len = self.len() <= self.len(). |
742 | unsafe { self.sift_down_range(pos, len) }; |
743 | } |
744 | |
745 | /// Take an element at `pos` and move it all the way down the heap, |
746 | /// then sift it up to its position. |
747 | /// |
748 | /// Note: This is faster when the element is known to be large / should |
749 | /// be closer to the bottom. |
750 | /// |
751 | /// # Safety |
752 | /// |
753 | /// The caller must guarantee that `pos < self.len()`. |
754 | unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) { |
755 | let end = self.len(); |
756 | let start = pos; |
757 | |
758 | // SAFETY: The caller guarantees that pos < self.len(). |
759 | let mut hole = unsafe { Hole::new(&mut self.data, pos) }; |
760 | let mut child = 2 * hole.pos() + 1; |
761 | |
762 | // Loop invariant: child == 2 * hole.pos() + 1. |
763 | while child <= end.saturating_sub(2) { |
764 | // SAFETY: child < end - 1 < self.len() and |
765 | // child + 1 < end <= self.len(), so they're valid indexes. |
766 | // child == 2 * hole.pos() + 1 != hole.pos() and |
767 | // child + 1 == 2 * hole.pos() + 2 != hole.pos(). |
768 | // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow |
769 | // if T is a ZST |
770 | child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize; |
771 | |
772 | // SAFETY: Same as above |
773 | unsafe { hole.move_to(child) }; |
774 | child = 2 * hole.pos() + 1; |
775 | } |
776 | |
777 | if child == end - 1 { |
778 | // SAFETY: child == end - 1 < self.len(), so it's a valid index |
779 | // and child == 2 * hole.pos() + 1 != hole.pos(). |
780 | unsafe { hole.move_to(child) }; |
781 | } |
782 | pos = hole.pos(); |
783 | drop(hole); |
784 | |
785 | // SAFETY: pos is the position in the hole and was already proven |
786 | // to be a valid index. |
787 | unsafe { self.sift_up(start, pos) }; |
788 | } |
789 | |
790 | /// Rebuild assuming data[0..start] is still a proper heap. |
791 | fn rebuild_tail(&mut self, start: usize) { |
792 | if start == self.len() { |
793 | return; |
794 | } |
795 | |
796 | let tail_len = self.len() - start; |
797 | |
#[inline(always)]
799 | fn log2_fast(x: usize) -> usize { |
800 | (usize::BITS - x.leading_zeros() - 1) as usize |
801 | } |
802 | |
803 | // `rebuild` takes O(self.len()) operations |
804 | // and about 2 * self.len() comparisons in the worst case |
805 | // while repeating `sift_up` takes O(tail_len * log(start)) operations |
806 | // and about 1 * tail_len * log_2(start) comparisons in the worst case, |
807 | // assuming start >= tail_len. For larger heaps, the crossover point |
808 | // no longer follows this reasoning and was determined empirically. |
809 | let better_to_rebuild = if start < tail_len { |
810 | true |
811 | } else if self.len() <= 2048 { |
812 | 2 * self.len() < tail_len * log2_fast(start) |
813 | } else { |
814 | 2 * self.len() < tail_len * 11 |
815 | }; |
816 | |
817 | if better_to_rebuild { |
818 | self.rebuild(); |
819 | } else { |
820 | for i in start..self.len() { |
821 | // SAFETY: The index `i` is always less than self.len(). |
822 | unsafe { self.sift_up(0, i) }; |
823 | } |
824 | } |
825 | } |
826 | |
827 | fn rebuild(&mut self) { |
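// Bottom-up heap construction (Floyd's method): sift down each
// non-leaf node, starting from the last parent and ending at the
// root. The total work sums to O(self.len()).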
828 | let mut n = self.len() / 2; |
829 | while n > 0 { |
830 | n -= 1; |
831 | // SAFETY: n starts from self.len() / 2 and goes down to 0. |
832 | // The only case when !(n < self.len()) is if |
833 | // self.len() == 0, but it's ruled out by the loop condition. |
834 | unsafe { self.sift_down(n) }; |
835 | } |
836 | } |
837 | |
838 | /// Moves all the elements of `other` into `self`, leaving `other` empty. |
839 | /// |
840 | /// # Examples |
841 | /// |
842 | /// Basic usage: |
843 | /// |
844 | /// ``` |
845 | /// use std::collections::BinaryHeap; |
846 | /// |
847 | /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]); |
848 | /// let mut b = BinaryHeap::from([-20, 5, 43]); |
849 | /// |
850 | /// a.append(&mut b); |
851 | /// |
852 | /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); |
853 | /// assert!(b.is_empty()); |
854 | /// ``` |
855 | #[stable (feature = "binary_heap_append" , since = "1.11.0" )] |
856 | pub fn append(&mut self, other: &mut Self) { |
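// Swap so that we always append the shorter heap onto the longer
// one, keeping the tail that has to be re-heapified small.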
857 | if self.len() < other.len() { |
858 | swap(self, other); |
859 | } |
860 | |
861 | let start = self.data.len(); |
862 | |
863 | self.data.append(&mut other.data); |
864 | |
865 | self.rebuild_tail(start); |
866 | } |
867 | |
868 | /// Clears the binary heap, returning an iterator over the removed elements |
869 | /// in heap order. If the iterator is dropped before being fully consumed, |
870 | /// it drops the remaining elements in heap order. |
871 | /// |
872 | /// The returned iterator keeps a mutable borrow on the heap to optimize |
873 | /// its implementation. |
874 | /// |
875 | /// Note: |
876 | /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`. |
877 | /// You should use the latter for most cases. |
878 | /// |
879 | /// # Examples |
880 | /// |
881 | /// Basic usage: |
882 | /// |
883 | /// ``` |
884 | /// #![feature(binary_heap_drain_sorted)] |
885 | /// use std::collections::BinaryHeap; |
886 | /// |
887 | /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]); |
888 | /// assert_eq!(heap.len(), 5); |
889 | /// |
890 | /// drop(heap.drain_sorted()); // removes all elements in heap order |
891 | /// assert_eq!(heap.len(), 0); |
892 | /// ``` |
#[inline]
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
895 | pub fn drain_sorted(&mut self) -> DrainSorted<'_, T, A> { |
896 | DrainSorted { inner: self } |
897 | } |
898 | |
899 | /// Retains only the elements specified by the predicate. |
900 | /// |
901 | /// In other words, remove all elements `e` for which `f(&e)` returns |
902 | /// `false`. The elements are visited in unsorted (and unspecified) order. |
903 | /// |
904 | /// # Examples |
905 | /// |
906 | /// Basic usage: |
907 | /// |
908 | /// ``` |
909 | /// use std::collections::BinaryHeap; |
910 | /// |
911 | /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]); |
912 | /// |
913 | /// heap.retain(|x| x % 2 == 0); // only keep even numbers |
914 | /// |
915 | /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4]) |
916 | /// ``` |
917 | #[stable (feature = "binary_heap_retain" , since = "1.70.0" )] |
918 | pub fn retain<F>(&mut self, mut f: F) |
919 | where |
920 | F: FnMut(&T) -> bool, |
921 | { |
// `rebuild_from` will be updated to the first touched element below, and the rebuild will
923 | // only be done for the tail. |
924 | let mut guard = RebuildOnDrop { rebuild_from: self.len(), heap: self }; |
925 | let mut i = 0; |
926 | |
927 | guard.heap.data.retain(|e| { |
928 | let keep = f(e); |
929 | if !keep && i < guard.rebuild_from { |
930 | guard.rebuild_from = i; |
931 | } |
932 | i += 1; |
933 | keep |
934 | }); |
935 | } |
936 | } |
937 | |
938 | impl<T, A: Allocator> BinaryHeap<T, A> { |
939 | /// Returns an iterator visiting all values in the underlying vector, in |
940 | /// arbitrary order. |
941 | /// |
942 | /// # Examples |
943 | /// |
944 | /// Basic usage: |
945 | /// |
946 | /// ``` |
947 | /// use std::collections::BinaryHeap; |
948 | /// let heap = BinaryHeap::from([1, 2, 3, 4]); |
949 | /// |
950 | /// // Print 1, 2, 3, 4 in arbitrary order |
951 | /// for x in heap.iter() { |
952 | /// println!("{x}" ); |
953 | /// } |
954 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
956 | pub fn iter(&self) -> Iter<'_, T> { |
957 | Iter { iter: self.data.iter() } |
958 | } |
959 | |
960 | /// Returns an iterator which retrieves elements in heap order. |
961 | /// This method consumes the original heap. |
962 | /// |
963 | /// # Examples |
964 | /// |
965 | /// Basic usage: |
966 | /// |
967 | /// ``` |
968 | /// #![feature(binary_heap_into_iter_sorted)] |
969 | /// use std::collections::BinaryHeap; |
970 | /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]); |
971 | /// |
972 | /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]); |
973 | /// ``` |
974 | #[unstable (feature = "binary_heap_into_iter_sorted" , issue = "59278" )] |
975 | pub fn into_iter_sorted(self) -> IntoIterSorted<T, A> { |
976 | IntoIterSorted { inner: self } |
977 | } |
978 | |
979 | /// Returns the greatest item in the binary heap, or `None` if it is empty. |
980 | /// |
981 | /// # Examples |
982 | /// |
983 | /// Basic usage: |
984 | /// |
985 | /// ``` |
986 | /// use std::collections::BinaryHeap; |
987 | /// let mut heap = BinaryHeap::new(); |
988 | /// assert_eq!(heap.peek(), None); |
989 | /// |
990 | /// heap.push(1); |
991 | /// heap.push(5); |
992 | /// heap.push(2); |
993 | /// assert_eq!(heap.peek(), Some(&5)); |
994 | /// |
995 | /// ``` |
996 | /// |
997 | /// # Time complexity |
998 | /// |
999 | /// Cost is *O*(1) in the worst case. |
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
1002 | pub fn peek(&self) -> Option<&T> { |
1003 | self.data.get(0) |
1004 | } |
1005 | |
1006 | /// Returns the number of elements the binary heap can hold without reallocating. |
1007 | /// |
1008 | /// # Examples |
1009 | /// |
1010 | /// Basic usage: |
1011 | /// |
1012 | /// ``` |
1013 | /// use std::collections::BinaryHeap; |
1014 | /// let mut heap = BinaryHeap::with_capacity(100); |
1015 | /// assert!(heap.capacity() >= 100); |
1016 | /// heap.push(4); |
1017 | /// ``` |
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
1020 | pub fn capacity(&self) -> usize { |
1021 | self.data.capacity() |
1022 | } |
1023 | |
1024 | /// Reserves the minimum capacity for at least `additional` elements more than |
1025 | /// the current length. Unlike [`reserve`], this will not |
1026 | /// deliberately over-allocate to speculatively avoid frequent allocations. |
1027 | /// After calling `reserve_exact`, capacity will be greater than or equal to |
1028 | /// `self.len() + additional`. Does nothing if the capacity is already |
1029 | /// sufficient. |
1030 | /// |
1031 | /// [`reserve`]: BinaryHeap::reserve |
1032 | /// |
1033 | /// # Panics |
1034 | /// |
1035 | /// Panics if the new capacity overflows [`usize`]. |
1036 | /// |
1037 | /// # Examples |
1038 | /// |
1039 | /// Basic usage: |
1040 | /// |
1041 | /// ``` |
1042 | /// use std::collections::BinaryHeap; |
1043 | /// let mut heap = BinaryHeap::new(); |
1044 | /// heap.reserve_exact(100); |
1045 | /// assert!(heap.capacity() >= 100); |
1046 | /// heap.push(4); |
1047 | /// ``` |
1048 | /// |
1049 | /// [`reserve`]: BinaryHeap::reserve |
#[stable(feature = "rust1", since = "1.0.0")]
1051 | pub fn reserve_exact(&mut self, additional: usize) { |
1052 | self.data.reserve_exact(additional); |
1053 | } |
1054 | |
1055 | /// Reserves capacity for at least `additional` elements more than the |
1056 | /// current length. The allocator may reserve more space to speculatively |
1057 | /// avoid frequent allocations. After calling `reserve`, |
1058 | /// capacity will be greater than or equal to `self.len() + additional`. |
1059 | /// Does nothing if capacity is already sufficient. |
1060 | /// |
1061 | /// # Panics |
1062 | /// |
1063 | /// Panics if the new capacity overflows [`usize`]. |
1064 | /// |
1065 | /// # Examples |
1066 | /// |
1067 | /// Basic usage: |
1068 | /// |
1069 | /// ``` |
1070 | /// use std::collections::BinaryHeap; |
1071 | /// let mut heap = BinaryHeap::new(); |
1072 | /// heap.reserve(100); |
1073 | /// assert!(heap.capacity() >= 100); |
1074 | /// heap.push(4); |
1075 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
1077 | pub fn reserve(&mut self, additional: usize) { |
1078 | self.data.reserve(additional); |
1079 | } |
1080 | |
1081 | /// Tries to reserve the minimum capacity for at least `additional` elements |
1082 | /// more than the current length. Unlike [`try_reserve`], this will not |
1083 | /// deliberately over-allocate to speculatively avoid frequent allocations. |
1084 | /// After calling `try_reserve_exact`, capacity will be greater than or |
1085 | /// equal to `self.len() + additional` if it returns `Ok(())`. |
1086 | /// Does nothing if the capacity is already sufficient. |
1087 | /// |
1088 | /// Note that the allocator may give the collection more space than it |
1089 | /// requests. Therefore, capacity can not be relied upon to be precisely |
1090 | /// minimal. Prefer [`try_reserve`] if future insertions are expected. |
1091 | /// |
1092 | /// [`try_reserve`]: BinaryHeap::try_reserve |
1093 | /// |
1094 | /// # Errors |
1095 | /// |
1096 | /// If the capacity overflows, or the allocator reports a failure, then an error |
1097 | /// is returned. |
1098 | /// |
1099 | /// # Examples |
1100 | /// |
1101 | /// ``` |
1102 | /// use std::collections::BinaryHeap; |
1103 | /// use std::collections::TryReserveError; |
1104 | /// |
1105 | /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> { |
1106 | /// let mut heap = BinaryHeap::new(); |
1107 | /// |
1108 | /// // Pre-reserve the memory, exiting if we can't |
1109 | /// heap.try_reserve_exact(data.len())?; |
1110 | /// |
1111 | /// // Now we know this can't OOM in the middle of our complex work |
1112 | /// heap.extend(data.iter()); |
1113 | /// |
1114 | /// Ok(heap.pop()) |
1115 | /// } |
/// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
1117 | /// ``` |
1118 | #[stable (feature = "try_reserve_2" , since = "1.63.0" )] |
1119 | pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { |
1120 | self.data.try_reserve_exact(additional) |
1121 | } |
1122 | |
1123 | /// Tries to reserve capacity for at least `additional` elements more than the |
1124 | /// current length. The allocator may reserve more space to speculatively |
1125 | /// avoid frequent allocations. After calling `try_reserve`, capacity will be |
1126 | /// greater than or equal to `self.len() + additional` if it returns |
1127 | /// `Ok(())`. Does nothing if capacity is already sufficient. This method |
1128 | /// preserves the contents even if an error occurs. |
1129 | /// |
1130 | /// # Errors |
1131 | /// |
1132 | /// If the capacity overflows, or the allocator reports a failure, then an error |
1133 | /// is returned. |
1134 | /// |
1135 | /// # Examples |
1136 | /// |
1137 | /// ``` |
1138 | /// use std::collections::BinaryHeap; |
1139 | /// use std::collections::TryReserveError; |
1140 | /// |
1141 | /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> { |
1142 | /// let mut heap = BinaryHeap::new(); |
1143 | /// |
1144 | /// // Pre-reserve the memory, exiting if we can't |
1145 | /// heap.try_reserve(data.len())?; |
1146 | /// |
1147 | /// // Now we know this can't OOM in the middle of our complex work |
1148 | /// heap.extend(data.iter()); |
1149 | /// |
1150 | /// Ok(heap.pop()) |
1151 | /// } |
/// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
1153 | /// ``` |
1154 | #[stable (feature = "try_reserve_2" , since = "1.63.0" )] |
1155 | pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { |
1156 | self.data.try_reserve(additional) |
1157 | } |
1158 | |
1159 | /// Discards as much additional capacity as possible. |
1160 | /// |
1161 | /// # Examples |
1162 | /// |
1163 | /// Basic usage: |
1164 | /// |
1165 | /// ``` |
1166 | /// use std::collections::BinaryHeap; |
1167 | /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100); |
1168 | /// |
1169 | /// assert!(heap.capacity() >= 100); |
1170 | /// heap.shrink_to_fit(); |
1171 | /// assert!(heap.capacity() == 0); |
1172 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
1174 | pub fn shrink_to_fit(&mut self) { |
1175 | self.data.shrink_to_fit(); |
1176 | } |
1177 | |
1178 | /// Discards capacity with a lower bound. |
1179 | /// |
1180 | /// The capacity will remain at least as large as both the length |
1181 | /// and the supplied value. |
1182 | /// |
1183 | /// If the current capacity is less than the lower limit, this is a no-op. |
1184 | /// |
1185 | /// # Examples |
1186 | /// |
1187 | /// ``` |
1188 | /// use std::collections::BinaryHeap; |
1189 | /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100); |
1190 | /// |
1191 | /// assert!(heap.capacity() >= 100); |
1192 | /// heap.shrink_to(10); |
1193 | /// assert!(heap.capacity() >= 10); |
1194 | /// ``` |
#[inline]
#[stable(feature = "shrink_to", since = "1.56.0")]
1197 | pub fn shrink_to(&mut self, min_capacity: usize) { |
1198 | self.data.shrink_to(min_capacity) |
1199 | } |
1200 | |
1201 | /// Returns a slice of all values in the underlying vector, in arbitrary |
1202 | /// order. |
1203 | /// |
1204 | /// # Examples |
1205 | /// |
1206 | /// Basic usage: |
1207 | /// |
1208 | /// ``` |
1209 | /// #![feature(binary_heap_as_slice)] |
1210 | /// use std::collections::BinaryHeap; |
1211 | /// use std::io::{self, Write}; |
1212 | /// |
1213 | /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]); |
1214 | /// |
1215 | /// io::sink().write(heap.as_slice()).unwrap(); |
1216 | /// ``` |
#[must_use]
#[unstable(feature = "binary_heap_as_slice", issue = "83659")]
1219 | pub fn as_slice(&self) -> &[T] { |
1220 | self.data.as_slice() |
1221 | } |
1222 | |
1223 | /// Consumes the `BinaryHeap` and returns the underlying vector |
1224 | /// in arbitrary order. |
1225 | /// |
1226 | /// # Examples |
1227 | /// |
1228 | /// Basic usage: |
1229 | /// |
1230 | /// ``` |
1231 | /// use std::collections::BinaryHeap; |
1232 | /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]); |
1233 | /// let vec = heap.into_vec(); |
1234 | /// |
1235 | /// // Will print in some order |
1236 | /// for x in vec { |
1237 | /// println!("{x}" ); |
1238 | /// } |
1239 | /// ``` |
1240 | #[must_use = "`self` will be dropped if the result is not used" ] |
1241 | #[stable (feature = "binary_heap_extras_15" , since = "1.5.0" )] |
1242 | pub fn into_vec(self) -> Vec<T, A> { |
1243 | self.into() |
1244 | } |
1245 | |
1246 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
1249 | pub fn allocator(&self) -> &A { |
1250 | self.data.allocator() |
1251 | } |
1252 | |
1253 | /// Returns the length of the binary heap. |
1254 | /// |
1255 | /// # Examples |
1256 | /// |
1257 | /// Basic usage: |
1258 | /// |
1259 | /// ``` |
1260 | /// use std::collections::BinaryHeap; |
1261 | /// let heap = BinaryHeap::from([1, 3]); |
1262 | /// |
1263 | /// assert_eq!(heap.len(), 2); |
1264 | /// ``` |
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
1267 | pub fn len(&self) -> usize { |
1268 | self.data.len() |
1269 | } |
1270 | |
1271 | /// Checks if the binary heap is empty. |
1272 | /// |
1273 | /// # Examples |
1274 | /// |
1275 | /// Basic usage: |
1276 | /// |
1277 | /// ``` |
1278 | /// use std::collections::BinaryHeap; |
1279 | /// let mut heap = BinaryHeap::new(); |
1280 | /// |
1281 | /// assert!(heap.is_empty()); |
1282 | /// |
1283 | /// heap.push(3); |
1284 | /// heap.push(5); |
1285 | /// heap.push(1); |
1286 | /// |
1287 | /// assert!(!heap.is_empty()); |
1288 | /// ``` |
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
1291 | pub fn is_empty(&self) -> bool { |
1292 | self.len() == 0 |
1293 | } |
1294 | |
1295 | /// Clears the binary heap, returning an iterator over the removed elements |
1296 | /// in arbitrary order. If the iterator is dropped before being fully |
1297 | /// consumed, it drops the remaining elements in arbitrary order. |
1298 | /// |
1299 | /// The returned iterator keeps a mutable borrow on the heap to optimize |
1300 | /// its implementation. |
1301 | /// |
1302 | /// # Examples |
1303 | /// |
1304 | /// Basic usage: |
1305 | /// |
1306 | /// ``` |
1307 | /// use std::collections::BinaryHeap; |
1308 | /// let mut heap = BinaryHeap::from([1, 3]); |
1309 | /// |
1310 | /// assert!(!heap.is_empty()); |
1311 | /// |
1312 | /// for x in heap.drain() { |
1313 | /// println!("{x}" ); |
1314 | /// } |
1315 | /// |
1316 | /// assert!(heap.is_empty()); |
1317 | /// ``` |
#[inline]
#[stable(feature = "drain", since = "1.6.0")]
1320 | pub fn drain(&mut self) -> Drain<'_, T, A> { |
1321 | Drain { iter: self.data.drain(..) } |
1322 | } |
1323 | |
1324 | /// Drops all items from the binary heap. |
1325 | /// |
1326 | /// # Examples |
1327 | /// |
1328 | /// Basic usage: |
1329 | /// |
1330 | /// ``` |
1331 | /// use std::collections::BinaryHeap; |
1332 | /// let mut heap = BinaryHeap::from([1, 3]); |
1333 | /// |
1334 | /// assert!(!heap.is_empty()); |
1335 | /// |
1336 | /// heap.clear(); |
1337 | /// |
1338 | /// assert!(heap.is_empty()); |
1339 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
1341 | pub fn clear(&mut self) { |
1342 | self.drain(); |
1343 | } |
1344 | } |
1345 | |
/// Hole represents a hole in a slice, i.e., an index without a valid value
1347 | /// (because it was moved from or duplicated). |
1348 | /// In drop, `Hole` will restore the slice by filling the hole |
1349 | /// position with the value that was originally removed. |
1350 | struct Hole<'a, T: 'a> { |
1351 | data: &'a mut [T], |
1352 | elt: ManuallyDrop<T>, |
1353 | pos: usize, |
1354 | } |
1355 | |
1356 | impl<'a, T> Hole<'a, T> { |
1357 | /// Create a new `Hole` at index `pos`. |
1358 | /// |
1359 | /// Unsafe because pos must be within the data slice. |
#[inline]
1361 | unsafe fn new(data: &'a mut [T], pos: usize) -> Self { |
1362 | debug_assert!(pos < data.len()); |
1363 | // SAFE: pos should be inside the slice |
1364 | let elt = unsafe { ptr::read(data.get_unchecked(pos)) }; |
1365 | Hole { data, elt: ManuallyDrop::new(elt), pos } |
1366 | } |
1367 | |
#[inline]
1369 | fn pos(&self) -> usize { |
1370 | self.pos |
1371 | } |
1372 | |
1373 | /// Returns a reference to the element removed. |
#[inline]
1375 | fn element(&self) -> &T { |
1376 | &self.elt |
1377 | } |
1378 | |
1379 | /// Returns a reference to the element at `index`. |
1380 | /// |
1381 | /// Unsafe because index must be within the data slice and not equal to pos. |
#[inline]
1383 | unsafe fn get(&self, index: usize) -> &T { |
1384 | debug_assert!(index != self.pos); |
1385 | debug_assert!(index < self.data.len()); |
1386 | unsafe { self.data.get_unchecked(index) } |
1387 | } |
1388 | |
1389 | /// Move hole to new location |
1390 | /// |
1391 | /// Unsafe because index must be within the data slice and not equal to pos. |
#[inline]
1393 | unsafe fn move_to(&mut self, index: usize) { |
1394 | debug_assert!(index != self.pos); |
1395 | debug_assert!(index < self.data.len()); |
1396 | unsafe { |
1397 | let ptr = self.data.as_mut_ptr(); |
1398 | let index_ptr: *const _ = ptr.add(index); |
1399 | let hole_ptr = ptr.add(self.pos); |
1400 | ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1); |
1401 | } |
1402 | self.pos = index; |
1403 | } |
1404 | } |
1405 | |
1406 | impl<T> Drop for Hole<'_, T> { |
#[inline]
1408 | fn drop(&mut self) { |
1409 | // fill the hole again |
1410 | unsafe { |
let pos = self.pos;
ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
1413 | } |
1414 | } |
1415 | } |
1416 | |
1417 | /// An iterator over the elements of a `BinaryHeap`. |
1418 | /// |
1419 | /// This `struct` is created by [`BinaryHeap::iter()`]. See its |
1420 | /// documentation for more. |
1421 | /// |
1422 | /// [`iter`]: BinaryHeap::iter |
1423 | #[must_use = "iterators are lazy and do nothing unless consumed" ] |
1424 | #[stable (feature = "rust1" , since = "1.0.0" )] |
1425 | pub struct Iter<'a, T: 'a> { |
1426 | iter: slice::Iter<'a, T>, |
1427 | } |
1428 | |
1429 | #[stable (feature = "collection_debug" , since = "1.17.0" )] |
1430 | impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> { |
1431 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1432 | f.debug_tuple(name:"Iter" ).field(&self.iter.as_slice()).finish() |
1433 | } |
1434 | } |
1435 | |
1436 | // FIXME(#26925) Remove in favor of `#[derive(Clone)]` |
#[stable(feature = "rust1", since = "1.0.0")]
1438 | impl<T> Clone for Iter<'_, T> { |
1439 | fn clone(&self) -> Self { |
1440 | Iter { iter: self.iter.clone() } |
1441 | } |
1442 | } |
1443 | |
#[stable(feature = "rust1", since = "1.0.0")]
1445 | impl<'a, T> Iterator for Iter<'a, T> { |
1446 | type Item = &'a T; |
1447 | |
#[inline]
1449 | fn next(&mut self) -> Option<&'a T> { |
1450 | self.iter.next() |
1451 | } |
1452 | |
#[inline]
1454 | fn size_hint(&self) -> (usize, Option<usize>) { |
1455 | self.iter.size_hint() |
1456 | } |
1457 | |
#[inline]
1459 | fn last(self) -> Option<&'a T> { |
1460 | self.iter.last() |
1461 | } |
1462 | } |
1463 | |
#[stable(feature = "rust1", since = "1.0.0")]
1465 | impl<'a, T> DoubleEndedIterator for Iter<'a, T> { |
#[inline]
1467 | fn next_back(&mut self) -> Option<&'a T> { |
1468 | self.iter.next_back() |
1469 | } |
1470 | } |
1471 | |
#[stable(feature = "rust1", since = "1.0.0")]
1473 | impl<T> ExactSizeIterator for Iter<'_, T> { |
1474 | fn is_empty(&self) -> bool { |
1475 | self.iter.is_empty() |
1476 | } |
1477 | } |
1478 | |
1479 | #[stable (feature = "fused" , since = "1.26.0" )] |
1480 | impl<T> FusedIterator for Iter<'_, T> {} |
1481 | |
1482 | /// An owning iterator over the elements of a `BinaryHeap`. |
1483 | /// |
1484 | /// This `struct` is created by [`BinaryHeap::into_iter()`] |
1485 | /// (provided by the [`IntoIterator`] trait). See its documentation for more. |
1486 | /// |
1487 | /// [`into_iter`]: BinaryHeap::into_iter |
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
1490 | pub struct IntoIter< |
1491 | T, |
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
1493 | > { |
1494 | iter: vec::IntoIter<T, A>, |
1495 | } |
1496 | |
1497 | impl<T, A: Allocator> IntoIter<T, A> { |
1498 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
1500 | pub fn allocator(&self) -> &A { |
1501 | self.iter.allocator() |
1502 | } |
1503 | } |
1504 | |
1505 | #[stable (feature = "collection_debug" , since = "1.17.0" )] |
1506 | impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> { |
1507 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1508 | f.debug_tuple(name:"IntoIter" ).field(&self.iter.as_slice()).finish() |
1509 | } |
1510 | } |
1511 | |
#[stable(feature = "rust1", since = "1.0.0")]
1513 | impl<T, A: Allocator> Iterator for IntoIter<T, A> { |
1514 | type Item = T; |
1515 | |
#[inline]
1517 | fn next(&mut self) -> Option<T> { |
1518 | self.iter.next() |
1519 | } |
1520 | |
#[inline]
1522 | fn size_hint(&self) -> (usize, Option<usize>) { |
1523 | self.iter.size_hint() |
1524 | } |
1525 | } |
1526 | |
#[stable(feature = "rust1", since = "1.0.0")]
1528 | impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { |
#[inline]
1530 | fn next_back(&mut self) -> Option<T> { |
1531 | self.iter.next_back() |
1532 | } |
1533 | } |
1534 | |
#[stable(feature = "rust1", since = "1.0.0")]
1536 | impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> { |
1537 | fn is_empty(&self) -> bool { |
1538 | self.iter.is_empty() |
1539 | } |
1540 | } |
1541 | |
1542 | #[stable (feature = "fused" , since = "1.26.0" )] |
1543 | impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {} |
1544 | |
#[doc(hidden)]
#[unstable(issue = "none", feature = "trusted_fused")]
1547 | unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {} |
1548 | |
#[stable(feature = "default_iters", since = "1.70.0")]
1550 | impl<T> Default for IntoIter<T> { |
1551 | /// Creates an empty `binary_heap::IntoIter`. |
1552 | /// |
1553 | /// ``` |
1554 | /// # use std::collections::binary_heap; |
1555 | /// let iter: binary_heap::IntoIter<u8> = Default::default(); |
1556 | /// assert_eq!(iter.len(), 0); |
1557 | /// ``` |
1558 | fn default() -> Self { |
1559 | IntoIter { iter: Default::default() } |
1560 | } |
1561 | } |
1562 | |
// In addition to the SAFETY invariants of the following three unsafe traits,
// also refer to the `vec::in_place_collect` module documentation for an overview.
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
1567 | unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> { |
1568 | type Source = IntoIter<T, A>; |
1569 | |
#[inline]
1571 | unsafe fn as_inner(&mut self) -> &mut Self::Source { |
1572 | self |
1573 | } |
1574 | } |
1575 | |
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
1578 | unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> { |
1579 | const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1); |
1580 | const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1); |
1581 | } |
1582 | |
1583 | unsafe impl<I> AsVecIntoIter for IntoIter<I> { |
1584 | type Item = I; |
1585 | |
1586 | fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> { |
1587 | &mut self.iter |
1588 | } |
1589 | } |
1590 | |
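/// An owning iterator over the elements of a `BinaryHeap`, yielding them in
/// heap order (largest first).
///
/// This `struct` is created by the unstable [`BinaryHeap::into_iter_sorted()`]
/// method. A minimal sketch of the intended usage, assuming the
/// `binary_heap_into_iter_sorted` nightly feature is enabled:
///
/// ```
/// #![feature(binary_heap_into_iter_sorted)]
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 3, 2]);
/// // Elements come out in descending order, consuming the heap.
/// assert_eq!(heap.into_iter_sorted().collect::<Vec<_>>(), [3, 2, 1]);
/// ```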
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
pub struct IntoIterSorted<
T,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
1597 | > { |
1598 | inner: BinaryHeap<T, A>, |
1599 | } |
1600 | |
1601 | impl<T, A: Allocator> IntoIterSorted<T, A> { |
1602 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
1604 | pub fn allocator(&self) -> &A { |
1605 | self.inner.allocator() |
1606 | } |
1607 | } |
1608 | |
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> Iterator for IntoIterSorted<T, A> {
type Item = T;

#[inline]
fn next(&mut self) -> Option<T> {
self.inner.pop()
}

#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = self.inner.len();
1621 | (exact, Some(exact)) |
1622 | } |
1623 | } |
1624 | |
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1626 | impl<T: Ord, A: Allocator> ExactSizeIterator for IntoIterSorted<T, A> {} |
1627 | |
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1629 | impl<T: Ord, A: Allocator> FusedIterator for IntoIterSorted<T, A> {} |
1630 | |
#[unstable(feature = "trusted_len", issue = "37572")]
1632 | unsafe impl<T: Ord, A: Allocator> TrustedLen for IntoIterSorted<T, A> {} |
1633 | |
1634 | /// A draining iterator over the elements of a `BinaryHeap`. |
1635 | /// |
1636 | /// This `struct` is created by [`BinaryHeap::drain()`]. See its |
1637 | /// documentation for more. |
1638 | /// |
1639 | /// [`drain`]: BinaryHeap::drain |
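///
/// # Examples
///
/// A small illustration; note that `drain` yields elements in arbitrary
/// order and leaves the heap empty:
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::from([1, 3, 2]);
/// assert_eq!(heap.drain().count(), 3);
/// assert!(heap.is_empty());
/// ```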
#[stable(feature = "drain", since = "1.6.0")]
#[derive(Debug)]
pub struct Drain<
'a,
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
1646 | > { |
1647 | iter: vec::Drain<'a, T, A>, |
1648 | } |
1649 | |
1650 | impl<T, A: Allocator> Drain<'_, T, A> { |
1651 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
1653 | pub fn allocator(&self) -> &A { |
1654 | self.iter.allocator() |
1655 | } |
1656 | } |
1657 | |
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
type Item = T;

#[inline]
fn next(&mut self) -> Option<T> {
self.iter.next()
}

#[inline]
1668 | fn size_hint(&self) -> (usize, Option<usize>) { |
1669 | self.iter.size_hint() |
1670 | } |
1671 | } |
1672 | |
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
1676 | fn next_back(&mut self) -> Option<T> { |
1677 | self.iter.next_back() |
1678 | } |
1679 | } |
1680 | |
#[stable(feature = "drain", since = "1.6.0")]
1682 | impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> { |
1683 | fn is_empty(&self) -> bool { |
1684 | self.iter.is_empty() |
1685 | } |
1686 | } |
1687 | |
#[stable(feature = "fused", since = "1.26.0")]
1689 | impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {} |
1690 | |
1691 | /// A draining iterator over the elements of a `BinaryHeap`. |
1692 | /// |
1693 | /// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its |
1694 | /// documentation for more. |
1695 | /// |
1696 | /// [`drain_sorted`]: BinaryHeap::drain_sorted |
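///
/// # Examples
///
/// A minimal sketch, assuming the `binary_heap_drain_sorted` nightly
/// feature is enabled:
///
/// ```
/// #![feature(binary_heap_drain_sorted)]
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::from([1, 3, 2]);
/// // Elements drain in descending order; the heap is left empty afterwards.
/// assert_eq!(heap.drain_sorted().collect::<Vec<_>>(), [3, 2, 1]);
/// assert!(heap.is_empty());
/// ```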
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
#[derive(Debug)]
pub struct DrainSorted<
'a,
T: Ord,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
1703 | > { |
1704 | inner: &'a mut BinaryHeap<T, A>, |
1705 | } |
1706 | |
1707 | impl<'a, T: Ord, A: Allocator> DrainSorted<'a, T, A> { |
1708 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
1710 | pub fn allocator(&self) -> &A { |
1711 | self.inner.allocator() |
1712 | } |
1713 | } |
1714 | |
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1716 | impl<'a, T: Ord, A: Allocator> Drop for DrainSorted<'a, T, A> { |
1717 | /// Removes heap elements in heap order. |
1718 | fn drop(&mut self) { |
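// If dropping a popped element panics, this guard's own destructor
// finishes draining the heap, leaving it empty rather than half-drained.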
1719 | struct DropGuard<'r, 'a, T: Ord, A: Allocator>(&'r mut DrainSorted<'a, T, A>); |
1720 | |
1721 | impl<'r, 'a, T: Ord, A: Allocator> Drop for DropGuard<'r, 'a, T, A> { |
1722 | fn drop(&mut self) { |
1723 | while self.0.inner.pop().is_some() {} |
1724 | } |
1725 | } |
1726 | |
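// Arm a guard before dropping each item: if `drop(item)` panics, the
// guard clears the remaining elements; on the normal path the guard is
// disarmed with `mem::forget`, so it only ever runs on unwind.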
while let Some(item) = self.inner.pop() {
let guard = DropGuard(self);
1729 | drop(item); |
1730 | mem::forget(guard); |
1731 | } |
1732 | } |
1733 | } |
1734 | |
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
impl<T: Ord, A: Allocator> Iterator for DrainSorted<'_, T, A> {
type Item = T;

#[inline]
fn next(&mut self) -> Option<T> {
self.inner.pop()
}

#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = self.inner.len();
1747 | (exact, Some(exact)) |
1748 | } |
1749 | } |
1750 | |
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1752 | impl<T: Ord, A: Allocator> ExactSizeIterator for DrainSorted<'_, T, A> {} |
1753 | |
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1755 | impl<T: Ord, A: Allocator> FusedIterator for DrainSorted<'_, T, A> {} |
1756 | |
#[unstable(feature = "trusted_len", issue = "37572")]
1758 | unsafe impl<T: Ord, A: Allocator> TrustedLen for DrainSorted<'_, T, A> {} |
1759 | |
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1761 | impl<T: Ord, A: Allocator> From<Vec<T, A>> for BinaryHeap<T, A> { |
1762 | /// Converts a `Vec<T>` into a `BinaryHeap<T>`. |
1763 | /// |
1764 | /// This conversion happens in-place, and has *O*(*n*) time complexity. |
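///
/// # Examples
///
/// A short example of the conversion:
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from(vec![5, 3, 8]);
/// assert_eq!(heap.peek(), Some(&8));
/// ```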
1765 | fn from(vec: Vec<T, A>) -> BinaryHeap<T, A> { |
let mut heap = BinaryHeap { data: vec };
1767 | heap.rebuild(); |
1768 | heap |
1769 | } |
1770 | } |
1771 | |
#[stable(feature = "std_collections_from_array", since = "1.56.0")]
1773 | impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> { |
1774 | /// ``` |
1775 | /// use std::collections::BinaryHeap; |
1776 | /// |
1777 | /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]); |
1778 | /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into(); |
1779 | /// while let Some((a, b)) = h1.pop().zip(h2.pop()) { |
1780 | /// assert_eq!(a, b); |
1781 | /// } |
1782 | /// ``` |
1783 | fn from(arr: [T; N]) -> Self { |
1784 | Self::from_iter(arr) |
1785 | } |
1786 | } |
1787 | |
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1789 | impl<T, A: Allocator> From<BinaryHeap<T, A>> for Vec<T, A> { |
1790 | /// Converts a `BinaryHeap<T>` into a `Vec<T>`. |
1791 | /// |
1792 | /// This conversion requires no data movement or allocation, and has |
1793 | /// constant time complexity. |
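///
/// # Examples
///
/// A short example; the resulting vector holds the heap's elements in its
/// internal layout order, not sorted order:
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 4, 2]);
/// let vec: Vec<i32> = heap.into();
/// assert_eq!(vec.len(), 3);
/// ```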
1794 | fn from(heap: BinaryHeap<T, A>) -> Vec<T, A> { |
1795 | heap.data |
1796 | } |
1797 | } |
1798 | |
#[stable(feature = "rust1", since = "1.0.0")]
1800 | impl<T: Ord> FromIterator<T> for BinaryHeap<T> { |
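/// Builds a heap by first collecting the iterator into a `Vec` and then
/// heapifying it in place, which is *O*(*n*). A short example:
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap: BinaryHeap<i32> = (1..=5).collect();
/// assert_eq!(heap.peek(), Some(&5));
/// ```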
1801 | fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> { |
1802 | BinaryHeap::from(iter.into_iter().collect::<Vec<_>>()) |
1803 | } |
1804 | } |
1805 | |
#[stable(feature = "rust1", since = "1.0.0")]
1807 | impl<T, A: Allocator> IntoIterator for BinaryHeap<T, A> { |
1808 | type Item = T; |
1809 | type IntoIter = IntoIter<T, A>; |
1810 | |
1811 | /// Creates a consuming iterator, that is, one that moves each value out of |
1812 | /// the binary heap in arbitrary order. The binary heap cannot be used |
1813 | /// after calling this. |
1814 | /// |
1815 | /// # Examples |
1816 | /// |
1817 | /// Basic usage: |
1818 | /// |
1819 | /// ``` |
1820 | /// use std::collections::BinaryHeap; |
1821 | /// let heap = BinaryHeap::from([1, 2, 3, 4]); |
1822 | /// |
1823 | /// // Print 1, 2, 3, 4 in arbitrary order |
1824 | /// for x in heap.into_iter() { |
1825 | /// // x has type i32, not &i32 |
/// println!("{x}");
1827 | /// } |
1828 | /// ``` |
1829 | fn into_iter(self) -> IntoIter<T, A> { |
1830 | IntoIter { iter: self.data.into_iter() } |
1831 | } |
1832 | } |
1833 | |
#[stable(feature = "rust1", since = "1.0.0")]
1835 | impl<'a, T, A: Allocator> IntoIterator for &'a BinaryHeap<T, A> { |
1836 | type Item = &'a T; |
1837 | type IntoIter = Iter<'a, T>; |
1838 | |
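/// Creates a borrowing iterator over the heap's elements in arbitrary
/// order, leaving the heap intact; equivalent to calling [`BinaryHeap::iter()`].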
1839 | fn into_iter(self) -> Iter<'a, T> { |
1840 | self.iter() |
1841 | } |
1842 | } |
1843 | |
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Extend<T> for BinaryHeap<T, A> {
#[inline]
1847 | fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) { |
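// Append everything first, then restore the heap invariant once: the
// guard rebuilds from `rebuild_from` on drop, even if the iterator panics.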
let guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
1849 | guard.heap.data.extend(iter); |
1850 | } |
1851 | |
#[inline]
1853 | fn extend_one(&mut self, item: T) { |
1854 | self.push(item); |
1855 | } |
1856 | |
#[inline]
1858 | fn extend_reserve(&mut self, additional: usize) { |
1859 | self.reserve(additional); |
1860 | } |
1861 | } |
1862 | |
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy, A: Allocator> Extend<&'a T> for BinaryHeap<T, A> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.extend(iter.into_iter().cloned());
1867 | } |
1868 | |
#[inline]
fn extend_one(&mut self, &item: &'a T) {
1871 | self.push(item); |
1872 | } |
1873 | |
#[inline]
1875 | fn extend_reserve(&mut self, additional: usize) { |
1876 | self.reserve(additional); |
1877 | } |
1878 | } |
1879 | |