1 | //! A priority queue implemented with a binary heap. |
2 | //! |
3 | //! Insertion and popping the largest element have *O*(log(*n*)) time complexity. |
4 | //! Checking the largest element is *O*(1). Converting a vector to a binary heap |
5 | //! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be |
//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* \* log(*n*))
7 | //! in-place heapsort. |
8 | //! |
9 | //! # Examples |
10 | //! |
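//! A quick sketch of the in-place conversions mentioned above: `From<Vec<T>>`
//! heapifies a vector in *O*(*n*), and [`BinaryHeap::into_sorted_vec`] turns
//! the heap back into an ascending vector, which together make an in-place
//! heapsort.
//!
//! ```
//! use std::collections::BinaryHeap;
//!
//! // Heapify in-place in O(n).
//! let heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
//!
//! // Sort in-place in O(n * log(n)) -- an in-place heapsort.
//! assert_eq!(heap.into_sorted_vec(), [1, 1, 3, 4, 5]);
//! ```
//!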
11 | //! This is a larger example that implements [Dijkstra's algorithm][dijkstra] |
12 | //! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph]. |
13 | //! It shows how to use [`BinaryHeap`] with custom types. |
14 | //! |
15 | //! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm |
16 | //! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem |
17 | //! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph |
18 | //! |
19 | //! ``` |
20 | //! use std::cmp::Ordering; |
21 | //! use std::collections::BinaryHeap; |
22 | //! |
23 | //! #[derive(Copy, Clone, Eq, PartialEq)] |
24 | //! struct State { |
25 | //! cost: usize, |
26 | //! position: usize, |
27 | //! } |
28 | //! |
29 | //! // The priority queue depends on `Ord`. |
30 | //! // Explicitly implement the trait so the queue becomes a min-heap |
31 | //! // instead of a max-heap. |
32 | //! impl Ord for State { |
33 | //! fn cmp(&self, other: &Self) -> Ordering { |
//! // Notice that we flip the ordering on costs.
35 | //! // In case of a tie we compare positions - this step is necessary |
36 | //! // to make implementations of `PartialEq` and `Ord` consistent. |
37 | //! other.cost.cmp(&self.cost) |
38 | //! .then_with(|| self.position.cmp(&other.position)) |
39 | //! } |
40 | //! } |
41 | //! |
42 | //! // `PartialOrd` needs to be implemented as well. |
43 | //! impl PartialOrd for State { |
44 | //! fn partial_cmp(&self, other: &Self) -> Option<Ordering> { |
45 | //! Some(self.cmp(other)) |
46 | //! } |
47 | //! } |
48 | //! |
49 | //! // Each node is represented as a `usize`, for a shorter implementation. |
50 | //! struct Edge { |
51 | //! node: usize, |
52 | //! cost: usize, |
53 | //! } |
54 | //! |
55 | //! // Dijkstra's shortest path algorithm. |
56 | //! |
57 | //! // Start at `start` and use `dist` to track the current shortest distance |
58 | //! // to each node. This implementation isn't memory-efficient as it may leave duplicate |
59 | //! // nodes in the queue. It also uses `usize::MAX` as a sentinel value, |
60 | //! // for a simpler implementation. |
61 | //! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> { |
62 | //! // dist[node] = current shortest distance from `start` to `node` |
63 | //! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect(); |
64 | //! |
65 | //! let mut heap = BinaryHeap::new(); |
66 | //! |
67 | //! // We're at `start`, with a zero cost |
68 | //! dist[start] = 0; |
69 | //! heap.push(State { cost: 0, position: start }); |
70 | //! |
71 | //! // Examine the frontier with lower cost nodes first (min-heap) |
72 | //! while let Some(State { cost, position }) = heap.pop() { |
73 | //! // Alternatively we could have continued to find all shortest paths |
74 | //! if position == goal { return Some(cost); } |
75 | //! |
76 | //! // Important as we may have already found a better way |
77 | //! if cost > dist[position] { continue; } |
78 | //! |
79 | //! // For each node we can reach, see if we can find a way with |
80 | //! // a lower cost going through this node |
81 | //! for edge in &adj_list[position] { |
82 | //! let next = State { cost: cost + edge.cost, position: edge.node }; |
83 | //! |
84 | //! // If so, add it to the frontier and continue |
85 | //! if next.cost < dist[next.position] { |
86 | //! heap.push(next); |
87 | //! // Relaxation, we have now found a better way |
88 | //! dist[next.position] = next.cost; |
89 | //! } |
90 | //! } |
91 | //! } |
92 | //! |
93 | //! // Goal not reachable |
94 | //! None |
95 | //! } |
96 | //! |
97 | //! fn main() { |
98 | //! // This is the directed graph we're going to use. |
99 | //! // The node numbers correspond to the different states, |
100 | //! // and the edge weights symbolize the cost of moving |
101 | //! // from one node to another. |
102 | //! // Note that the edges are one-way. |
103 | //! // |
104 | //! // 7 |
105 | //! // +-----------------+ |
106 | //! // | | |
107 | //! // v 1 2 | 2 |
108 | //! // 0 -----> 1 -----> 3 ---> 4 |
109 | //! // | ^ ^ ^ |
110 | //! // | | 1 | | |
111 | //! // | | | 3 | 1 |
112 | //! // +------> 2 -------+ | |
113 | //! // 10 | | |
114 | //! // +---------------+ |
115 | //! // |
116 | //! // The graph is represented as an adjacency list where each index, |
117 | //! // corresponding to a node value, has a list of outgoing edges. |
//! // It is chosen for its efficiency.
119 | //! let graph = vec![ |
120 | //! // Node 0 |
121 | //! vec![Edge { node: 2, cost: 10 }, |
122 | //! Edge { node: 1, cost: 1 }], |
123 | //! // Node 1 |
124 | //! vec![Edge { node: 3, cost: 2 }], |
125 | //! // Node 2 |
126 | //! vec![Edge { node: 1, cost: 1 }, |
127 | //! Edge { node: 3, cost: 3 }, |
128 | //! Edge { node: 4, cost: 1 }], |
129 | //! // Node 3 |
130 | //! vec![Edge { node: 0, cost: 7 }, |
131 | //! Edge { node: 4, cost: 2 }], |
132 | //! // Node 4 |
133 | //! vec![]]; |
134 | //! |
135 | //! assert_eq!(shortest_path(&graph, 0, 1), Some(1)); |
136 | //! assert_eq!(shortest_path(&graph, 0, 3), Some(3)); |
137 | //! assert_eq!(shortest_path(&graph, 3, 0), Some(7)); |
138 | //! assert_eq!(shortest_path(&graph, 0, 4), Some(5)); |
139 | //! assert_eq!(shortest_path(&graph, 4, 0), None); |
140 | //! } |
141 | //! ``` |
142 | |
#![allow(missing_docs)]
#![stable(feature = "rust1", since = "1.0.0")]
145 | |
146 | use core::alloc::Allocator; |
147 | use core::fmt; |
148 | use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen}; |
149 | use core::mem::{self, swap, ManuallyDrop}; |
150 | use core::num::NonZero; |
151 | use core::ops::{Deref, DerefMut}; |
152 | use core::ptr; |
153 | |
154 | use crate::alloc::Global; |
155 | use crate::collections::TryReserveError; |
156 | use crate::slice; |
157 | use crate::vec::{self, AsVecIntoIter, Vec}; |
158 | |
#[cfg(test)]
160 | mod tests; |
161 | |
162 | /// A priority queue implemented with a binary heap. |
163 | /// |
164 | /// This will be a max-heap. |
165 | /// |
166 | /// It is a logic error for an item to be modified in such a way that the |
167 | /// item's ordering relative to any other item, as determined by the [`Ord`] |
168 | /// trait, changes while it is in the heap. This is normally only possible |
169 | /// through interior mutability, global state, I/O, or unsafe code. The |
170 | /// behavior resulting from such a logic error is not specified, but will |
171 | /// be encapsulated to the `BinaryHeap` that observed the logic error and not |
172 | /// result in undefined behavior. This could include panics, incorrect results, |
173 | /// aborts, memory leaks, and non-termination. |
174 | /// |
/// As long as no elements change their relative order while being in the heap
/// as described above, the API of `BinaryHeap` guarantees that the heap
/// invariant remains intact, i.e. its methods all behave as documented. For
/// example, if a method is documented as iterating in sorted order, that's
/// guaranteed to work as long as elements in the heap have not changed order,
/// even in the presence of closures being unwound out of, iterators getting
/// leaked, and similar foolishness.
182 | /// |
183 | /// # Examples |
184 | /// |
185 | /// ``` |
186 | /// use std::collections::BinaryHeap; |
187 | /// |
188 | /// // Type inference lets us omit an explicit type signature (which |
189 | /// // would be `BinaryHeap<i32>` in this example). |
190 | /// let mut heap = BinaryHeap::new(); |
191 | /// |
192 | /// // We can use peek to look at the next item in the heap. In this case, |
/// // there are no items in there yet, so we get None.
194 | /// assert_eq!(heap.peek(), None); |
195 | /// |
196 | /// // Let's add some scores... |
197 | /// heap.push(1); |
198 | /// heap.push(5); |
199 | /// heap.push(2); |
200 | /// |
201 | /// // Now peek shows the most important item in the heap. |
202 | /// assert_eq!(heap.peek(), Some(&5)); |
203 | /// |
204 | /// // We can check the length of a heap. |
205 | /// assert_eq!(heap.len(), 3); |
206 | /// |
207 | /// // We can iterate over the items in the heap, although they are returned in |
208 | /// // a random order. |
209 | /// for x in &heap { |
/// println!("{x}");
211 | /// } |
212 | /// |
213 | /// // If we instead pop these scores, they should come back in order. |
214 | /// assert_eq!(heap.pop(), Some(5)); |
215 | /// assert_eq!(heap.pop(), Some(2)); |
216 | /// assert_eq!(heap.pop(), Some(1)); |
217 | /// assert_eq!(heap.pop(), None); |
218 | /// |
219 | /// // We can clear the heap of any remaining items. |
220 | /// heap.clear(); |
221 | /// |
222 | /// // The heap should now be empty. |
223 | /// assert!(heap.is_empty()) |
224 | /// ``` |
225 | /// |
226 | /// A `BinaryHeap` with a known list of items can be initialized from an array: |
227 | /// |
228 | /// ``` |
229 | /// use std::collections::BinaryHeap; |
230 | /// |
231 | /// let heap = BinaryHeap::from([1, 5, 2]); |
232 | /// ``` |
233 | /// |
234 | /// ## Min-heap |
235 | /// |
236 | /// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to |
237 | /// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest |
238 | /// value instead of the greatest one. |
239 | /// |
240 | /// ``` |
241 | /// use std::collections::BinaryHeap; |
242 | /// use std::cmp::Reverse; |
243 | /// |
244 | /// let mut heap = BinaryHeap::new(); |
245 | /// |
246 | /// // Wrap values in `Reverse` |
247 | /// heap.push(Reverse(1)); |
248 | /// heap.push(Reverse(5)); |
249 | /// heap.push(Reverse(2)); |
250 | /// |
251 | /// // If we pop these scores now, they should come back in the reverse order. |
252 | /// assert_eq!(heap.pop(), Some(Reverse(1))); |
253 | /// assert_eq!(heap.pop(), Some(Reverse(2))); |
254 | /// assert_eq!(heap.pop(), Some(Reverse(5))); |
255 | /// assert_eq!(heap.pop(), None); |
256 | /// ``` |
257 | /// |
258 | /// # Time complexity |
259 | /// |
260 | /// | [push] | [pop] | [peek]/[peek\_mut] | |
261 | /// |---------|---------------|--------------------| |
262 | /// | *O*(1)~ | *O*(log(*n*)) | *O*(1) | |
263 | /// |
264 | /// The value for `push` is an expected cost; the method documentation gives a |
265 | /// more detailed analysis. |
266 | /// |
267 | /// [`core::cmp::Reverse`]: core::cmp::Reverse |
268 | /// [`Cell`]: core::cell::Cell |
269 | /// [`RefCell`]: core::cell::RefCell |
270 | /// [push]: BinaryHeap::push |
271 | /// [pop]: BinaryHeap::pop |
272 | /// [peek]: BinaryHeap::peek |
273 | /// [peek\_mut]: BinaryHeap::peek_mut |
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
276 | pub struct BinaryHeap< |
277 | T, |
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
279 | > { |
280 | data: Vec<T, A>, |
281 | } |
282 | |
283 | /// Structure wrapping a mutable reference to the greatest item on a |
284 | /// `BinaryHeap`. |
285 | /// |
286 | /// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See |
287 | /// its documentation for more. |
288 | /// |
289 | /// [`peek_mut`]: BinaryHeap::peek_mut |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
291 | pub struct PeekMut< |
292 | 'a, |
293 | T: 'a + Ord, |
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
295 | > { |
296 | heap: &'a mut BinaryHeap<T, A>, |
297 | // If a set_len + sift_down are required, this is Some. If a &mut T has not |
298 | // yet been exposed to peek_mut()'s caller, it's None. |
299 | original_len: Option<NonZero<usize>>, |
300 | } |
301 | |
#[stable(feature = "collection_debug", since = "1.17.0")]
303 | impl<T: Ord + fmt::Debug, A: Allocator> fmt::Debug for PeekMut<'_, T, A> { |
304 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
306 | } |
307 | } |
308 | |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
310 | impl<T: Ord, A: Allocator> Drop for PeekMut<'_, T, A> { |
311 | fn drop(&mut self) { |
if let Some(original_len) = self.original_len {
313 | // SAFETY: That's how many elements were in the Vec at the time of |
314 | // the PeekMut::deref_mut call, and therefore also at the time of |
315 | // the BinaryHeap::peek_mut call. Since the PeekMut did not end up |
316 | // getting leaked, we are now undoing the leak amplification that |
317 | // the DerefMut prepared for. |
unsafe { self.heap.data.set_len(original_len.get()) };
319 | |
320 | // SAFETY: PeekMut is only instantiated for non-empty heaps. |
unsafe { self.heap.sift_down(0) };
322 | } |
323 | } |
324 | } |
325 | |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
327 | impl<T: Ord, A: Allocator> Deref for PeekMut<'_, T, A> { |
328 | type Target = T; |
329 | fn deref(&self) -> &T { |
330 | debug_assert!(!self.heap.is_empty()); |
331 | // SAFE: PeekMut is only instantiated for non-empty heaps |
unsafe { self.heap.data.get_unchecked(0) }
333 | } |
334 | } |
335 | |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
337 | impl<T: Ord, A: Allocator> DerefMut for PeekMut<'_, T, A> { |
338 | fn deref_mut(&mut self) -> &mut T { |
339 | debug_assert!(!self.heap.is_empty()); |
340 | |
341 | let len = self.heap.len(); |
342 | if len > 1 { |
343 | // Here we preemptively leak all the rest of the underlying vector |
344 | // after the currently max element. If the caller mutates the &mut T |
345 | // we're about to give them, and then leaks the PeekMut, all these |
346 | // elements will remain leaked. If they don't leak the PeekMut, then |
347 | // either Drop or PeekMut::pop will un-leak the vector elements. |
348 | // |
// This technique is described throughout several other places in
350 | // the standard library as "leak amplification". |
351 | unsafe { |
352 | // SAFETY: len > 1 so len != 0. |
353 | self.original_len = Some(NonZero::new_unchecked(len)); |
354 | // SAFETY: len > 1 so all this does for now is leak elements, |
355 | // which is safe. |
356 | self.heap.data.set_len(1); |
357 | } |
358 | } |
359 | |
360 | // SAFE: PeekMut is only instantiated for non-empty heaps |
361 | unsafe { self.heap.data.get_unchecked_mut(0) } |
362 | } |
363 | } |
364 | |
365 | impl<'a, T: Ord, A: Allocator> PeekMut<'a, T, A> { |
366 | /// Removes the peeked value from the heap and returns it. |
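///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::collections::BinaryHeap;
/// use std::collections::binary_heap::PeekMut;
///
/// let mut heap = BinaryHeap::from([1, 2, 3]);
///
/// // Pop through the `PeekMut` handle, consuming it.
/// let greatest = PeekMut::pop(heap.peek_mut().unwrap());
/// assert_eq!(greatest, 3);
/// assert_eq!(heap.len(), 2);
/// ```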
#[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
368 | pub fn pop(mut this: PeekMut<'a, T, A>) -> T { |
if let Some(original_len) = this.original_len.take() {
370 | // SAFETY: This is how many elements were in the Vec at the time of |
371 | // the BinaryHeap::peek_mut call. |
unsafe { this.heap.data.set_len(original_len.get()) };
373 | |
374 | // Unlike in Drop, here we don't also need to do a sift_down even if |
375 | // the caller could've mutated the element. It is removed from the |
376 | // heap on the next line and pop() is not sensitive to its value. |
377 | } |
378 | this.heap.pop().unwrap() |
379 | } |
380 | } |
381 | |
#[stable(feature = "rust1", since = "1.0.0")]
383 | impl<T: Clone, A: Allocator + Clone> Clone for BinaryHeap<T, A> { |
384 | fn clone(&self) -> Self { |
385 | BinaryHeap { data: self.data.clone() } |
386 | } |
387 | |
388 | /// Overwrites the contents of `self` with a clone of the contents of `source`. |
389 | /// |
390 | /// This method is preferred over simply assigning `source.clone()` to `self`, |
391 | /// as it avoids reallocation if possible. |
392 | /// |
393 | /// See [`Vec::clone_from()`] for more details. |
394 | fn clone_from(&mut self, source: &Self) { |
395 | self.data.clone_from(&source.data); |
396 | } |
397 | } |
398 | |
#[stable(feature = "rust1", since = "1.0.0")]
400 | impl<T: Ord> Default for BinaryHeap<T> { |
401 | /// Creates an empty `BinaryHeap<T>`. |
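///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap: BinaryHeap<i32> = Default::default();
/// assert!(heap.is_empty());
/// ```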
#[inline]
403 | fn default() -> BinaryHeap<T> { |
404 | BinaryHeap::new() |
405 | } |
406 | } |
407 | |
#[stable(feature = "binaryheap_debug", since = "1.4.0")]
409 | impl<T: fmt::Debug, A: Allocator> fmt::Debug for BinaryHeap<T, A> { |
410 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
411 | f.debug_list().entries(self.iter()).finish() |
412 | } |
413 | } |
414 | |
415 | struct RebuildOnDrop< |
416 | 'a, |
417 | T: Ord, |
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
419 | > { |
420 | heap: &'a mut BinaryHeap<T, A>, |
421 | rebuild_from: usize, |
422 | } |
423 | |
424 | impl<T: Ord, A: Allocator> Drop for RebuildOnDrop<'_, T, A> { |
425 | fn drop(&mut self) { |
426 | self.heap.rebuild_tail(self.rebuild_from); |
427 | } |
428 | } |
429 | |
430 | impl<T: Ord> BinaryHeap<T> { |
431 | /// Creates an empty `BinaryHeap` as a max-heap. |
432 | /// |
433 | /// # Examples |
434 | /// |
435 | /// Basic usage: |
436 | /// |
437 | /// ``` |
438 | /// use std::collections::BinaryHeap; |
439 | /// let mut heap = BinaryHeap::new(); |
440 | /// heap.push(4); |
441 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_binary_heap_constructor", issue = "112353")]
#[must_use]
445 | pub const fn new() -> BinaryHeap<T> { |
446 | BinaryHeap { data: vec![] } |
447 | } |
448 | |
449 | /// Creates an empty `BinaryHeap` with at least the specified capacity. |
450 | /// |
451 | /// The binary heap will be able to hold at least `capacity` elements without |
452 | /// reallocating. This method is allowed to allocate for more elements than |
453 | /// `capacity`. If `capacity` is 0, the binary heap will not allocate. |
454 | /// |
455 | /// # Examples |
456 | /// |
457 | /// Basic usage: |
458 | /// |
459 | /// ``` |
460 | /// use std::collections::BinaryHeap; |
461 | /// let mut heap = BinaryHeap::with_capacity(10); |
462 | /// heap.push(4); |
463 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
466 | pub fn with_capacity(capacity: usize) -> BinaryHeap<T> { |
467 | BinaryHeap { data: Vec::with_capacity(capacity) } |
468 | } |
469 | } |
470 | |
471 | impl<T: Ord, A: Allocator> BinaryHeap<T, A> { |
472 | /// Creates an empty `BinaryHeap` as a max-heap, using `A` as allocator. |
473 | /// |
474 | /// # Examples |
475 | /// |
476 | /// Basic usage: |
477 | /// |
478 | /// ``` |
479 | /// #![feature(allocator_api)] |
480 | /// |
481 | /// use std::alloc::System; |
482 | /// use std::collections::BinaryHeap; |
483 | /// let mut heap = BinaryHeap::new_in(System); |
484 | /// heap.push(4); |
485 | /// ``` |
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_binary_heap_constructor", issue = "112353")]
#[must_use]
489 | pub const fn new_in(alloc: A) -> BinaryHeap<T, A> { |
490 | BinaryHeap { data: Vec::new_in(alloc) } |
491 | } |
492 | |
493 | /// Creates an empty `BinaryHeap` with at least the specified capacity, using `A` as allocator. |
494 | /// |
495 | /// The binary heap will be able to hold at least `capacity` elements without |
496 | /// reallocating. This method is allowed to allocate for more elements than |
497 | /// `capacity`. If `capacity` is 0, the binary heap will not allocate. |
498 | /// |
499 | /// # Examples |
500 | /// |
501 | /// Basic usage: |
502 | /// |
503 | /// ``` |
504 | /// #![feature(allocator_api)] |
505 | /// |
506 | /// use std::alloc::System; |
507 | /// use std::collections::BinaryHeap; |
508 | /// let mut heap = BinaryHeap::with_capacity_in(10, System); |
509 | /// heap.push(4); |
510 | /// ``` |
#[unstable(feature = "allocator_api", issue = "32838")]
#[must_use]
513 | pub fn with_capacity_in(capacity: usize, alloc: A) -> BinaryHeap<T, A> { |
514 | BinaryHeap { data: Vec::with_capacity_in(capacity, alloc) } |
515 | } |
516 | |
517 | /// Returns a mutable reference to the greatest item in the binary heap, or |
518 | /// `None` if it is empty. |
519 | /// |
520 | /// Note: If the `PeekMut` value is leaked, some heap elements might get |
521 | /// leaked along with it, but the remaining elements will remain a valid |
522 | /// heap. |
523 | /// |
524 | /// # Examples |
525 | /// |
526 | /// Basic usage: |
527 | /// |
528 | /// ``` |
529 | /// use std::collections::BinaryHeap; |
530 | /// let mut heap = BinaryHeap::new(); |
531 | /// assert!(heap.peek_mut().is_none()); |
532 | /// |
533 | /// heap.push(1); |
534 | /// heap.push(5); |
535 | /// heap.push(2); |
536 | /// { |
537 | /// let mut val = heap.peek_mut().unwrap(); |
538 | /// *val = 0; |
539 | /// } |
540 | /// assert_eq!(heap.peek(), Some(&2)); |
541 | /// ``` |
542 | /// |
543 | /// # Time complexity |
544 | /// |
545 | /// If the item is modified then the worst case time complexity is *O*(log(*n*)), |
546 | /// otherwise it's *O*(1). |
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
548 | pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, A>> { |
549 | if self.is_empty() { None } else { Some(PeekMut { heap: self, original_len: None }) } |
550 | } |
551 | |
552 | /// Removes the greatest item from the binary heap and returns it, or `None` if it |
553 | /// is empty. |
554 | /// |
555 | /// # Examples |
556 | /// |
557 | /// Basic usage: |
558 | /// |
559 | /// ``` |
560 | /// use std::collections::BinaryHeap; |
561 | /// let mut heap = BinaryHeap::from([1, 3]); |
562 | /// |
563 | /// assert_eq!(heap.pop(), Some(3)); |
564 | /// assert_eq!(heap.pop(), Some(1)); |
565 | /// assert_eq!(heap.pop(), None); |
566 | /// ``` |
567 | /// |
568 | /// # Time complexity |
569 | /// |
570 | /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)). |
#[stable(feature = "rust1", since = "1.0.0")]
572 | pub fn pop(&mut self) -> Option<T> { |
573 | self.data.pop().map(|mut item| { |
574 | if !self.is_empty() { |
575 | swap(&mut item, &mut self.data[0]); |
576 | // SAFETY: !self.is_empty() means that self.len() > 0 |
577 | unsafe { self.sift_down_to_bottom(0) }; |
578 | } |
579 | item |
580 | }) |
581 | } |
582 | |
583 | /// Pushes an item onto the binary heap. |
584 | /// |
585 | /// # Examples |
586 | /// |
587 | /// Basic usage: |
588 | /// |
589 | /// ``` |
590 | /// use std::collections::BinaryHeap; |
591 | /// let mut heap = BinaryHeap::new(); |
592 | /// heap.push(3); |
593 | /// heap.push(5); |
594 | /// heap.push(1); |
595 | /// |
596 | /// assert_eq!(heap.len(), 3); |
597 | /// assert_eq!(heap.peek(), Some(&5)); |
598 | /// ``` |
599 | /// |
600 | /// # Time complexity |
601 | /// |
602 | /// The expected cost of `push`, averaged over every possible ordering of |
603 | /// the elements being pushed, and over a sufficiently large number of |
604 | /// pushes, is *O*(1). This is the most meaningful cost metric when pushing |
605 | /// elements that are *not* already in any sorted pattern. |
606 | /// |
607 | /// The time complexity degrades if elements are pushed in predominantly |
608 | /// ascending order. In the worst case, elements are pushed in ascending |
609 | /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap |
610 | /// containing *n* elements. |
611 | /// |
612 | /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case |
613 | /// occurs when capacity is exhausted and needs a resize. The resize cost |
614 | /// has been amortized in the previous figures. |
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_confusables("append", "put")]
617 | pub fn push(&mut self, item: T) { |
618 | let old_len = self.len(); |
619 | self.data.push(item); |
620 | // SAFETY: Since we pushed a new item it means that |
621 | // old_len = self.len() - 1 < self.len() |
622 | unsafe { self.sift_up(0, old_len) }; |
623 | } |
624 | |
625 | /// Consumes the `BinaryHeap` and returns a vector in sorted |
626 | /// (ascending) order. |
627 | /// |
628 | /// # Examples |
629 | /// |
630 | /// Basic usage: |
631 | /// |
632 | /// ``` |
633 | /// use std::collections::BinaryHeap; |
634 | /// |
635 | /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]); |
636 | /// heap.push(6); |
637 | /// heap.push(3); |
638 | /// |
639 | /// let vec = heap.into_sorted_vec(); |
640 | /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]); |
641 | /// ``` |
#[must_use = "`self` will be dropped if the result is not used"]
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
644 | pub fn into_sorted_vec(mut self) -> Vec<T, A> { |
645 | let mut end = self.len(); |
646 | while end > 1 { |
647 | end -= 1; |
648 | // SAFETY: `end` goes from `self.len() - 1` to 1 (both included), |
649 | // so it's always a valid index to access. |
650 | // It is safe to access index 0 (i.e. `ptr`), because |
651 | // 1 <= end < self.len(), which means self.len() >= 2. |
652 | unsafe { |
653 | let ptr = self.data.as_mut_ptr(); |
654 | ptr::swap(ptr, ptr.add(end)); |
655 | } |
656 | // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so: |
657 | // 0 < 1 <= end <= self.len() - 1 < self.len() |
658 | // Which means 0 < end and end < self.len(). |
659 | unsafe { self.sift_down_range(0, end) }; |
660 | } |
661 | self.into_vec() |
662 | } |
663 | |
664 | // The implementations of sift_up and sift_down use unsafe blocks in |
665 | // order to move an element out of the vector (leaving behind a |
666 | // hole), shift along the others and move the removed element back into the |
667 | // vector at the final location of the hole. |
668 | // The `Hole` type is used to represent this, and make sure |
669 | // the hole is filled back at the end of its scope, even on panic. |
670 | // Using a hole reduces the constant factor compared to using swaps, |
671 | // which involves twice as many moves. |
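// (A swap is three moves, so sifting an element `d` levels by swapping
// costs about `3 * d` moves; with a hole it is one move per level plus
// the initial read and final write, about `d + 2` moves.)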
672 | |
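/// Take an element at `pos` and move it up the heap,
/// while it is greater than its parent.
///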
673 | /// # Safety |
674 | /// |
675 | /// The caller must guarantee that `pos < self.len()`. |
676 | unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize { |
677 | // Take out the value at `pos` and create a hole. |
678 | // SAFETY: The caller guarantees that pos < self.len() |
679 | let mut hole = unsafe { Hole::new(&mut self.data, pos) }; |
680 | |
681 | while hole.pos() > start { |
682 | let parent = (hole.pos() - 1) / 2; |
683 | |
684 | // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0 |
685 | // and so hole.pos() - 1 can't underflow. |
686 | // This guarantees that parent < hole.pos() so |
687 | // it's a valid index and also != hole.pos(). |
688 | if hole.element() <= unsafe { hole.get(parent) } { |
689 | break; |
690 | } |
691 | |
692 | // SAFETY: Same as above |
693 | unsafe { hole.move_to(parent) }; |
694 | } |
695 | |
696 | hole.pos() |
697 | } |
698 | |
699 | /// Take an element at `pos` and move it down the heap, |
700 | /// while its children are larger. |
701 | /// |
702 | /// # Safety |
703 | /// |
704 | /// The caller must guarantee that `pos < end <= self.len()`. |
705 | unsafe fn sift_down_range(&mut self, pos: usize, end: usize) { |
706 | // SAFETY: The caller guarantees that pos < end <= self.len(). |
707 | let mut hole = unsafe { Hole::new(&mut self.data, pos) }; |
708 | let mut child = 2 * hole.pos() + 1; |
709 | |
710 | // Loop invariant: child == 2 * hole.pos() + 1. |
711 | while child <= end.saturating_sub(2) { |
712 | // compare with the greater of the two children |
713 | // SAFETY: child < end - 1 < self.len() and |
714 | // child + 1 < end <= self.len(), so they're valid indexes. |
715 | // child == 2 * hole.pos() + 1 != hole.pos() and |
716 | // child + 1 == 2 * hole.pos() + 2 != hole.pos(). |
717 | // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow |
718 | // if T is a ZST |
719 | child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize; |
720 | |
721 | // if we are already in order, stop. |
722 | // SAFETY: child is now either the old child or the old child+1 |
// We have already proven that both are < self.len() and != hole.pos().
724 | if hole.element() >= unsafe { hole.get(child) } { |
725 | return; |
726 | } |
727 | |
728 | // SAFETY: same as above. |
729 | unsafe { hole.move_to(child) }; |
730 | child = 2 * hole.pos() + 1; |
731 | } |
732 | |
733 | // SAFETY: && short circuit, which means that in the |
734 | // second condition it's already true that child == end - 1 < self.len(). |
735 | if child == end - 1 && hole.element() < unsafe { hole.get(child) } { |
736 | // SAFETY: child is already proven to be a valid index and |
737 | // child == 2 * hole.pos() + 1 != hole.pos(). |
738 | unsafe { hole.move_to(child) }; |
739 | } |
740 | } |
741 | |
742 | /// # Safety |
743 | /// |
744 | /// The caller must guarantee that `pos < self.len()`. |
745 | unsafe fn sift_down(&mut self, pos: usize) { |
746 | let len = self.len(); |
747 | // SAFETY: pos < len is guaranteed by the caller and |
748 | // obviously len = self.len() <= self.len(). |
749 | unsafe { self.sift_down_range(pos, len) }; |
750 | } |
751 | |
752 | /// Take an element at `pos` and move it all the way down the heap, |
753 | /// then sift it up to its position. |
754 | /// |
755 | /// Note: This is faster when the element is known to be large / should |
756 | /// be closer to the bottom. |
757 | /// |
758 | /// # Safety |
759 | /// |
760 | /// The caller must guarantee that `pos < self.len()`. |
761 | unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) { |
762 | let end = self.len(); |
763 | let start = pos; |
764 | |
765 | // SAFETY: The caller guarantees that pos < self.len(). |
766 | let mut hole = unsafe { Hole::new(&mut self.data, pos) }; |
767 | let mut child = 2 * hole.pos() + 1; |
768 | |
769 | // Loop invariant: child == 2 * hole.pos() + 1. |
770 | while child <= end.saturating_sub(2) { |
771 | // SAFETY: child < end - 1 < self.len() and |
772 | // child + 1 < end <= self.len(), so they're valid indexes. |
773 | // child == 2 * hole.pos() + 1 != hole.pos() and |
774 | // child + 1 == 2 * hole.pos() + 2 != hole.pos(). |
775 | // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow |
776 | // if T is a ZST |
777 | child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize; |
778 | |
779 | // SAFETY: Same as above |
780 | unsafe { hole.move_to(child) }; |
781 | child = 2 * hole.pos() + 1; |
782 | } |
783 | |
784 | if child == end - 1 { |
785 | // SAFETY: child == end - 1 < self.len(), so it's a valid index |
786 | // and child == 2 * hole.pos() + 1 != hole.pos(). |
787 | unsafe { hole.move_to(child) }; |
788 | } |
789 | pos = hole.pos(); |
790 | drop(hole); |
791 | |
792 | // SAFETY: pos is the position in the hole and was already proven |
793 | // to be a valid index. |
794 | unsafe { self.sift_up(start, pos) }; |
795 | } |
796 | |
797 | /// Rebuild assuming data[0..start] is still a proper heap. |
798 | fn rebuild_tail(&mut self, start: usize) { |
799 | if start == self.len() { |
800 | return; |
801 | } |
802 | |
803 | let tail_len = self.len() - start; |
804 | |
#[inline(always)]
806 | fn log2_fast(x: usize) -> usize { |
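// Floor of log2 for x >= 1; e.g. log2_fast(1) == 0 and log2_fast(10) == 3.
// Callers only pass `start >= 1`, so the subtraction cannot underflow.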
807 | (usize::BITS - x.leading_zeros() - 1) as usize |
808 | } |
809 | |
810 | // `rebuild` takes O(self.len()) operations |
811 | // and about 2 * self.len() comparisons in the worst case |
812 | // while repeating `sift_up` takes O(tail_len * log(start)) operations |
813 | // and about 1 * tail_len * log_2(start) comparisons in the worst case, |
814 | // assuming start >= tail_len. For larger heaps, the crossover point |
815 | // no longer follows this reasoning and was determined empirically. |
816 | let better_to_rebuild = if start < tail_len { |
817 | true |
818 | } else if self.len() <= 2048 { |
819 | 2 * self.len() < tail_len * log2_fast(start) |
820 | } else { |
821 | 2 * self.len() < tail_len * 11 |
822 | }; |
823 | |
824 | if better_to_rebuild { |
825 | self.rebuild(); |
826 | } else { |
827 | for i in start..self.len() { |
828 | // SAFETY: The index `i` is always less than self.len(). |
829 | unsafe { self.sift_up(0, i) }; |
830 | } |
831 | } |
832 | } |
833 | |
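/// Rebuilds the whole heap in O(self.len()) by sifting down every node
/// that has at least one child, deepest parents first.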
834 | fn rebuild(&mut self) { |
835 | let mut n = self.len() / 2; |
836 | while n > 0 { |
837 | n -= 1; |
838 | // SAFETY: n starts from self.len() / 2 and goes down to 0. |
839 | // The only case when !(n < self.len()) is if |
840 | // self.len() == 0, but it's ruled out by the loop condition. |
841 | unsafe { self.sift_down(n) }; |
842 | } |
843 | } |
844 | |
845 | /// Moves all the elements of `other` into `self`, leaving `other` empty. |
846 | /// |
847 | /// # Examples |
848 | /// |
849 | /// Basic usage: |
850 | /// |
851 | /// ``` |
852 | /// use std::collections::BinaryHeap; |
853 | /// |
854 | /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]); |
855 | /// let mut b = BinaryHeap::from([-20, 5, 43]); |
856 | /// |
857 | /// a.append(&mut b); |
858 | /// |
859 | /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); |
860 | /// assert!(b.is_empty()); |
861 | /// ``` |
862 | #[stable (feature = "binary_heap_append" , since = "1.11.0" )] |
863 | pub fn append(&mut self, other: &mut Self) { |
864 | if self.len() < other.len() { |
865 | swap(self, other); |
866 | } |
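// Keep the larger heap in `self` and append the shorter one, so that
// `rebuild_tail` below has the smaller tail to fix up.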
867 | |
868 | let start = self.data.len(); |
869 | |
870 | self.data.append(&mut other.data); |
871 | |
872 | self.rebuild_tail(start); |
873 | } |
874 | |
875 | /// Clears the binary heap, returning an iterator over the removed elements |
876 | /// in heap order. If the iterator is dropped before being fully consumed, |
877 | /// it drops the remaining elements in heap order. |
878 | /// |
879 | /// The returned iterator keeps a mutable borrow on the heap to optimize |
880 | /// its implementation. |
881 | /// |
882 | /// Note: |
883 | /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`. |
884 | /// You should use the latter for most cases. |
885 | /// |
886 | /// # Examples |
887 | /// |
888 | /// Basic usage: |
889 | /// |
890 | /// ``` |
891 | /// #![feature(binary_heap_drain_sorted)] |
892 | /// use std::collections::BinaryHeap; |
893 | /// |
894 | /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]); |
895 | /// assert_eq!(heap.len(), 5); |
896 | /// |
897 | /// drop(heap.drain_sorted()); // removes all elements in heap order |
898 | /// assert_eq!(heap.len(), 0); |
899 | /// ``` |
#[inline]
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
902 | pub fn drain_sorted(&mut self) -> DrainSorted<'_, T, A> { |
903 | DrainSorted { inner: self } |
904 | } |
905 | |
906 | /// Retains only the elements specified by the predicate. |
907 | /// |
908 | /// In other words, remove all elements `e` for which `f(&e)` returns |
909 | /// `false`. The elements are visited in unsorted (and unspecified) order. |
910 | /// |
911 | /// # Examples |
912 | /// |
913 | /// Basic usage: |
914 | /// |
915 | /// ``` |
916 | /// use std::collections::BinaryHeap; |
917 | /// |
918 | /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]); |
919 | /// |
920 | /// heap.retain(|x| x % 2 == 0); // only keep even numbers |
921 | /// |
922 | /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4]) |
923 | /// ``` |
#[stable(feature = "binary_heap_retain", since = "1.70.0")]
925 | pub fn retain<F>(&mut self, mut f: F) |
926 | where |
927 | F: FnMut(&T) -> bool, |
928 | { |
// `rebuild_from` will be updated to the first touched element below, and the rebuild will
// only be done for the tail.
931 | let mut guard = RebuildOnDrop { rebuild_from: self.len(), heap: self }; |
932 | let mut i = 0; |
933 | |
934 | guard.heap.data.retain(|e| { |
935 | let keep = f(e); |
936 | if !keep && i < guard.rebuild_from { |
937 | guard.rebuild_from = i; |
938 | } |
939 | i += 1; |
940 | keep |
941 | }); |
942 | } |
943 | } |
944 | |
945 | impl<T, A: Allocator> BinaryHeap<T, A> { |
946 | /// Returns an iterator visiting all values in the underlying vector, in |
947 | /// arbitrary order. |
948 | /// |
949 | /// # Examples |
950 | /// |
951 | /// Basic usage: |
952 | /// |
953 | /// ``` |
954 | /// use std::collections::BinaryHeap; |
955 | /// let heap = BinaryHeap::from([1, 2, 3, 4]); |
956 | /// |
957 | /// // Print 1, 2, 3, 4 in arbitrary order |
958 | /// for x in heap.iter() { |
/// println!("{x}");
960 | /// } |
961 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
963 | pub fn iter(&self) -> Iter<'_, T> { |
964 | Iter { iter: self.data.iter() } |
965 | } |
966 | |
967 | /// Returns an iterator which retrieves elements in heap order. |
968 | /// This method consumes the original heap. |
969 | /// |
970 | /// # Examples |
971 | /// |
972 | /// Basic usage: |
973 | /// |
974 | /// ``` |
975 | /// #![feature(binary_heap_into_iter_sorted)] |
976 | /// use std::collections::BinaryHeap; |
977 | /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]); |
978 | /// |
979 | /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]); |
980 | /// ``` |
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
982 | pub fn into_iter_sorted(self) -> IntoIterSorted<T, A> { |
983 | IntoIterSorted { inner: self } |
984 | } |
985 | |
986 | /// Returns the greatest item in the binary heap, or `None` if it is empty. |
987 | /// |
988 | /// # Examples |
989 | /// |
990 | /// Basic usage: |
991 | /// |
992 | /// ``` |
993 | /// use std::collections::BinaryHeap; |
994 | /// let mut heap = BinaryHeap::new(); |
995 | /// assert_eq!(heap.peek(), None); |
996 | /// |
997 | /// heap.push(1); |
998 | /// heap.push(5); |
999 | /// heap.push(2); |
1000 | /// assert_eq!(heap.peek(), Some(&5)); |
1001 | /// |
1002 | /// ``` |
1003 | /// |
1004 | /// # Time complexity |
1005 | /// |
1006 | /// Cost is *O*(1) in the worst case. |
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
1009 | pub fn peek(&self) -> Option<&T> { |
1010 | self.data.get(0) |
1011 | } |
1012 | |
1013 | /// Returns the number of elements the binary heap can hold without reallocating. |
1014 | /// |
1015 | /// # Examples |
1016 | /// |
1017 | /// Basic usage: |
1018 | /// |
1019 | /// ``` |
1020 | /// use std::collections::BinaryHeap; |
1021 | /// let mut heap = BinaryHeap::with_capacity(100); |
1022 | /// assert!(heap.capacity() >= 100); |
1023 | /// heap.push(4); |
1024 | /// ``` |
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
1027 | pub fn capacity(&self) -> usize { |
1028 | self.data.capacity() |
1029 | } |
1030 | |
1031 | /// Reserves the minimum capacity for at least `additional` elements more than |
1032 | /// the current length. Unlike [`reserve`], this will not |
1033 | /// deliberately over-allocate to speculatively avoid frequent allocations. |
1034 | /// After calling `reserve_exact`, capacity will be greater than or equal to |
1035 | /// `self.len() + additional`. Does nothing if the capacity is already |
1036 | /// sufficient. |
1037 | /// |
1038 | /// [`reserve`]: BinaryHeap::reserve |
1039 | /// |
1040 | /// # Panics |
1041 | /// |
1042 | /// Panics if the new capacity overflows [`usize`]. |
1043 | /// |
1044 | /// # Examples |
1045 | /// |
1046 | /// Basic usage: |
1047 | /// |
1048 | /// ``` |
1049 | /// use std::collections::BinaryHeap; |
1050 | /// let mut heap = BinaryHeap::new(); |
1051 | /// heap.reserve_exact(100); |
1052 | /// assert!(heap.capacity() >= 100); |
1053 | /// heap.push(4); |
1054 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
1058 | pub fn reserve_exact(&mut self, additional: usize) { |
1059 | self.data.reserve_exact(additional); |
1060 | } |
1061 | |
1062 | /// Reserves capacity for at least `additional` elements more than the |
1063 | /// current length. The allocator may reserve more space to speculatively |
1064 | /// avoid frequent allocations. After calling `reserve`, |
1065 | /// capacity will be greater than or equal to `self.len() + additional`. |
1066 | /// Does nothing if capacity is already sufficient. |
1067 | /// |
1068 | /// # Panics |
1069 | /// |
1070 | /// Panics if the new capacity overflows [`usize`]. |
1071 | /// |
1072 | /// # Examples |
1073 | /// |
1074 | /// Basic usage: |
1075 | /// |
1076 | /// ``` |
1077 | /// use std::collections::BinaryHeap; |
1078 | /// let mut heap = BinaryHeap::new(); |
1079 | /// heap.reserve(100); |
1080 | /// assert!(heap.capacity() >= 100); |
1081 | /// heap.push(4); |
1082 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
1084 | pub fn reserve(&mut self, additional: usize) { |
1085 | self.data.reserve(additional); |
1086 | } |
1087 | |
1088 | /// Tries to reserve the minimum capacity for at least `additional` elements |
1089 | /// more than the current length. Unlike [`try_reserve`], this will not |
1090 | /// deliberately over-allocate to speculatively avoid frequent allocations. |
1091 | /// After calling `try_reserve_exact`, capacity will be greater than or |
1092 | /// equal to `self.len() + additional` if it returns `Ok(())`. |
1093 | /// Does nothing if the capacity is already sufficient. |
1094 | /// |
1095 | /// Note that the allocator may give the collection more space than it |
1096 | /// requests. Therefore, capacity can not be relied upon to be precisely |
1097 | /// minimal. Prefer [`try_reserve`] if future insertions are expected. |
1098 | /// |
1099 | /// [`try_reserve`]: BinaryHeap::try_reserve |
1100 | /// |
1101 | /// # Errors |
1102 | /// |
1103 | /// If the capacity overflows, or the allocator reports a failure, then an error |
1104 | /// is returned. |
1105 | /// |
1106 | /// # Examples |
1107 | /// |
1108 | /// ``` |
1109 | /// use std::collections::BinaryHeap; |
1110 | /// use std::collections::TryReserveError; |
1111 | /// |
1112 | /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> { |
1113 | /// let mut heap = BinaryHeap::new(); |
1114 | /// |
1115 | /// // Pre-reserve the memory, exiting if we can't |
1116 | /// heap.try_reserve_exact(data.len())?; |
1117 | /// |
1118 | /// // Now we know this can't OOM in the middle of our complex work |
1119 | /// heap.extend(data.iter()); |
1120 | /// |
1121 | /// Ok(heap.pop()) |
1122 | /// } |
/// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
1124 | /// ``` |
#[stable(feature = "try_reserve_2", since = "1.63.0")]
1126 | pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { |
1127 | self.data.try_reserve_exact(additional) |
1128 | } |
1129 | |
1130 | /// Tries to reserve capacity for at least `additional` elements more than the |
1131 | /// current length. The allocator may reserve more space to speculatively |
1132 | /// avoid frequent allocations. After calling `try_reserve`, capacity will be |
1133 | /// greater than or equal to `self.len() + additional` if it returns |
1134 | /// `Ok(())`. Does nothing if capacity is already sufficient. This method |
1135 | /// preserves the contents even if an error occurs. |
1136 | /// |
1137 | /// # Errors |
1138 | /// |
1139 | /// If the capacity overflows, or the allocator reports a failure, then an error |
1140 | /// is returned. |
1141 | /// |
1142 | /// # Examples |
1143 | /// |
1144 | /// ``` |
1145 | /// use std::collections::BinaryHeap; |
1146 | /// use std::collections::TryReserveError; |
1147 | /// |
1148 | /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> { |
1149 | /// let mut heap = BinaryHeap::new(); |
1150 | /// |
1151 | /// // Pre-reserve the memory, exiting if we can't |
1152 | /// heap.try_reserve(data.len())?; |
1153 | /// |
1154 | /// // Now we know this can't OOM in the middle of our complex work |
1155 | /// heap.extend(data.iter()); |
1156 | /// |
1157 | /// Ok(heap.pop()) |
1158 | /// } |
/// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
1160 | /// ``` |
#[stable(feature = "try_reserve_2", since = "1.63.0")]
1162 | pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { |
1163 | self.data.try_reserve(additional) |
1164 | } |
1165 | |
1166 | /// Discards as much additional capacity as possible. |
1167 | /// |
1168 | /// # Examples |
1169 | /// |
1170 | /// Basic usage: |
1171 | /// |
1172 | /// ``` |
1173 | /// use std::collections::BinaryHeap; |
1174 | /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100); |
1175 | /// |
1176 | /// assert!(heap.capacity() >= 100); |
1177 | /// heap.shrink_to_fit(); |
1178 | /// assert!(heap.capacity() == 0); |
1179 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
1181 | pub fn shrink_to_fit(&mut self) { |
1182 | self.data.shrink_to_fit(); |
1183 | } |
1184 | |
1185 | /// Discards capacity with a lower bound. |
1186 | /// |
1187 | /// The capacity will remain at least as large as both the length |
1188 | /// and the supplied value. |
1189 | /// |
1190 | /// If the current capacity is less than the lower limit, this is a no-op. |
1191 | /// |
1192 | /// # Examples |
1193 | /// |
1194 | /// ``` |
1195 | /// use std::collections::BinaryHeap; |
1196 | /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100); |
1197 | /// |
1198 | /// assert!(heap.capacity() >= 100); |
1199 | /// heap.shrink_to(10); |
1200 | /// assert!(heap.capacity() >= 10); |
1201 | /// ``` |
#[inline]
#[stable(feature = "shrink_to", since = "1.56.0")]
1204 | pub fn shrink_to(&mut self, min_capacity: usize) { |
1205 | self.data.shrink_to(min_capacity) |
1206 | } |
1207 | |
1208 | /// Returns a slice of all values in the underlying vector, in arbitrary |
1209 | /// order. |
1210 | /// |
1211 | /// # Examples |
1212 | /// |
1213 | /// Basic usage: |
1214 | /// |
1215 | /// ``` |
1216 | /// #![feature(binary_heap_as_slice)] |
1217 | /// use std::collections::BinaryHeap; |
1218 | /// use std::io::{self, Write}; |
1219 | /// |
1220 | /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]); |
1221 | /// |
1222 | /// io::sink().write(heap.as_slice()).unwrap(); |
1223 | /// ``` |
#[must_use]
#[unstable(feature = "binary_heap_as_slice", issue = "83659")]
1226 | pub fn as_slice(&self) -> &[T] { |
1227 | self.data.as_slice() |
1228 | } |
1229 | |
1230 | /// Consumes the `BinaryHeap` and returns the underlying vector |
1231 | /// in arbitrary order. |
1232 | /// |
1233 | /// # Examples |
1234 | /// |
1235 | /// Basic usage: |
1236 | /// |
1237 | /// ``` |
1238 | /// use std::collections::BinaryHeap; |
1239 | /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]); |
1240 | /// let vec = heap.into_vec(); |
1241 | /// |
1242 | /// // Will print in some order |
1243 | /// for x in vec { |
/// println!("{x}");
1245 | /// } |
1246 | /// ``` |
#[must_use = "`self` will be dropped if the result is not used"]
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
1249 | pub fn into_vec(self) -> Vec<T, A> { |
1250 | self.into() |
1251 | } |
1252 | |
1253 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
1256 | pub fn allocator(&self) -> &A { |
1257 | self.data.allocator() |
1258 | } |
1259 | |
1260 | /// Returns the length of the binary heap. |
1261 | /// |
1262 | /// # Examples |
1263 | /// |
1264 | /// Basic usage: |
1265 | /// |
1266 | /// ``` |
1267 | /// use std::collections::BinaryHeap; |
1268 | /// let heap = BinaryHeap::from([1, 3]); |
1269 | /// |
1270 | /// assert_eq!(heap.len(), 2); |
1271 | /// ``` |
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_confusables("length", "size")]
1275 | pub fn len(&self) -> usize { |
1276 | self.data.len() |
1277 | } |
1278 | |
1279 | /// Checks if the binary heap is empty. |
1280 | /// |
1281 | /// # Examples |
1282 | /// |
1283 | /// Basic usage: |
1284 | /// |
1285 | /// ``` |
1286 | /// use std::collections::BinaryHeap; |
1287 | /// let mut heap = BinaryHeap::new(); |
1288 | /// |
1289 | /// assert!(heap.is_empty()); |
1290 | /// |
1291 | /// heap.push(3); |
1292 | /// heap.push(5); |
1293 | /// heap.push(1); |
1294 | /// |
1295 | /// assert!(!heap.is_empty()); |
1296 | /// ``` |
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
1299 | pub fn is_empty(&self) -> bool { |
1300 | self.len() == 0 |
1301 | } |
1302 | |
1303 | /// Clears the binary heap, returning an iterator over the removed elements |
1304 | /// in arbitrary order. If the iterator is dropped before being fully |
1305 | /// consumed, it drops the remaining elements in arbitrary order. |
1306 | /// |
1307 | /// The returned iterator keeps a mutable borrow on the heap to optimize |
1308 | /// its implementation. |
1309 | /// |
1310 | /// # Examples |
1311 | /// |
1312 | /// Basic usage: |
1313 | /// |
1314 | /// ``` |
1315 | /// use std::collections::BinaryHeap; |
1316 | /// let mut heap = BinaryHeap::from([1, 3]); |
1317 | /// |
1318 | /// assert!(!heap.is_empty()); |
1319 | /// |
1320 | /// for x in heap.drain() { |
/// println!("{x}");
1322 | /// } |
1323 | /// |
1324 | /// assert!(heap.is_empty()); |
1325 | /// ``` |
#[inline]
#[stable(feature = "drain", since = "1.6.0")]
1328 | pub fn drain(&mut self) -> Drain<'_, T, A> { |
1329 | Drain { iter: self.data.drain(..) } |
1330 | } |
1331 | |
1332 | /// Drops all items from the binary heap. |
1333 | /// |
1334 | /// # Examples |
1335 | /// |
1336 | /// Basic usage: |
1337 | /// |
1338 | /// ``` |
1339 | /// use std::collections::BinaryHeap; |
1340 | /// let mut heap = BinaryHeap::from([1, 3]); |
1341 | /// |
1342 | /// assert!(!heap.is_empty()); |
1343 | /// |
1344 | /// heap.clear(); |
1345 | /// |
1346 | /// assert!(heap.is_empty()); |
1347 | /// ``` |
#[stable(feature = "rust1", since = "1.0.0")]
1349 | pub fn clear(&mut self) { |
1350 | self.drain(); |
1351 | } |
1352 | } |
1353 | |
1354 | /// Hole represents a hole in a slice i.e., an index without valid value |
1355 | /// (because it was moved from or duplicated). |
1356 | /// In drop, `Hole` will restore the slice by filling the hole |
1357 | /// position with the value that was originally removed. |
1358 | struct Hole<'a, T: 'a> { |
1359 | data: &'a mut [T], |
1360 | elt: ManuallyDrop<T>, |
1361 | pos: usize, |
1362 | } |
1363 | |
1364 | impl<'a, T> Hole<'a, T> { |
1365 | /// Create a new `Hole` at index `pos`. |
1366 | /// |
1367 | /// Unsafe because pos must be within the data slice. |
#[inline]
1369 | unsafe fn new(data: &'a mut [T], pos: usize) -> Self { |
1370 | debug_assert!(pos < data.len()); |
1371 | // SAFE: pos should be inside the slice |
1372 | let elt = unsafe { ptr::read(data.get_unchecked(pos)) }; |
1373 | Hole { data, elt: ManuallyDrop::new(elt), pos } |
1374 | } |
1375 | |
#[inline]
1377 | fn pos(&self) -> usize { |
1378 | self.pos |
1379 | } |
1380 | |
1381 | /// Returns a reference to the element removed. |
#[inline]
1383 | fn element(&self) -> &T { |
1384 | &self.elt |
1385 | } |
1386 | |
1387 | /// Returns a reference to the element at `index`. |
1388 | /// |
1389 | /// Unsafe because index must be within the data slice and not equal to pos. |
#[inline]
1391 | unsafe fn get(&self, index: usize) -> &T { |
1392 | debug_assert!(index != self.pos); |
1393 | debug_assert!(index < self.data.len()); |
1394 | unsafe { self.data.get_unchecked(index) } |
1395 | } |
1396 | |
1397 | /// Move hole to new location |
1398 | /// |
1399 | /// Unsafe because index must be within the data slice and not equal to pos. |
#[inline]
1401 | unsafe fn move_to(&mut self, index: usize) { |
1402 | debug_assert!(index != self.pos); |
1403 | debug_assert!(index < self.data.len()); |
1404 | unsafe { |
1405 | let ptr = self.data.as_mut_ptr(); |
1406 | let index_ptr: *const _ = ptr.add(index); |
1407 | let hole_ptr = ptr.add(self.pos); |
1408 | ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1); |
1409 | } |
1410 | self.pos = index; |
1411 | } |
1412 | } |
1413 | |
1414 | impl<T> Drop for Hole<'_, T> { |
#[inline]
1416 | fn drop(&mut self) { |
1417 | // fill the hole again |
1418 | unsafe { |
let pos = self.pos;
ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
1421 | } |
1422 | } |
1423 | } |
1424 | |
1425 | /// An iterator over the elements of a `BinaryHeap`. |
1426 | /// |
1427 | /// This `struct` is created by [`BinaryHeap::iter()`]. See its |
1428 | /// documentation for more. |
1429 | /// |
1430 | /// [`iter`]: BinaryHeap::iter |
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
1433 | pub struct Iter<'a, T: 'a> { |
1434 | iter: slice::Iter<'a, T>, |
1435 | } |
1436 | |
#[stable(feature = "collection_debug", since = "1.17.0")]
1438 | impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> { |
1439 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
1441 | } |
1442 | } |
1443 | |
1444 | // FIXME(#26925) Remove in favor of `#[derive(Clone)]` |
#[stable(feature = "rust1", since = "1.0.0")]
1446 | impl<T> Clone for Iter<'_, T> { |
1447 | fn clone(&self) -> Self { |
1448 | Iter { iter: self.iter.clone() } |
1449 | } |
1450 | } |
1451 | |
#[stable(feature = "rust1", since = "1.0.0")]
1453 | impl<'a, T> Iterator for Iter<'a, T> { |
1454 | type Item = &'a T; |
1455 | |
#[inline]
1457 | fn next(&mut self) -> Option<&'a T> { |
1458 | self.iter.next() |
1459 | } |
1460 | |
#[inline]
1462 | fn size_hint(&self) -> (usize, Option<usize>) { |
1463 | self.iter.size_hint() |
1464 | } |
1465 | |
#[inline]
1467 | fn last(self) -> Option<&'a T> { |
1468 | self.iter.last() |
1469 | } |
1470 | } |
1471 | |
#[stable(feature = "rust1", since = "1.0.0")]
1473 | impl<'a, T> DoubleEndedIterator for Iter<'a, T> { |
#[inline]
1475 | fn next_back(&mut self) -> Option<&'a T> { |
1476 | self.iter.next_back() |
1477 | } |
1478 | } |
1479 | |
#[stable(feature = "rust1", since = "1.0.0")]
1481 | impl<T> ExactSizeIterator for Iter<'_, T> { |
1482 | fn is_empty(&self) -> bool { |
1483 | self.iter.is_empty() |
1484 | } |
1485 | } |
1486 | |
#[stable(feature = "fused", since = "1.26.0")]
1488 | impl<T> FusedIterator for Iter<'_, T> {} |
1489 | |
1490 | /// An owning iterator over the elements of a `BinaryHeap`. |
1491 | /// |
1492 | /// This `struct` is created by [`BinaryHeap::into_iter()`] |
1493 | /// (provided by the [`IntoIterator`] trait). See its documentation for more. |
1494 | /// |
1495 | /// [`into_iter`]: BinaryHeap::into_iter |
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
1498 | pub struct IntoIter< |
1499 | T, |
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
1501 | > { |
1502 | iter: vec::IntoIter<T, A>, |
1503 | } |
1504 | |
1505 | impl<T, A: Allocator> IntoIter<T, A> { |
1506 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
1508 | pub fn allocator(&self) -> &A { |
1509 | self.iter.allocator() |
1510 | } |
1511 | } |
1512 | |
#[stable(feature = "collection_debug", since = "1.17.0")]
1514 | impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> { |
1515 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
1517 | } |
1518 | } |
1519 | |
#[stable(feature = "rust1", since = "1.0.0")]
1521 | impl<T, A: Allocator> Iterator for IntoIter<T, A> { |
1522 | type Item = T; |
1523 | |
#[inline]
1525 | fn next(&mut self) -> Option<T> { |
1526 | self.iter.next() |
1527 | } |
1528 | |
#[inline]
1530 | fn size_hint(&self) -> (usize, Option<usize>) { |
1531 | self.iter.size_hint() |
1532 | } |
1533 | } |
1534 | |
#[stable(feature = "rust1", since = "1.0.0")]
1536 | impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { |
#[inline]
1538 | fn next_back(&mut self) -> Option<T> { |
1539 | self.iter.next_back() |
1540 | } |
1541 | } |
1542 | |
#[stable(feature = "rust1", since = "1.0.0")]
1544 | impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> { |
1545 | fn is_empty(&self) -> bool { |
1546 | self.iter.is_empty() |
1547 | } |
1548 | } |
1549 | |
1550 | #[stable (feature = "fused" , since = "1.26.0" )] |
1551 | impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {} |
1552 | |
#[doc(hidden)]
#[unstable(issue = "none", feature = "trusted_fused")]
1555 | unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {} |
1556 | |
1557 | #[stable (feature = "default_iters" , since = "1.70.0" )] |
1558 | impl<T> Default for IntoIter<T> { |
1559 | /// Creates an empty `binary_heap::IntoIter`. |
1560 | /// |
1561 | /// ``` |
1562 | /// # use std::collections::binary_heap; |
1563 | /// let iter: binary_heap::IntoIter<u8> = Default::default(); |
1564 | /// assert_eq!(iter.len(), 0); |
1565 | /// ``` |
1566 | fn default() -> Self { |
1567 | IntoIter { iter: Default::default() } |
1568 | } |
1569 | } |
1570 | |
// In addition to the SAFETY invariants of the following three unsafe traits,
// also refer to the `vec::in_place_collect` module documentation for an overview.
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
1575 | unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> { |
1576 | type Source = IntoIter<T, A>; |
1577 | |
#[inline]
1579 | unsafe fn as_inner(&mut self) -> &mut Self::Source { |
1580 | self |
1581 | } |
1582 | } |
1583 | |
1584 | #[unstable (issue = "none" , feature = "inplace_iteration" )] |
1585 | #[doc (hidden)] |
1586 | unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> { |
1587 | const EXPAND_BY: Option<NonZero<usize>> = NonZero::new(1); |
1588 | const MERGE_BY: Option<NonZero<usize>> = NonZero::new(1); |
1589 | } |
1590 | |
1591 | unsafe impl<I> AsVecIntoIter for IntoIter<I> { |
1592 | type Item = I; |
1593 | |
1594 | fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> { |
1595 | &mut self.iter |
1596 | } |
1597 | } |
1598 | |
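/// An owning iterator over the elements of a `BinaryHeap`, yielding them in
/// heap order (largest first for a max-heap).
///
/// This `struct` is created by [`BinaryHeap::into_iter_sorted()`]. See its
/// documentation for more.
///
/// # Examples
///
/// A sketch of the intended use; this iterator is nightly-only, behind the
/// `binary_heap_into_iter_sorted` feature:
///
/// ```
/// #![feature(binary_heap_into_iter_sorted)]
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 3, 2]);
/// assert_eq!(heap.into_iter_sorted().collect::<Vec<_>>(), [3, 2, 1]);
/// ```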
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
1602 | pub struct IntoIterSorted< |
1603 | T, |
1604 | #[unstable (feature = "allocator_api" , issue = "32838" )] A: Allocator = Global, |
1605 | > { |
1606 | inner: BinaryHeap<T, A>, |
1607 | } |
1608 | |
1609 | impl<T, A: Allocator> IntoIterSorted<T, A> { |
1610 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
1612 | pub fn allocator(&self) -> &A { |
1613 | self.inner.allocator() |
1614 | } |
1615 | } |
1616 | |
1617 | #[unstable (feature = "binary_heap_into_iter_sorted" , issue = "59278" )] |
1618 | impl<T: Ord, A: Allocator> Iterator for IntoIterSorted<T, A> { |
1619 | type Item = T; |
1620 | |
#[inline]
1622 | fn next(&mut self) -> Option<T> { |
1623 | self.inner.pop() |
1624 | } |
1625 | |
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = self.inner.len();
1629 | (exact, Some(exact)) |
1630 | } |
1631 | } |
1632 | |
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1634 | impl<T: Ord, A: Allocator> ExactSizeIterator for IntoIterSorted<T, A> {} |
1635 | |
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
1637 | impl<T: Ord, A: Allocator> FusedIterator for IntoIterSorted<T, A> {} |
1638 | |
1639 | #[unstable (feature = "trusted_len" , issue = "37572" )] |
1640 | unsafe impl<T: Ord, A: Allocator> TrustedLen for IntoIterSorted<T, A> {} |
1641 | |
1642 | /// A draining iterator over the elements of a `BinaryHeap`. |
1643 | /// |
1644 | /// This `struct` is created by [`BinaryHeap::drain()`]. See its |
1645 | /// documentation for more. |
1646 | /// |
1647 | /// [`drain`]: BinaryHeap::drain |
1648 | #[stable (feature = "drain" , since = "1.6.0" )] |
1649 | #[derive (Debug)] |
1650 | pub struct Drain< |
1651 | 'a, |
1652 | T: 'a, |
1653 | #[unstable (feature = "allocator_api" , issue = "32838" )] A: Allocator = Global, |
1654 | > { |
1655 | iter: vec::Drain<'a, T, A>, |
1656 | } |
1657 | |
1658 | impl<T, A: Allocator> Drain<'_, T, A> { |
1659 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
1661 | pub fn allocator(&self) -> &A { |
1662 | self.iter.allocator() |
1663 | } |
1664 | } |
1665 | |
1666 | #[stable (feature = "drain" , since = "1.6.0" )] |
1667 | impl<T, A: Allocator> Iterator for Drain<'_, T, A> { |
1668 | type Item = T; |
1669 | |
#[inline]
1671 | fn next(&mut self) -> Option<T> { |
1672 | self.iter.next() |
1673 | } |
1674 | |
#[inline]
1676 | fn size_hint(&self) -> (usize, Option<usize>) { |
1677 | self.iter.size_hint() |
1678 | } |
1679 | } |
1680 | |
1681 | #[stable (feature = "drain" , since = "1.6.0" )] |
1682 | impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> { |
#[inline]
1684 | fn next_back(&mut self) -> Option<T> { |
1685 | self.iter.next_back() |
1686 | } |
1687 | } |
1688 | |
1689 | #[stable (feature = "drain" , since = "1.6.0" )] |
1690 | impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> { |
1691 | fn is_empty(&self) -> bool { |
1692 | self.iter.is_empty() |
1693 | } |
1694 | } |
1695 | |
1696 | #[stable (feature = "fused" , since = "1.26.0" )] |
1697 | impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {} |
1698 | |
1699 | /// A draining iterator over the elements of a `BinaryHeap`. |
1700 | /// |
1701 | /// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its |
1702 | /// documentation for more. |
1703 | /// |
1704 | /// [`drain_sorted`]: BinaryHeap::drain_sorted |
1705 | #[unstable (feature = "binary_heap_drain_sorted" , issue = "59278" )] |
1706 | #[derive (Debug)] |
1707 | pub struct DrainSorted< |
1708 | 'a, |
1709 | T: Ord, |
1710 | #[unstable (feature = "allocator_api" , issue = "32838" )] A: Allocator = Global, |
1711 | > { |
1712 | inner: &'a mut BinaryHeap<T, A>, |
1713 | } |
1714 | |
1715 | impl<'a, T: Ord, A: Allocator> DrainSorted<'a, T, A> { |
1716 | /// Returns a reference to the underlying allocator. |
#[unstable(feature = "allocator_api", issue = "32838")]
1718 | pub fn allocator(&self) -> &A { |
1719 | self.inner.allocator() |
1720 | } |
1721 | } |
1722 | |
1723 | #[unstable (feature = "binary_heap_drain_sorted" , issue = "59278" )] |
1724 | impl<'a, T: Ord, A: Allocator> Drop for DrainSorted<'a, T, A> { |
1725 | /// Removes heap elements in heap order. |
1726 | fn drop(&mut self) { |
1727 | struct DropGuard<'r, 'a, T: Ord, A: Allocator>(&'r mut DrainSorted<'a, T, A>); |
1728 | |
1729 | impl<'r, 'a, T: Ord, A: Allocator> Drop for DropGuard<'r, 'a, T, A> { |
1730 | fn drop(&mut self) { |
1731 | while self.0.inner.pop().is_some() {} |
1732 | } |
1733 | } |
1734 | |
1735 | while let Some(item: T) = self.inner.pop() { |
1736 | let guard: DropGuard<'_, '_, T, A> = DropGuard(self); |
1737 | drop(item); |
1738 | mem::forget(guard); |
1739 | } |
1740 | } |
1741 | } |
1742 | |
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1744 | impl<T: Ord, A: Allocator> Iterator for DrainSorted<'_, T, A> { |
1745 | type Item = T; |
1746 | |
#[inline]
1748 | fn next(&mut self) -> Option<T> { |
1749 | self.inner.pop() |
1750 | } |
1751 | |
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = self.inner.len();
1755 | (exact, Some(exact)) |
1756 | } |
1757 | } |
1758 | |
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1760 | impl<T: Ord, A: Allocator> ExactSizeIterator for DrainSorted<'_, T, A> {} |
1761 | |
#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
1763 | impl<T: Ord, A: Allocator> FusedIterator for DrainSorted<'_, T, A> {} |
1764 | |
1765 | #[unstable (feature = "trusted_len" , issue = "37572" )] |
1766 | unsafe impl<T: Ord, A: Allocator> TrustedLen for DrainSorted<'_, T, A> {} |
1767 | |
1768 | #[stable (feature = "binary_heap_extras_15" , since = "1.5.0" )] |
1769 | impl<T: Ord, A: Allocator> From<Vec<T, A>> for BinaryHeap<T, A> { |
1770 | /// Converts a `Vec<T>` into a `BinaryHeap<T>`. |
1771 | /// |
1772 | /// This conversion happens in-place, and has *O*(*n*) time complexity. |
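///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from(vec![1, 4, 2]);
/// assert_eq!(heap.peek(), Some(&4));
/// ```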
1773 | fn from(vec: Vec<T, A>) -> BinaryHeap<T, A> { |
let mut heap = BinaryHeap { data: vec };
1775 | heap.rebuild(); |
1776 | heap |
1777 | } |
1778 | } |
1779 | |
1780 | #[stable (feature = "std_collections_from_array" , since = "1.56.0" )] |
1781 | impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> { |
1782 | /// ``` |
1783 | /// use std::collections::BinaryHeap; |
1784 | /// |
1785 | /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]); |
1786 | /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into(); |
1787 | /// while let Some((a, b)) = h1.pop().zip(h2.pop()) { |
1788 | /// assert_eq!(a, b); |
1789 | /// } |
1790 | /// ``` |
1791 | fn from(arr: [T; N]) -> Self { |
1792 | Self::from_iter(arr) |
1793 | } |
1794 | } |
1795 | |
1796 | #[stable (feature = "binary_heap_extras_15" , since = "1.5.0" )] |
1797 | impl<T, A: Allocator> From<BinaryHeap<T, A>> for Vec<T, A> { |
1798 | /// Converts a `BinaryHeap<T>` into a `Vec<T>`. |
1799 | /// |
1800 | /// This conversion requires no data movement or allocation, and has |
1801 | /// constant time complexity. |
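///
/// # Examples
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 4, 2]);
/// let mut vec = Vec::from(heap);
/// // The vector keeps the heap's internal (arbitrary) order, so sort before comparing.
/// vec.sort();
/// assert_eq!(vec, [1, 2, 4]);
/// ```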
1802 | fn from(heap: BinaryHeap<T, A>) -> Vec<T, A> { |
1803 | heap.data |
1804 | } |
1805 | } |
1806 | |
#[stable(feature = "rust1", since = "1.0.0")]
1808 | impl<T: Ord> FromIterator<T> for BinaryHeap<T> { |
1809 | fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> { |
1810 | BinaryHeap::from(iter.into_iter().collect::<Vec<_>>()) |
1811 | } |
1812 | } |
1813 | |
#[stable(feature = "rust1", since = "1.0.0")]
1815 | impl<T, A: Allocator> IntoIterator for BinaryHeap<T, A> { |
1816 | type Item = T; |
1817 | type IntoIter = IntoIter<T, A>; |
1818 | |
1819 | /// Creates a consuming iterator, that is, one that moves each value out of |
1820 | /// the binary heap in arbitrary order. The binary heap cannot be used |
1821 | /// after calling this. |
1822 | /// |
1823 | /// # Examples |
1824 | /// |
1825 | /// Basic usage: |
1826 | /// |
1827 | /// ``` |
1828 | /// use std::collections::BinaryHeap; |
1829 | /// let heap = BinaryHeap::from([1, 2, 3, 4]); |
1830 | /// |
1831 | /// // Print 1, 2, 3, 4 in arbitrary order |
1832 | /// for x in heap.into_iter() { |
1833 | /// // x has type i32, not &i32 |
println!("{x}");
1835 | /// } |
1836 | /// ``` |
1837 | fn into_iter(self) -> IntoIter<T, A> { |
1838 | IntoIter { iter: self.data.into_iter() } |
1839 | } |
1840 | } |
1841 | |
#[stable(feature = "rust1", since = "1.0.0")]
1843 | impl<'a, T, A: Allocator> IntoIterator for &'a BinaryHeap<T, A> { |
1844 | type Item = &'a T; |
1845 | type IntoIter = Iter<'a, T>; |
1846 | |
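/// Creates a borrowing iterator over the heap's elements, in arbitrary
/// order; equivalent to [`BinaryHeap::iter()`].
///
/// ```
/// use std::collections::BinaryHeap;
///
/// let heap = BinaryHeap::from([1, 2, 3]);
/// let sum: i32 = (&heap).into_iter().sum();
/// assert_eq!(sum, 6);
/// ```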
1847 | fn into_iter(self) -> Iter<'a, T> { |
1848 | self.iter() |
1849 | } |
1850 | } |
1851 | |
#[stable(feature = "rust1", since = "1.0.0")]
1853 | impl<T: Ord, A: Allocator> Extend<T> for BinaryHeap<T, A> { |
#[inline]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
// Append the raw elements first; the guard rebuilds the heap property
// from `rebuild_from` on drop, even if the iterator panics partway.
let guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
guard.heap.data.extend(iter);
1858 | } |
1859 | |
#[inline]
1861 | fn extend_one(&mut self, item: T) { |
1862 | self.push(item); |
1863 | } |
1864 | |
#[inline]
1866 | fn extend_reserve(&mut self, additional: usize) { |
1867 | self.reserve(additional); |
1868 | } |
1869 | } |
1870 | |
1871 | #[stable (feature = "extend_ref" , since = "1.2.0" )] |
1872 | impl<'a, T: 'a + Ord + Copy, A: Allocator> Extend<&'a T> for BinaryHeap<T, A> { |
1873 | fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) { |
self.extend(iter.into_iter().cloned());
1875 | } |
1876 | |
#[inline]
fn extend_one(&mut self, &item: &'a T) {
1879 | self.push(item); |
1880 | } |
1881 | |
#[inline]
1883 | fn extend_reserve(&mut self, additional: usize) { |
1884 | self.reserve(additional); |
1885 | } |
1886 | } |
1887 | |