//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any `Hash` properties of `K`.
//!
//! It's cleaner to separate them out, so the compiler can check that we are
//! not using `Hash` at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.

mod raw;

use hashbrown::raw::RawTable;

use crate::vec::{Drain, Vec};
use crate::TryReserveError;
use core::fmt;
use core::mem;
use core::ops::RangeBounds;

use crate::util::simplify_range;
use crate::{Bucket, Entries, Equivalent, HashValue};

/// Core of the map that does not depend on S
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: RawTable<usize>,
    /// entries is a dense vec of entries in their order.
    entries: Vec<Bucket<K, V>>,
}

#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

#[inline]
fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
    let erased = table.erase_entry(hash.get(), move |&i| i == index);
    debug_assert!(erased);
}

#[inline]
fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
    let index = table
        .get_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        let hasher = get_hash(&other.entries);
        self.indices.clone_from_with_hasher(&other.indices, hasher);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> fmt::Debug for IndexMapCore<K, V>
where
    K: fmt::Debug,
    V: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IndexMapCore")
            .field("indices", &raw::DebugIndices(&self.indices))
            .field("entries", &self.entries)
            .finish()
    }
}

impl<K, V> Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }
}

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: RawTable::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: RawTable::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = RawTable::with_capacity(entries.len());
        raw::insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.reserve_entries(additional);
        }
    }

    /// Reserve entries capacity, rounded up to match the indices
    fn reserve_entries(&mut self, additional: usize) {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting panic.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        self.entries.reserve_exact(additional);
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Append a key-value pair to `entries`, *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.get(hash.get(), eq).copied()
    }

    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        match self.find_or_insert(hash, &key) {
            Ok(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))),
            Err(i) => {
                debug_assert_eq!(i, self.entries.len());
                self.push_entry(hash, key, value);
                (i, None)
            }
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.shift_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
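    ///
    /// For example (illustrative): after removing the entry at position 2 of a
    /// five-entry map, `decrement_indices(3, 5)` rewrites the stored indices
    /// 3 and 4 down to 2 and 3, matching the entries' positions once shifted.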
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shift all indices in range.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shift all indices in range.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }

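    /// Move the entry at `from` to `to`, rotating the entries in between and
    /// updating their stored indices in the table to match.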
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(&mut self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(&mut self.indices, from_hash, usize::MAX, to);
        }
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.swap_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
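    ///
    /// For example (illustrative): draining `1..3` from a five-entry map erases
    /// the stored indices 1 and 2, and rewrites indices 3 and 4 down to 1 and 2
    /// so they match the positions the remaining entries take after `Vec::drain`.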
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.buckets() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            raw::insert_bulk_no_grow(&mut self.indices, start_entries);
            raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            self.erase_indices_sweep(start, end);
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, can easily calculate what they should
        // be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in self.indices_mut() {
            *i = len - *i - 1;
        }
    }
}

/// Entry for an existing key-value pair or a vacant location to
/// insert one.
pub enum Entry<'a, K, V> {
    /// Existing slot with equivalent key.
    Occupied(OccupiedEntry<'a, K, V>),
    /// Vacant slot (no equivalent key in the map).
    Vacant(VacantEntry<'a, K, V>),
}

impl<'a, K, V> Entry<'a, K, V> {
    /// Inserts the given default value in the entry if it is vacant and returns a mutable
    /// reference to it. Otherwise a mutable reference to the existing value is returned.
    ///
    /// Computes in **O(1)** time (amortized average).
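    ///
    /// # Examples
    ///
    /// A minimal usage sketch (illustrative, going through the crate's
    /// `IndexMap::entry` method):
    ///
    /// ```
    /// use indexmap::IndexMap;
    ///
    /// let mut counts: IndexMap<&str, u32> = IndexMap::new();
    /// *counts.entry("a").or_insert(0) += 1;
    /// *counts.entry("a").or_insert(0) += 1;
    /// assert_eq!(counts["a"], 2);
    /// ```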
    pub fn or_insert(self, default: V) -> &'a mut V {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(default),
        }
    }

    /// Inserts the result of the `call` function in the entry if it is vacant and returns a
    /// mutable reference to it. Otherwise a mutable reference to the existing value is returned.
    ///
    /// Computes in **O(1)** time (amortized average).
    pub fn or_insert_with<F>(self, call: F) -> &'a mut V
    where
        F: FnOnce() -> V,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(call()),
        }
    }

    /// Inserts the result of the `call` function with a reference to the entry's key if it is
    /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference
    /// to the existing value is returned.
    ///
    /// Computes in **O(1)** time (amortized average).
    pub fn or_insert_with_key<F>(self, call: F) -> &'a mut V
    where
        F: FnOnce(&K) -> V,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = call(&entry.key);
                entry.insert(value)
            }
        }
    }

    /// Gets a reference to the entry's key, either within the map if occupied,
    /// or else the new key that was used to find the entry.
    pub fn key(&self) -> &K {
        match *self {
            Entry::Occupied(ref entry) => entry.key(),
            Entry::Vacant(ref entry) => entry.key(),
        }
    }

    /// Return the index where the key-value pair exists or will be inserted.
    pub fn index(&self) -> usize {
        match *self {
            Entry::Occupied(ref entry) => entry.index(),
            Entry::Vacant(ref entry) => entry.index(),
        }
    }

    /// Modifies the entry if it is occupied.
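    ///
    /// # Examples
    ///
    /// A minimal usage sketch (illustrative, going through the crate's
    /// `IndexMap::entry` method):
    ///
    /// ```
    /// use indexmap::IndexMap;
    ///
    /// let mut map: IndexMap<&str, u32> = IndexMap::new();
    /// map.entry("key").and_modify(|v| *v += 1).or_insert(42);
    /// assert_eq!(map["key"], 42);
    /// map.entry("key").and_modify(|v| *v += 1).or_insert(42);
    /// assert_eq!(map["key"], 43);
    /// ```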
    pub fn and_modify<F>(self, f: F) -> Self
    where
        F: FnOnce(&mut V),
    {
        match self {
            Entry::Occupied(mut o) => {
                f(o.get_mut());
                Entry::Occupied(o)
            }
            x => x,
        }
    }

    /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
    /// reference to it. Otherwise a mutable reference to the existing value is returned.
    ///
    /// Computes in **O(1)** time (amortized average).
    pub fn or_default(self) -> &'a mut V
    where
        V: Default,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(V::default()),
        }
    }
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
            Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
        }
    }
}

pub use self::raw::OccupiedEntry;

// Extra methods that don't threaten the unsafe encapsulation.
impl<K, V> OccupiedEntry<'_, K, V> {
    /// Sets the value of the entry to `value`, and returns the entry's old value.
    pub fn insert(&mut self, value: V) -> V {
        mem::replace(self.get_mut(), value)
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// **NOTE:** This is equivalent to `.swap_remove()`.
    pub fn remove(self) -> V {
        self.swap_remove()
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
    /// last element of the map and popping it off. **This perturbs
    /// the position of what used to be the last element!**
    ///
    /// Computes in **O(1)** time (average).
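    ///
    /// # Examples
    ///
    /// A minimal sketch of that perturbation (illustrative, assuming the
    /// crate's `IndexMap` and `map::Entry` re-exports):
    ///
    /// ```
    /// use indexmap::map::{Entry, IndexMap};
    ///
    /// let mut map = IndexMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    /// if let Entry::Occupied(entry) = map.entry("a") {
    ///     assert_eq!(entry.swap_remove(), 1);
    /// }
    /// // The last pair ("c", 3) was swapped into the removed slot.
    /// assert_eq!(map.get_index(0), Some((&"c", &3)));
    /// ```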
    pub fn swap_remove(self) -> V {
        self.swap_remove_entry().1
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// Like `Vec::remove`, the pair is removed by shifting all of the
    /// elements that follow it, preserving their relative order.
    /// **This perturbs the index of all of those elements!**
    ///
    /// Computes in **O(n)** time (average).
    pub fn shift_remove(self) -> V {
        self.shift_remove_entry().1
    }

    /// Remove and return the key, value pair stored in the map for this entry
    ///
    /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
    pub fn remove_entry(self) -> (K, V) {
        self.swap_remove_entry()
    }
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct(stringify!(OccupiedEntry))
            .field("key", self.key())
            .field("value", self.get())
            .finish()
    }
}

/// A view into a vacant entry in an `IndexMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
pub struct VacantEntry<'a, K, V> {
    map: &'a mut IndexMapCore<K, V>,
    hash: HashValue,
    key: K,
}

impl<'a, K, V> VacantEntry<'a, K, V> {
    /// Gets a reference to the key that was used to find the entry.
    pub fn key(&self) -> &K {
        &self.key
    }

    /// Takes ownership of the key, leaving the entry vacant.
    pub fn into_key(self) -> K {
        self.key
    }

    /// Return the index where the key-value pair will be inserted.
    pub fn index(&self) -> usize {
        self.map.indices.len()
    }

    /// Inserts the entry's key and the given value into the map, and returns a mutable reference
    /// to the value.
    pub fn insert(self, value: V) -> &'a mut V {
        let i = self.index();
        let Self { map, hash, key } = self;
        map.indices.insert(hash.get(), i, get_hash(&map.entries));
        debug_assert_eq!(i, map.entries.len());
        map.push_entry(hash, key, value);
        &mut map.entries[i].value
    }
}

impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple(stringify!(VacantEntry))
            .field(self.key())
            .finish()
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
}