//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any `Hash` properties of `K`.
//!
//! It's cleaner to separate them out, so the compiler can check that we are
//! not using `Hash` at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.

mod raw;

use hashbrown::raw::RawTable;

use crate::vec::{Drain, Vec};
use core::cmp;
use core::fmt;
use core::mem::replace;
use core::ops::RangeBounds;

use crate::equivalent::Equivalent;
use crate::util::simplify_range;
use crate::{Bucket, Entries, HashValue};

/// Core of the map that does not depend on `S`
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: RawTable<usize>,
    /// entries is a dense vec of entries in their order.
    entries: Vec<Bucket<K, V>>,
}

#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

#[inline]
fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
    let erased = table.erase_entry(hash.get(), move |&i| i == index);
    debug_assert!(erased);
}

#[inline]
fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
    let index = table
        .get_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let indices = self.indices.clone();
        let mut entries = Vec::with_capacity(indices.capacity());
        entries.clone_from(&self.entries);
        IndexMapCore { indices, entries }
    }

    fn clone_from(&mut self, other: &Self) {
        let hasher = get_hash(&other.entries);
        self.indices.clone_from_with_hasher(&other.indices, hasher);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            self.reserve_entries();
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> fmt::Debug for IndexMapCore<K, V>
where
    K: fmt::Debug,
    V: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IndexMapCore")
            .field("indices", &raw::DebugIndices(&self.indices))
            .field("entries", &self.entries)
            .finish()
    }
}

impl<K, V> Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }
}

impl<K, V> IndexMapCore<K, V> {
    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: RawTable::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: RawTable::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        cmp::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = RawTable::with_capacity(entries.len());
        raw::insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.reserve_entries();
    }

    /// Reserve entries capacity to match the indices
    fn reserve_entries(&mut self) {
        let additional = self.indices.capacity() - self.entries.len();
        self.entries.reserve_exact(additional);
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Append a key-value pair, *without* checking whether it already exists,
    /// and return the pair's new index.
    fn push(&mut self, hash: HashValue, key: K, value: V) -> usize {
        let i = self.entries.len();
        self.indices.insert(hash.get(), i, get_hash(&self.entries));
        if i == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries();
        }
        self.entries.push(Bucket { hash, key, value });
        i
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.get(hash.get(), eq).copied()
    }

    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        match self.get_index_of(hash, &key) {
            Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
            None => (self.push(hash, key, value), None),
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.shift_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
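    ///
    /// For example, when `shift_remove` takes out the entry at index 2 of a
    /// five-entry map, the entries at indices `3..5` slide down to `2..4`,
    /// and each of their stored indices must decrease by one to keep pointing
    /// at the same entry.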
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shift all indices in range.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shift all indices in range.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }

    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(&mut self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(&mut self.indices, from_hash, usize::MAX, to);
        }
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.swap_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.buckets() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            raw::insert_bulk_no_grow(&mut self.indices, start_entries);
            raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            self.erase_indices_sweep(start, end);
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        // FIXME: This could use Vec::retain_mut with MSRV 1.61.
        // Like Vec::retain in self.entries, but with mutable K and V.
        // We swap-shift all the items we want to keep, truncate the rest,
        // then rebuild the raw hash table with the new indexes.
        let len = self.entries.len();
        let mut n_deleted = 0;
        for i in 0..len {
            let will_keep = {
                let entry = &mut self.entries[i];
                keep(&mut entry.key, &mut entry.value)
            };
            if !will_keep {
                n_deleted += 1;
            } else if n_deleted > 0 {
                self.entries.swap(i - n_deleted, i);
            }
        }
        if n_deleted > 0 {
            self.entries.truncate(len - n_deleted);
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, can easily calculate what they should
        // be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in self.indices_mut() {
            *i = len - *i - 1;
        }
    }
}

/// Entry for an existing key-value pair or a vacant location to
/// insert one.
pub enum Entry<'a, K, V> {
    /// Existing slot with equivalent key.
    Occupied(OccupiedEntry<'a, K, V>),
    /// Vacant slot (no equivalent key in the map).
    Vacant(VacantEntry<'a, K, V>),
}

impl<'a, K, V> Entry<'a, K, V> {
    /// Inserts the given default value in the entry if it is vacant and returns a mutable
    /// reference to it. Otherwise a mutable reference to an already existent value is returned.
    ///
    /// Computes in **O(1)** time (amortized average).
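    ///
    /// A minimal sketch, using the crate's public `IndexMap::entry` API
    /// (which is how callers obtain this `Entry`):
    ///
    /// ```
    /// use indexmap::IndexMap;
    ///
    /// let mut map = IndexMap::new();
    /// *map.entry("poneyland").or_insert(10) += 2;
    /// assert_eq!(map["poneyland"], 12);
    /// ```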
    pub fn or_insert(self, default: V) -> &'a mut V {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(default),
        }
    }

    /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable
    /// reference to it. Otherwise a mutable reference to an already existent value is returned.
    ///
    /// Computes in **O(1)** time (amortized average).
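    ///
    /// A minimal sketch, using the crate's public `IndexMap::entry` API:
    ///
    /// ```
    /// use indexmap::IndexMap;
    ///
    /// let mut map = IndexMap::new();
    /// map.entry("key").or_insert_with(|| String::from("value"));
    /// assert_eq!(map["key"], "value");
    /// ```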
    pub fn or_insert_with<F>(self, call: F) -> &'a mut V
    where
        F: FnOnce() -> V,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(call()),
        }
    }

    /// Inserts the result of the `call` function with a reference to the entry's key if it is
    /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to
    /// an already existent value is returned.
    ///
    /// Computes in **O(1)** time (amortized average).
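    ///
    /// A minimal sketch, using the crate's public `IndexMap::entry` API:
    ///
    /// ```
    /// use indexmap::IndexMap;
    ///
    /// let mut map: IndexMap<&str, usize> = IndexMap::new();
    /// map.entry("poneyland").or_insert_with_key(|key| key.len());
    /// assert_eq!(map["poneyland"], 9);
    /// ```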
    pub fn or_insert_with_key<F>(self, call: F) -> &'a mut V
    where
        F: FnOnce(&K) -> V,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let value = call(&entry.key);
                entry.insert(value)
            }
        }
    }

    /// Gets a reference to the entry's key, either within the map if occupied,
    /// or else the new key that was used to find the entry.
    pub fn key(&self) -> &K {
        match *self {
            Entry::Occupied(ref entry) => entry.key(),
            Entry::Vacant(ref entry) => entry.key(),
        }
    }

    /// Return the index where the key-value pair exists or will be inserted.
    pub fn index(&self) -> usize {
        match *self {
            Entry::Occupied(ref entry) => entry.index(),
            Entry::Vacant(ref entry) => entry.index(),
        }
    }

    /// Modifies the entry if it is occupied.
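    ///
    /// A minimal sketch, using the crate's public `IndexMap::entry` API:
    ///
    /// ```
    /// use indexmap::IndexMap;
    ///
    /// let mut map = IndexMap::new();
    /// map.entry("a").and_modify(|v| *v += 1).or_insert(1);
    /// assert_eq!(map["a"], 1);
    /// map.entry("a").and_modify(|v| *v += 1).or_insert(1);
    /// assert_eq!(map["a"], 2);
    /// ```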
    pub fn and_modify<F>(self, f: F) -> Self
    where
        F: FnOnce(&mut V),
    {
        match self {
            Entry::Occupied(mut o) => {
                f(o.get_mut());
                Entry::Occupied(o)
            }
            x => x,
        }
    }

    /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
    /// reference to it. Otherwise a mutable reference to an already existent value is returned.
    ///
    /// Computes in **O(1)** time (amortized average).
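    ///
    /// A minimal sketch, using the crate's public `IndexMap::entry` API:
    ///
    /// ```
    /// use indexmap::IndexMap;
    ///
    /// let mut map: IndexMap<&str, u32> = IndexMap::new();
    /// map.entry("a").or_default();
    /// assert_eq!(map["a"], 0);
    /// ```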
    pub fn or_default(self) -> &'a mut V
    where
        V: Default,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(V::default()),
        }
    }
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
            Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
        }
    }
}

pub use self::raw::OccupiedEntry;

// Extra methods that don't threaten the unsafe encapsulation.
impl<K, V> OccupiedEntry<'_, K, V> {
    /// Sets the value of the entry to `value`, and returns the entry's old value.
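    ///
    /// A minimal sketch, using the crate's public `IndexMap::entry` API:
    ///
    /// ```
    /// use indexmap::map::Entry;
    /// use indexmap::IndexMap;
    ///
    /// let mut map = IndexMap::new();
    /// map.insert("a", 1);
    /// if let Entry::Occupied(mut entry) = map.entry("a") {
    ///     assert_eq!(entry.insert(2), 1);
    /// }
    /// assert_eq!(map["a"], 2);
    /// ```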
    pub fn insert(&mut self, value: V) -> V {
        replace(self.get_mut(), value)
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// **NOTE:** This is equivalent to `.swap_remove()`.
    pub fn remove(self) -> V {
        self.swap_remove()
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
    /// last element of the map and popping it off. **This perturbs
    /// the position of what used to be the last element!**
    ///
    /// Computes in **O(1)** time (average).
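    ///
    /// A minimal sketch of the perturbation, using the crate's public
    /// `IndexMap::entry` API:
    ///
    /// ```
    /// use indexmap::map::Entry;
    /// use indexmap::IndexMap;
    ///
    /// let mut map = IndexMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    /// if let Entry::Occupied(entry) = map.entry("a") {
    ///     assert_eq!(entry.swap_remove(), 1);
    /// }
    /// // The last entry, "c", was swapped into the vacated position.
    /// assert_eq!(map.get_index(0), Some((&"c", &3)));
    /// ```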
    pub fn swap_remove(self) -> V {
        self.swap_remove_entry().1
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// Like `Vec::remove`, the pair is removed by shifting all of the
    /// elements that follow it, preserving their relative order.
    /// **This perturbs the index of all of those elements!**
    ///
    /// Computes in **O(n)** time (average).
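    ///
    /// A minimal sketch of the shifting, using the crate's public
    /// `IndexMap::entry` API:
    ///
    /// ```
    /// use indexmap::map::Entry;
    /// use indexmap::IndexMap;
    ///
    /// let mut map = IndexMap::new();
    /// map.insert("a", 1);
    /// map.insert("b", 2);
    /// map.insert("c", 3);
    /// if let Entry::Occupied(entry) = map.entry("a") {
    ///     assert_eq!(entry.shift_remove(), 1);
    /// }
    /// // The remaining entries kept their relative order.
    /// assert_eq!(map.get_index(0), Some((&"b", &2)));
    /// assert_eq!(map.get_index(1), Some((&"c", &3)));
    /// ```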
    pub fn shift_remove(self) -> V {
        self.shift_remove_entry().1
    }

    /// Remove and return the key, value pair stored in the map for this entry
    ///
    /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
    pub fn remove_entry(self) -> (K, V) {
        self.swap_remove_entry()
    }
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct(stringify!(OccupiedEntry))
            .field("key", self.key())
            .field("value", self.get())
            .finish()
    }
}

/// A view into a vacant entry in an `IndexMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
pub struct VacantEntry<'a, K, V> {
    map: &'a mut IndexMapCore<K, V>,
    hash: HashValue,
    key: K,
}

impl<'a, K, V> VacantEntry<'a, K, V> {
    /// Gets a reference to the key that was used to find the entry.
    pub fn key(&self) -> &K {
        &self.key
    }

    /// Takes ownership of the key, leaving the entry vacant.
    pub fn into_key(self) -> K {
        self.key
    }

    /// Return the index where the key-value pair will be inserted.
    pub fn index(&self) -> usize {
        self.map.len()
    }

    /// Inserts the entry's key and the given value into the map, and returns a mutable reference
    /// to the value.
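    ///
    /// A minimal sketch, using the crate's public `IndexMap::entry` API:
    ///
    /// ```
    /// use indexmap::map::Entry;
    /// use indexmap::IndexMap;
    ///
    /// let mut map: IndexMap<&str, u32> = IndexMap::new();
    /// if let Entry::Vacant(entry) = map.entry("a") {
    ///     *entry.insert(1) += 10;
    /// }
    /// assert_eq!(map["a"], 11);
    /// ```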
    pub fn insert(self, value: V) -> &'a mut V {
        let i = self.map.push(self.hash, self.key, value);
        &mut self.map.entries[i].value
    }
}

impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple(stringify!(VacantEntry))
            .field(self.key())
            .finish()
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
}