//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any Hash properties of K.
//!
//! It's cleaner to separate them out, so the compiler can check that we are not
//! using Hash at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.
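//!
//! A lookup goes hash -> position in `indices` -> `Bucket` in `entries`; each
//! `Bucket` caches its key's hash, so these methods never need the hasher itself.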

mod entry;

pub mod raw_entry_v1;

use hashbrown::hash_table;

use crate::vec::{self, Vec};
use crate::TryReserveError;
use core::mem;
use core::ops::RangeBounds;

use crate::util::simplify_range;
use crate::{Bucket, Equivalent, HashValue};

type Indices = hash_table::HashTable<usize>;
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};

/// Core of the map that does not depend on S
#[derive(Debug)]
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: Indices,
    /// entries is a dense vec maintaining entry order.
    entries: Entries<K, V>,
}

/// Mutable references to the parts of an `IndexMapCore`.
///
/// `HashTable::find_entry` takes hold of `&mut indices`, so we have to borrow our
/// `&mut entries` separately, and there's no way to get back to a `&mut IndexMapCore`. So this type
/// is used to implement methods on the split references, and `IndexMapCore` can also call those to
/// avoid duplication.
struct RefMut<'a, K, V> {
    indices: &'a mut Indices,
    entries: &'a mut Entries<K, V>,
}

#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

#[inline]
fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
        entry.remove();
    } else if cfg!(debug_assertions) {
        panic!("index not found");
    }
}

#[inline]
fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
    let index = table
        .find_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

/// Inserts many entries into the indices table without reallocating,
/// and without regard for duplication.
///
/// ***Panics*** if there is not sufficient capacity already.
fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
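    // Each index value is the current `indices.len()`, which matches the entry's
    // final position in `entries` as long as the caller keeps the two in sync.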
    for entry in entries {
        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
    }
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        self.indices.clone_from(&other.indices);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.borrow_mut().reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> crate::Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }
}

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    fn borrow_mut(&mut self) -> RefMut<'_, K, V> {
        RefMut::new(&mut self.indices, &mut self.entries)
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
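            // Fix `indices` first, while the soon-to-be-truncated tail entries
            // (and their cached hashes) are still present to be looked up.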
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        self.erase_indices(at, self.entries.len());
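        // The tail `at..` is no longer tracked in our `indices`; it moves to the new map.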
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
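        // Split twice: first off the tail (`range.end..`) that becomes the returned map,
        // then off the drained middle (`range.start..range.end`), leaving `..range.start`.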
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
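        // `other.entries` is now empty, so clear its stale `indices` as well.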
        other.indices.clear();
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.borrow_mut().reserve_entries(additional);
        }
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
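            // The removed entry was at the old last position, i.e. the new `entries.len()`.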
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

    /// Append a key-value pair to `entries`,
    /// *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.borrow_mut().reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
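                // An equivalent key already exists: keep its position and key,
                // swap in the new value, and return the old one.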
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Same as `insert_full`, except it also replaces the key
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().shift_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    #[inline]
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().shift_remove_index(index)
    }

    #[inline]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        self.borrow_mut().move_index(from, to);
    }

    #[inline]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        self.borrow_mut().swap_indices(a, b);
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().swap_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by swapping it with the last
    #[inline]
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().swap_remove_index(index)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
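        // `clear` keeps the existing allocation, which already had room for every
        // entry, so `insert_bulk_no_grow` will not need to reallocate.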
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, can easily calculate what they should
        // be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }
}

impl<'a, K, V> RefMut<'a, K, V> {
    #[inline]
    fn new(indices: &'a mut Indices, entries: &'a mut Entries<K, V>) -> Self {
        Self { indices, entries }
    }

    /// Reserve entries capacity, rounded up to match the indices
    fn reserve_entries(&mut self, additional: usize) {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting panic.
        let new_capacity = Ord::min(
            self.indices.capacity(),
            IndexMapCore::<K, V>::MAX_ENTRIES_CAPACITY,
        );
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        self.entries.reserve_exact(additional);
    }

    /// Insert a key-value pair in `entries`,
    /// *without* checking whether it already exists.
    fn insert_unique(mut self, hash: HashValue, key: K, value: V) -> OccupiedEntry<'a, K, V> {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries(1);
        }
        let i = self.indices.len();
        let entry = self
            .indices
            .insert_unique(hash.get(), i, get_hash(self.entries));
        debug_assert_eq!(i, self.entries.len());
        self.entries.push(Bucket { hash, key, value });
        OccupiedEntry::new(self.entries, entry)
    }

    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Remove an entry by shifting all entries that follow it
    fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Remove an entry by swapping it with the last
    fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(self.indices, entry.hash, i, i + 1);
            }
        }
    }

    fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        let _ = self.entries[to]; // explicit bounds check
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(self.indices, from_hash, usize::MAX, to);
        }
    }

    fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // We'll get a "nice" bounds-check from indexing `entries`,
        // and then we expect to find it in the table as well.
        match self.indices.get_many_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
    assert_send_sync::<raw_entry_v1::RawEntryMut<'_, i32, i32, ()>>();
}