//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any `Hash` properties of `K`.
//!
//! It's cleaner to separate them out, so the compiler can check that we are
//! not using `Hash` at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.
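//!
//! Roughly, the intended layering looks like the sketch below. This is only
//! illustrative: the outer map is defined elsewhere in the crate, and its
//! exact fields may differ in detail.
//!
//! ```ignore
//! pub struct IndexMap<K, V, S> {
//!     // Order-preserving entry storage and index bookkeeping; no hashing here.
//!     core: IndexMapCore<K, V>,
//!     // The only layer where `S: BuildHasher` matters.
//!     hash_builder: S,
//! }
//! ```
//!
//! The outer map computes each `HashValue` with `S` and passes it down, so
//! nothing in this module needs a `Hash` or `BuildHasher` bound.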

mod entry;
mod raw;

pub mod raw_entry_v1;

use hashbrown::raw::RawTable;

use crate::vec::{self, Vec};
use crate::TryReserveError;
use core::mem;
use core::ops::RangeBounds;

use crate::util::simplify_range;
use crate::{Bucket, Entries, Equivalent, HashValue};

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};

/// Core of the map that does not depend on S
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: RawTable<usize>,
    /// entries is a dense vec of entries in their order.
    entries: Vec<Bucket<K, V>>,
}

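/// Builds the hasher closure used by the index table: given a stored index,
/// it returns the cached hash of the corresponding entry.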
#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

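/// Builds the equality closure used by the index table: given a stored index,
/// it checks whether `key` is equivalent to the key of the corresponding entry.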
#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

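/// Removes the table slot that maps `hash` to exactly `index`.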
#[inline]
fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
    let erased = table.erase_entry(hash.get(), move |&i| i == index);
    debug_assert!(erased);
}

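/// Rewrites the table slot for `hash` that currently holds `old` so it holds `new` instead.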
#[inline]
fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
    let index = table
        .get_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        let hasher = get_hash(&other.entries);
        self.indices.clone_from_with_hasher(&other.indices, hasher);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

#[cfg(feature = "test_debug")]
impl<K, V> core::fmt::Debug for IndexMapCore<K, V>
where
    K: core::fmt::Debug,
    V: core::fmt::Debug,
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("IndexMapCore")
            .field("indices", &raw::DebugIndices(&self.indices))
            .field("entries", &self.entries)
            .finish()
    }
}

impl<K, V> Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    #[inline]
    fn into_entries(self) -> Vec<Self::Entry> {
        self.entries
    }

    #[inline]
    fn as_entries(&self) -> &[Self::Entry] {
        &self.entries
    }

    #[inline]
    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
        &mut self.entries
    }

    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Self::Entry]),
    {
        f(&mut self.entries);
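        // The closure may have reordered entries or edited keys in place,
        // so the index table must be rebuilt from the entries.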
        self.rebuild_hash_table();
    }
}

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
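    ///
    /// For example, on a 64-bit target where `Bucket<K, V>` happens to be 24 bytes
    /// (an 8-byte cached hash plus an 8-byte key and value), this works out to
    /// `isize::MAX / 24`, about 3.8e17 entries.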
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: RawTable::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: RawTable::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = RawTable::with_capacity(entries.len());
        raw::insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = RawTable::with_capacity(entries.len());
        raw::insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        raw::insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        other.indices.clear();
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.reserve_entries(additional);
        }
    }

    /// Reserve entries capacity, rounded up to match the indices
    fn reserve_entries(&mut self, additional: usize) {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting panic.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        self.entries.reserve_exact(additional);
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Append a key-value pair to `entries`, *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    fn insert_entry(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.get(hash.get(), eq).copied()
    }

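    /// Insert a key-value pair, returning its index and the previous value if an
    /// equivalent key was already present (the existing key and position are kept).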
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        match self.find_or_insert(hash, &key) {
            Ok(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))),
            Err(i) => {
                debug_assert_eq!(i, self.entries.len());
                self.push_entry(hash, key, value);
                (i, None)
            }
        }
    }

    /// Same as `insert_full`, except it also replaces the key
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        match self.find_or_insert(hash, &key) {
            Ok(i) => {
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            Err(i) => {
                debug_assert_eq!(i, self.entries.len());
                self.push_entry(hash, key, value);
                (i, None)
            }
        }
    }

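    /// Append a key-value pair that is known not to be present yet, returning its index.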
    fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> usize {
        let i = self.indices.len();
        self.indices.insert(hash.get(), i, get_hash(&self.entries));
        debug_assert_eq!(i, self.entries.len());
        self.push_entry(hash, key, value);
        i
    }

    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        self.insert_entry(index, hash, key, value);
    }

    /// Remove an entry by shifting all entries that follow it
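    /// down by one, preserving the order of the remaining entries (an `O(n)`
    /// operation on average, unlike the `O(1)` `swap_remove_full`).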
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.shift_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shift all indices in range.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.buckets() / 2 {
            // Shift all indices in range.
            for i in self.indices_mut() {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }

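    /// Move the entry at `from` to position `to`, shifting the entries in between
    /// and fixing up their indices in the table.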
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(&mut self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(&mut self.indices, from_hash, usize::MAX, to);
        }
    }

    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // We'll get a "nice" bounds-check from indexing `self.entries`,
        // and then we expect to find it in the table as well.
        let [ref_a, ref_b] = self
            .indices
            .get_many_mut(
                [self.entries[a].hash.get(), self.entries[b].hash.get()],
                move |i, &x| if i == 0 { x == a } else { x == b },
            )
            .expect("indices not found");

        mem::swap(ref_a, ref_b);
        self.entries.swap(a, b);
    }

    /// Remove an entry by swapping it with the last
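    /// entry, which is `O(1)` but moves the previously-last entry into the freed
    /// position instead of preserving order (compare `shift_remove_full`).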
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.swap_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.buckets() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            raw::insert_bulk_no_grow(&mut self.indices, start_entries);
            raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            self.erase_indices_sweep(start, end);
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, can easily calculate what they should
        // be, given that this is an in-place reversal.
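        // For example, with `len == 4` the stored indices swap as 0 <-> 3 and 1 <-> 2.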
        let len = self.entries.len();
        for i in self.indices_mut() {
            *i = len - *i - 1;
        }
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
}