1 | #![allow (unsafe_code)] |
2 | //! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`, |
3 | //! mostly in dealing with its bucket "pointers". |
4 | |
5 | use super::{equivalent, get_hash, Bucket, HashValue, IndexMapCore}; |
6 | use hashbrown::raw::RawTable; |
7 | |
8 | type RawBucket = hashbrown::raw::Bucket<usize>; |
9 | |
10 | /// Inserts many entries into a raw table without reallocating. |
11 | /// |
12 | /// ***Panics*** if there is not sufficient capacity already. |
13 | pub(super) fn insert_bulk_no_grow<K, V>(indices: &mut RawTable<usize>, entries: &[Bucket<K, V>]) { |
14 | assert!(indices.capacity() - indices.len() >= entries.len()); |
15 | for entry: &Bucket in entries { |
16 | // SAFETY: we asserted that sufficient capacity exists for all entries. |
17 | unsafe { |
18 | indices.insert_no_grow(hash:entry.hash.get(), value:indices.len()); |
19 | } |
20 | } |
21 | } |
22 | |
/// Debug adapter that formats the contents of the raw index table.
#[cfg(feature = "test_debug")]
pub(super) struct DebugIndices<'a>(pub &'a RawTable<usize>);
25 | |
#[cfg(feature = "test_debug")]
impl core::fmt::Debug for DebugIndices<'_> {
    /// Print the stored indices as a debug list, e.g. `[0, 1, 2]`.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let mut list = f.debug_list();
        // SAFETY: we're not letting any of the buckets escape this function.
        unsafe {
            for raw_bucket in self.0.iter() {
                list.entry(raw_bucket.as_ref());
            }
        }
        list.finish()
    }
}
34 | |
35 | impl<K, V> IndexMapCore<K, V> { |
    /// Sweep the whole table to erase indices start..end
    ///
    /// Indices `>= end` are shifted down by `end - start`; indices in
    /// `start..end` are erased from the table. Only `self.indices` is
    /// updated — `self.entries` is untouched here.
    pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) {
        // SAFETY: we're not letting any of the buckets escape this function
        unsafe {
            let offset = end - start;
            for bucket in self.indices.iter() {
                let i = bucket.as_mut();
                if *i >= end {
                    // Above the erased range: shift down to fill the gap.
                    *i -= offset;
                } else if *i >= start {
                    // Inside the erased range: drop this index entirely.
                    // NOTE(review): erasing while iterating assumes the raw
                    // iterator stays valid across `erase` — per hashbrown's
                    // RawTable/RawIter contract; confirm on version upgrades.
                    self.indices.erase(bucket);
                }
            }
        }
    }
51 | |
    /// Search for a key in the table and return `Ok(entry_index)` if found.
    /// Otherwise, insert the key and return `Err(new_index)`.
    ///
    /// Note that hashbrown may resize the table to reserve space for insertion,
    /// even before checking if it's already present, so this is somewhat biased
    /// towards new items.
    pub(crate) fn find_or_insert(&mut self, hash: HashValue, key: &K) -> Result<usize, usize>
    where
        K: Eq,
    {
        let hash = hash.get();
        // Closure comparing a candidate stored index against `key`.
        let eq = equivalent(key, &self.entries);
        // Closure re-hashing stored indices; hashbrown needs it in case the
        // table must resize/rehash before the insert slot is found.
        let hasher = get_hash(&self.entries);
        // SAFETY: We're not mutating between find and read/insert.
        unsafe {
            match self.indices.find_or_find_insert_slot(hash, eq, hasher) {
                // Found: the live bucket holds the existing entry's index.
                Ok(raw_bucket) => Ok(*raw_bucket.as_ref()),
                // Not found: record the next sequential index in the slot
                // that the lookup already reserved for us.
                Err(slot) => {
                    let index = self.indices.len();
                    self.indices.insert_in_slot(hash, slot, index);
                    Err(index)
                }
            }
        }
    }
77 | |
78 | pub(super) fn raw_entry( |
79 | &mut self, |
80 | hash: HashValue, |
81 | mut is_match: impl FnMut(&K) -> bool, |
82 | ) -> Result<RawTableEntry<'_, K, V>, &mut Self> { |
83 | let entries = &*self.entries; |
84 | let eq = move |&i: &usize| is_match(&entries[i].key); |
85 | match self.indices.find(hash.get(), eq) { |
86 | // SAFETY: The entry is created with a live raw bucket, at the same time |
87 | // we have a &mut reference to the map, so it can not be modified further. |
88 | Some(raw_bucket) => Ok(RawTableEntry { |
89 | map: self, |
90 | raw_bucket, |
91 | }), |
92 | None => Err(self), |
93 | } |
94 | } |
95 | |
    /// Mutable iterator over all stored indices, in arbitrary table order.
    pub(super) fn indices_mut(&mut self) -> impl Iterator<Item = &mut usize> {
        // SAFETY: we're not letting any of the buckets escape this function,
        // only the item references that are appropriately bound to `&mut self`.
        unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) }
    }
101 | } |
102 | |
/// A view into an occupied raw entry in an `IndexMap`.
// SAFETY: The lifetime of the map reference also constrains the raw bucket,
// which is essentially a raw pointer into the map indices.
pub(super) struct RawTableEntry<'a, K, V> {
    /// Exclusive borrow of the map — keeps `raw_bucket` from being
    /// invalidated for as long as this view exists.
    map: &'a mut IndexMapCore<K, V>,
    /// Live bucket in `map.indices` holding the entry's index into
    /// `map.entries`.
    raw_bucket: RawBucket,
}
110 | |
// `hashbrown::raw::Bucket` is only `Send`, not `Sync`, so this impl cannot
// be derived automatically.
// SAFETY: `&self` only accesses the bucket to read it.
unsafe impl<K: Sync, V: Sync> Sync for RawTableEntry<'_, K, V> {}
114 | |
115 | impl<'a, K, V> RawTableEntry<'a, K, V> { |
    /// Return the index of the key-value pair
    /// (its position in `map.entries`).
    #[inline]
    pub(super) fn index(&self) -> usize {
        // SAFETY: we have `&mut map` keeping the bucket stable
        unsafe { *self.raw_bucket.as_ref() }
    }
122 | |
123 | #[inline ] |
124 | pub(super) fn bucket(&self) -> &Bucket<K, V> { |
125 | &self.map.entries[self.index()] |
126 | } |
127 | |
128 | #[inline ] |
129 | pub(super) fn bucket_mut(&mut self) -> &mut Bucket<K, V> { |
130 | let index = self.index(); |
131 | &mut self.map.entries[index] |
132 | } |
133 | |
134 | #[inline ] |
135 | pub(super) fn into_bucket(self) -> &'a mut Bucket<K, V> { |
136 | let index = self.index(); |
137 | &mut self.map.entries[index] |
138 | } |
139 | |
    /// Remove the index from indices, leaving the actual entries to the caller.
    ///
    /// NOTE(review): after this returns, other stored indices still refer to
    /// their old `entries` positions — presumably the caller re-syncs them
    /// (e.g. via a sweep); confirm at the call sites.
    pub(super) fn remove_index(self) -> (&'a mut IndexMapCore<K, V>, usize) {
        // SAFETY: This is safe because it can only happen once (self is consumed)
        // and map.indices have not been modified since entry construction
        let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
        (self.map, index)
    }
147 | |
148 | /// Take no action, just return the index and the original map reference. |
149 | pub(super) fn into_inner(self) -> (&'a mut IndexMapCore<K, V>, usize) { |
150 | let index = self.index(); |
151 | (self.map, index) |
152 | } |
153 | } |
154 | |