1use crate::alloc::alloc::{handle_alloc_error, Layout};
2use crate::scopeguard::{guard, ScopeGuard};
3use crate::TryReserveError;
4use core::iter::FusedIterator;
5use core::marker::PhantomData;
6use core::mem;
7use core::mem::MaybeUninit;
8use core::ptr::NonNull;
9use core::{hint, ptr};
10
11cfg_if! {
12 // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
13 // at once instead of 8. We don't bother with AVX since it would require
14 // runtime dispatch and wouldn't gain us much anyways: the probability of
15 // finding a match drops off drastically after the first few buckets.
16 //
17 // I attempted an implementation on ARM using NEON instructions, but it
18 // turns out that most NEON instructions have multi-cycle latency, which in
19 // the end outweighs any gains over the generic implementation.
20 if #[cfg(all(
21 target_feature = "sse2",
22 any(target_arch = "x86", target_arch = "x86_64"),
23 not(miri),
24 ))] {
25 mod sse2;
26 use sse2 as imp;
27 } else if #[cfg(all(
28 target_arch = "aarch64",
29 target_feature = "neon",
30 // NEON intrinsics are currently broken on big-endian targets.
31 // See https://github.com/rust-lang/stdarch/issues/1484.
32 target_endian = "little",
33 not(miri),
34 ))] {
35 mod neon;
36 use neon as imp;
37 } else {
38 mod generic;
39 use generic as imp;
40 }
41}
42
43mod alloc;
44pub(crate) use self::alloc::{do_alloc, Allocator, Global};
45
46mod bitmask;
47
48use self::bitmask::BitMaskIter;
49use self::imp::Group;
50
51// Branch prediction hint. This is currently only available on nightly but it
52// consistently improves performance by 10-15%.
53#[cfg(not(feature = "nightly"))]
54use core::convert::identity as likely;
55#[cfg(not(feature = "nightly"))]
56use core::convert::identity as unlikely;
57#[cfg(feature = "nightly")]
58use core::intrinsics::{likely, unlikely};
59
60// FIXME: use strict provenance functions once they are stable.
61// Implement it with a transmute for now.
62#[inline(always)]
63#[allow(clippy::useless_transmute)] // clippy is wrong, cast and transmute are different here
64fn invalid_mut<T>(addr: usize) -> *mut T {
    unsafe { core::mem::transmute(addr) }
66}
67
68#[inline]
69unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
71}
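
// A quick orientation sketch (not part of the API): for any pointer `from`
// into a valid allocation of `T` and `to == from.add(n)` within that same
// allocation, `offset_from(to, from) == n`.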
72
73/// Whether memory allocation errors should return an error or abort.
74#[derive(Copy, Clone)]
75enum Fallibility {
76 Fallible,
77 Infallible,
78}
79
80impl Fallibility {
81 /// Error to return on capacity overflow.
82 #[cfg_attr(feature = "inline-more", inline)]
83 fn capacity_overflow(self) -> TryReserveError {
84 match self {
85 Fallibility::Fallible => TryReserveError::CapacityOverflow,
86 Fallibility::Infallible => panic!("Hash table capacity overflow"),
87 }
88 }
89
90 /// Error to return on allocation error.
91 #[cfg_attr(feature = "inline-more", inline)]
92 fn alloc_err(self, layout: Layout) -> TryReserveError {
93 match self {
94 Fallibility::Fallible => TryReserveError::AllocError { layout },
95 Fallibility::Infallible => handle_alloc_error(layout),
96 }
97 }
98}
99
100trait SizedTypeProperties: Sized {
101 const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
102 const NEEDS_DROP: bool = mem::needs_drop::<Self>();
103}
104
105impl<T> SizedTypeProperties for T {}
106
107/// Control byte value for an empty bucket.
108const EMPTY: u8 = 0b1111_1111;
109
110/// Control byte value for a deleted bucket.
111const DELETED: u8 = 0b1000_0000;
112
113/// Checks whether a control byte represents a full bucket (top bit is clear).
114#[inline]
115fn is_full(ctrl: u8) -> bool {
116 ctrl & 0x80 == 0
117}
118
119/// Checks whether a control byte represents a special value (top bit is set).
120#[inline]
121fn is_special(ctrl: u8) -> bool {
122 ctrl & 0x80 != 0
123}
124
125/// Checks whether a special control value is EMPTY (just check 1 bit).
126#[inline]
127fn special_is_empty(ctrl: u8) -> bool {
128 debug_assert!(is_special(ctrl));
129 ctrl & 0x01 != 0
130}
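
// A small sketch of how the control-byte predicates above fit together,
// using an arbitrary full control byte `0x5A` (i.e. some `h2` value, which
// always has its top bit clear) purely for illustration:
//
//     is_full(0x5A)             == true    // top bit clear => full bucket
//     is_full(EMPTY)            == false
//     is_special(EMPTY)         == true    // 0b1111_1111
//     is_special(DELETED)       == true    // 0b1000_0000
//     special_is_empty(EMPTY)   == true    // low bit set distinguishes EMPTY
//     special_is_empty(DELETED) == false   // ... from DELETED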
131
132/// Primary hash function, used to select the initial bucket to probe from.
133#[inline]
134#[allow(clippy::cast_possible_truncation)]
135fn h1(hash: u64) -> usize {
136 // On 32-bit platforms we simply ignore the higher hash bits.
137 hash as usize
138}
139
// Constant for the h2 function that grabs the top 7 bits of the hash.
141const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
142 mem::size_of::<usize>()
143} else {
144 mem::size_of::<u64>()
145};
146
147/// Secondary hash function, saved in the low 7 bits of the control byte.
148#[inline]
149#[allow(clippy::cast_possible_truncation)]
150fn h2(hash: u64) -> u8 {
151 // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
152 // value, some hash functions (such as FxHash) produce a usize result
153 // instead, which means that the top 32 bits are 0 on 32-bit platforms.
154 // So we use MIN_HASH_LEN constant to handle this.
155 let top7: u64 = hash >> (MIN_HASH_LEN * 8 - 7);
156 (top7 & 0x7f) as u8 // truncation
157}
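
// As a worked example of the bit selection above (the hash values are
// hypothetical and chosen only for illustration): on a 64-bit target
// `MIN_HASH_LEN == 8`, so the shift is `8 * 8 - 7 == 57` and `h2` keeps the
// topmost 7 bits of the hash:
//
//     h2(0xFE00_0000_0000_0000) == 0x7F
//     h2(0x0100_0000_0000_0000) == 0x00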
158
159/// Probe sequence based on triangular numbers, which is guaranteed (since our
160/// table size is a power of two) to visit every group of elements exactly once.
161///
162/// A triangular probe has us jump by 1 more group every time. So first we
163/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
164/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
165///
166/// Proof that the probe will visit every group in the table:
167/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
168struct ProbeSeq {
169 pos: usize,
170 stride: usize,
171}
172
173impl ProbeSeq {
174 #[inline]
175 fn move_next(&mut self, bucket_mask: usize) {
176 // We should have found an empty bucket by now and ended the probe.
177 debug_assert!(
178 self.stride <= bucket_mask,
179 "Went past end of probe sequence"
180 );
181
182 self.stride += Group::WIDTH;
183 self.pos += self.stride;
184 self.pos &= bucket_mask;
185 }
186}
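
// As a small illustrative trace of the probe sequence (assuming
// `Group::WIDTH == 16`, a 64-bucket table so `bucket_mask == 63`, and a
// sequence starting at `pos == 0` with `stride == 0`): successive calls to
// `move_next` visit positions 16, 48 and 32, so together with the starting
// position every one of the four 16-wide groups is probed exactly once.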
187
188/// Returns the number of buckets needed to hold the given number of items,
189/// taking the maximum load factor into account.
190///
191/// Returns `None` if an overflow occurs.
192// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
193#[cfg_attr(target_os = "emscripten", inline(never))]
194#[cfg_attr(not(target_os = "emscripten"), inline)]
195fn capacity_to_buckets(cap: usize) -> Option<usize> {
196 debug_assert_ne!(cap, 0);
197
198 // For small tables we require at least 1 empty bucket so that lookups are
199 // guaranteed to terminate if an element doesn't exist in the table.
200 if cap < 8 {
201 // We don't bother with a table size of 2 buckets since that can only
202 // hold a single element. Instead we skip directly to a 4 bucket table
203 // which can hold 3 elements.
204 return Some(if cap < 4 { 4 } else { 8 });
205 }
206
207 // Otherwise require 1/8 buckets to be empty (87.5% load)
208 //
209 // Be careful when modifying this, calculate_layout relies on the
210 // overflow check here.
211 let adjusted_cap: usize = cap.checked_mul(8)? / 7;
212
213 // Any overflows will have been caught by the checked_mul. Also, any
214 // rounding errors from the division above will be cleaned up by
215 // next_power_of_two (which can't overflow because of the previous division).
216 Some(adjusted_cap.next_power_of_two())
217}
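
// As a worked example: `capacity_to_buckets(12)` computes `12 * 8 / 7 == 13`
// and rounds up to the next power of two, returning `Some(16)`; per
// `bucket_mask_to_capacity` below, a 16-bucket table can then hold
// `16 / 8 * 7 == 14 >= 12` items before it needs to grow.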
218
219/// Returns the maximum effective capacity for the given bucket mask, taking
220/// the maximum load factor into account.
221#[inline]
222fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
223 if bucket_mask < 8 {
224 // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
225 // Keep in mind that the bucket mask is one less than the bucket count.
226 bucket_mask
227 } else {
228 // For larger tables we reserve 12.5% of the slots as empty.
229 ((bucket_mask + 1) / 8) * 7
230 }
231}
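
// For example, `bucket_mask_to_capacity(7) == 7` (a small 8-bucket table only
// reserves a single empty slot), while `bucket_mask_to_capacity(15) == 14`
// (12.5% of the 16 buckets are kept empty).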
232
233/// Helper which allows the max calculation for ctrl_align to be statically computed for each T
234/// while keeping the rest of `calculate_layout_for` independent of `T`
235#[derive(Copy, Clone)]
236struct TableLayout {
237 size: usize,
238 ctrl_align: usize,
239}
240
241impl TableLayout {
242 #[inline]
243 const fn new<T>() -> Self {
244 let layout = Layout::new::<T>();
245 Self {
246 size: layout.size(),
247 ctrl_align: if layout.align() > Group::WIDTH {
248 layout.align()
249 } else {
250 Group::WIDTH
251 },
252 }
253 }
254
255 #[inline]
256 fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
257 debug_assert!(buckets.is_power_of_two());
258
259 let TableLayout { size, ctrl_align } = self;
260 // Manual layout calculation since Layout methods are not yet stable.
261 let ctrl_offset =
262 size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
263 let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
264
265 // We need an additional check to ensure that the allocation doesn't
266 // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
267 if len > isize::MAX as usize - (ctrl_align - 1) {
268 return None;
269 }
270
271 Some((
272 unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
273 ctrl_offset,
274 ))
275 }
276}
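
// As a rough worked example of `calculate_layout_for` (assuming a target
// where `Group::WIDTH == 16`): for `T == u64` we have `size == 8` and
// `ctrl_align == 16`, so for 8 buckets `ctrl_offset` is `8 * 8 == 64`
// (already a multiple of 16) and the total allocation length is
// `64 + 8 + 16 == 88` bytes, with the control bytes starting at offset 64.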
277
/// A reference to an empty bucket into which an element can be inserted.
279pub struct InsertSlot {
280 index: usize,
281}
282
283/// A reference to a hash table bucket containing a `T`.
284///
285/// This is usually just a pointer to the element itself. However if the element
286/// is a ZST, then we instead track the index of the element in the table so
287/// that `erase` works properly.
288pub struct Bucket<T> {
    // Actually it is a pointer to the next element rather than to the element
    // itself: this is needed to maintain pointer arithmetic invariants, since
    // keeping a direct pointer to the element introduces difficulty.
    // `NonNull` is used for variance and niche layout optimization.
293 ptr: NonNull<T>,
294}
295
296// This Send impl is needed for rayon support. This is safe since Bucket is
297// never exposed in a public API.
298unsafe impl<T> Send for Bucket<T> {}
299
300impl<T> Clone for Bucket<T> {
301 #[inline]
302 fn clone(&self) -> Self {
303 Self { ptr: self.ptr }
304 }
305}
306
307impl<T> Bucket<T> {
    /// Creates a [`Bucket`] that contains a pointer to the data.
    /// The pointer calculation is performed by calculating the
    /// offset from the given `base` pointer (convenience for
    /// `base.as_ptr().sub(index)`).
312 ///
313 /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
314 /// offset of `3 * size_of::<T>()` bytes.
315 ///
316 /// If the `T` is a ZST, then we instead track the index of the element
317 /// in the table so that `erase` works properly (return
318 /// `NonNull::new_unchecked((index + 1) as *mut T)`)
319 ///
320 /// # Safety
321 ///
322 /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
323 /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
324 /// rules of [`NonNull::new_unchecked`] function.
325 ///
326 /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
327 /// and [`NonNull::new_unchecked`] function, as well as for the correct
328 /// logic of the work of this crate, the following rules are necessary and
329 /// sufficient:
330 ///
    /// * the `base` pointer must not be `dangling` and must point to the
    /// end of the first `value element` from the `data part` of the table, i.e.
    /// must be the pointer returned by [`RawTable::data_end`] or by
    /// [`RawTableInner::data_end<T>`];
335 ///
336 /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
337 /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
338 /// must be no greater than the number returned by the function
339 /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
340 ///
341 /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
342 /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
343 /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
344 /// must be no greater than the number returned by the function
345 /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
346 ///
347 /// [`Bucket`]: crate::raw::Bucket
348 /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
349 /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
350 /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
351 /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
352 /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
353 /// [`RawTableInner::buckets`]: RawTableInner::buckets
354 #[inline]
355 unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
356 // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
357 // the data part of the table (we start counting from "0", so that
        // in the expression T[last], the "last" index is actually one less than the
359 // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
360 //
361 // `from_base_index(base, 1).as_ptr()` returns a pointer that
362 // points here in the data part of the table
363 // (to the start of T1)
364 // |
365 // | `base: NonNull<T>` must point here
366 // | (to the end of T0 or to the start of C0)
367 // v v
368 // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
369 // ^
370 // `from_base_index(base, 1)` returns a pointer
371 // that points here in the data part of the table
372 // (to the end of T1)
373 //
374 // where: T0...Tlast - our stored data; C0...Clast - control bytes
375 // or metadata for data.
376 let ptr = if T::IS_ZERO_SIZED {
377 // won't overflow because index must be less than length (bucket_mask)
378 // and bucket_mask is guaranteed to be less than `isize::MAX`
379 // (see TableLayout::calculate_layout_for method)
380 invalid_mut(index + 1)
381 } else {
382 base.as_ptr().sub(index)
383 };
384 Self {
385 ptr: NonNull::new_unchecked(ptr),
386 }
387 }
388
389 /// Calculates the index of a [`Bucket`] as distance between two pointers
390 /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
391 /// The returned value is in units of T: the distance in bytes divided by
392 /// [`core::mem::size_of::<T>()`].
393 ///
394 /// If the `T` is a ZST, then we return the index of the element in
395 /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
396 ///
397 /// This function is the inverse of [`from_base_index`].
398 ///
399 /// # Safety
400 ///
401 /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
402 /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
403 ///
404 /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
405 /// method, as well as for the correct logic of the work of this crate, the
406 /// following rules are necessary and sufficient:
407 ///
    /// * the pointer contained in `base` must not be `dangling` and must point to the
    /// end of the first `element` from the `data part` of the table, i.e.
    /// must be a pointer returned by [`RawTable::data_end`] or by
    /// [`RawTableInner::data_end<T>`];
412 ///
413 /// * `self` also must not contain dangling pointer;
414 ///
415 /// * both `self` and `base` must be created from the same [`RawTable`]
416 /// (or [`RawTableInner`]).
417 ///
418 /// If `mem::size_of::<T>() == 0`, this function is always safe.
419 ///
420 /// [`Bucket`]: crate::raw::Bucket
421 /// [`from_base_index`]: crate::raw::Bucket::from_base_index
422 /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
423 /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
424 /// [`RawTable`]: crate::raw::RawTable
425 /// [`RawTableInner`]: RawTableInner
426 /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
427 #[inline]
428 unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
429 // If mem::size_of::<T>() != 0 then return an index under which we used to store the
430 // `element` in the data part of the table (we start counting from "0", so
431 // that in the expression T[last], the "last" index actually is one less than the
432 // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
433 // For example for 5th element in table calculation is performed like this:
434 //
435 // mem::size_of::<T>()
436 // |
437 // | `self = from_base_index(base, 5)` that returns pointer
438 // | that points here in tha data part of the table
439 // | (to the end of T5)
440 // | | `base: NonNull<T>` must point here
441 // v | (to the end of T0 or to the start of C0)
442 // /???\ v v
443 // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
444 // \__________ __________/
445 // \/
446 // `bucket.to_base_index(base)` = 5
447 // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
448 //
449 // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
450 if T::IS_ZERO_SIZED {
451 // this can not be UB
452 self.ptr.as_ptr() as usize - 1
453 } else {
454 offset_from(base.as_ptr(), self.ptr.as_ptr())
455 }
456 }
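
    // In other words (for a non-ZST `T`), `to_base_index` really is the
    // inverse of `from_base_index`:
    //
    //     Bucket::from_base_index(base, i).to_base_index(base) == i
    //
    // for every `i <= RawTableInner.bucket_mask`; the same identity holds in
    // the ZST case through the `index + 1` encoding used above.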
457
458 /// Acquires the underlying raw pointer `*mut T` to `data`.
459 ///
460 /// # Note
461 ///
462 /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
463 /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
    /// for properly dropping the data we also need to clear the `data` control bytes. If we
    /// drop the data but do not clear the `data control byte`, it leads to a double drop when
    /// [`RawTable`] goes out of scope.
    ///
    /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
    /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
    /// will not re-evaluate where the new value should go, meaning the value may become
    /// "lost" if its location does not reflect its state.
472 ///
473 /// [`RawTable`]: crate::raw::RawTable
474 /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
475 /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
476 /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
477 ///
478 /// # Examples
479 ///
480 /// ```
481 /// # #[cfg(feature = "raw")]
482 /// # fn test() {
483 /// use core::hash::{BuildHasher, Hash};
484 /// use hashbrown::raw::{Bucket, RawTable};
485 ///
486 /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
487 ///
488 /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
489 /// use core::hash::Hasher;
490 /// let mut state = hash_builder.build_hasher();
491 /// key.hash(&mut state);
492 /// state.finish()
493 /// }
494 ///
495 /// let hash_builder = NewHashBuilder::default();
496 /// let mut table = RawTable::new();
497 ///
498 /// let value = ("a", 100);
499 /// let hash = make_hash(&hash_builder, &value.0);
500 ///
501 /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
502 ///
503 /// let bucket: Bucket<(&str, i32)> = table.find(hash, |(k1, _)| k1 == &value.0).unwrap();
504 ///
505 /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100));
506 /// # }
507 /// # fn main() {
508 /// # #[cfg(feature = "raw")]
509 /// # test()
510 /// # }
511 /// ```
512 #[inline]
513 pub fn as_ptr(&self) -> *mut T {
514 if T::IS_ZERO_SIZED {
515 // Just return an arbitrary ZST pointer which is properly aligned
516 // invalid pointer is good enough for ZST
517 invalid_mut(mem::align_of::<T>())
518 } else {
519 unsafe { self.ptr.as_ptr().sub(1) }
520 }
521 }
522
    /// Creates a new [`Bucket`] that is offset from `self` by the given
    /// `offset`. The pointer calculation is performed by calculating the
    /// offset from the `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
    /// This function is used for iterators.
    ///
    /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
529 /// offset of `3 * size_of::<T>()` bytes.
530 ///
531 /// # Safety
532 ///
533 /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
534 /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
535 /// rules of [`NonNull::new_unchecked`] function.
536 ///
537 /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
538 /// and [`NonNull::new_unchecked`] function, as well as for the correct
539 /// logic of the work of this crate, the following rules are necessary and
540 /// sufficient:
541 ///
542 /// * `self` contained pointer must not be `dangling`;
543 ///
    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
    /// words, `self.to_base_index() + offset + 1` must be no greater than the number returned
547 /// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
548 ///
549 /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
553 /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
554 ///
555 /// [`Bucket`]: crate::raw::Bucket
556 /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
557 /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
558 /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
559 /// [`RawTableInner::buckets`]: RawTableInner::buckets
560 #[inline]
561 unsafe fn next_n(&self, offset: usize) -> Self {
562 let ptr = if T::IS_ZERO_SIZED {
563 // invalid pointer is good enough for ZST
564 invalid_mut(self.ptr.as_ptr() as usize + offset)
565 } else {
566 self.ptr.as_ptr().sub(offset)
567 };
568 Self {
569 ptr: NonNull::new_unchecked(ptr),
570 }
571 }
572
573 /// Executes the destructor (if any) of the pointed-to `data`.
574 ///
575 /// # Safety
576 ///
577 /// See [`ptr::drop_in_place`] for safety concerns.
578 ///
579 /// You should use [`RawTable::erase`] instead of this function,
580 /// or be careful with calling this function directly, because for
    /// properly dropping the data we also need to clear the `data` control bytes.
    /// If we drop the data but do not erase the `data control byte`, it leads to a
    /// double drop when [`RawTable`] goes out of scope.
584 ///
585 /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
586 /// [`RawTable`]: crate::raw::RawTable
587 /// [`RawTable::erase`]: crate::raw::RawTable::erase
588 #[cfg_attr(feature = "inline-more", inline)]
589 pub(crate) unsafe fn drop(&self) {
590 self.as_ptr().drop_in_place();
591 }
592
593 /// Reads the `value` from `self` without moving it. This leaves the
594 /// memory in `self` unchanged.
595 ///
596 /// # Safety
597 ///
598 /// See [`ptr::read`] for safety concerns.
599 ///
600 /// You should use [`RawTable::remove`] instead of this function,
    /// or be careful with calling this function directly, because the compiler
    /// will call the destructor of `T` when the read `value` goes out of scope.
    /// This can cause a double drop when [`RawTable`] goes out of scope,
    /// because the `data control byte` has not been erased.
605 ///
606 /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
607 /// [`RawTable`]: crate::raw::RawTable
608 /// [`RawTable::remove`]: crate::raw::RawTable::remove
609 #[inline]
610 pub(crate) unsafe fn read(&self) -> T {
611 self.as_ptr().read()
612 }
613
614 /// Overwrites a memory location with the given `value` without reading
615 /// or dropping the old value (like [`ptr::write`] function).
616 ///
617 /// # Safety
618 ///
619 /// See [`ptr::write`] for safety concerns.
620 ///
621 /// # Note
622 ///
623 /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
624 /// those for the old `T` value, as the map will not re-evaluate where the new
625 /// value should go, meaning the value may become "lost" if their location
626 /// does not reflect their state.
627 ///
628 /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
629 /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
630 /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
631 #[inline]
632 pub(crate) unsafe fn write(&self, val: T) {
633 self.as_ptr().write(val);
634 }
635
636 /// Returns a shared immutable reference to the `value`.
637 ///
638 /// # Safety
639 ///
640 /// See [`NonNull::as_ref`] for safety concerns.
641 ///
642 /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
643 ///
644 /// # Examples
645 ///
646 /// ```
647 /// # #[cfg(feature = "raw")]
648 /// # fn test() {
649 /// use core::hash::{BuildHasher, Hash};
650 /// use hashbrown::raw::{Bucket, RawTable};
651 ///
652 /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
653 ///
654 /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
655 /// use core::hash::Hasher;
656 /// let mut state = hash_builder.build_hasher();
657 /// key.hash(&mut state);
658 /// state.finish()
659 /// }
660 ///
661 /// let hash_builder = NewHashBuilder::default();
662 /// let mut table = RawTable::new();
663 ///
664 /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
665 /// let hash = make_hash(&hash_builder, &value.0);
666 ///
667 /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
668 ///
669 /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
670 ///
671 /// assert_eq!(
672 /// unsafe { bucket.as_ref() },
673 /// &("A pony", "is a small horse".to_owned())
674 /// );
675 /// # }
676 /// # fn main() {
677 /// # #[cfg(feature = "raw")]
678 /// # test()
679 /// # }
680 /// ```
681 #[inline]
682 pub unsafe fn as_ref<'a>(&self) -> &'a T {
683 &*self.as_ptr()
684 }
685
686 /// Returns a unique mutable reference to the `value`.
687 ///
688 /// # Safety
689 ///
690 /// See [`NonNull::as_mut`] for safety concerns.
691 ///
692 /// # Note
693 ///
694 /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
695 /// those for the old `T` value, as the map will not re-evaluate where the new
696 /// value should go, meaning the value may become "lost" if their location
697 /// does not reflect their state.
698 ///
699 /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
700 /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
701 /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
702 ///
703 /// # Examples
704 ///
705 /// ```
706 /// # #[cfg(feature = "raw")]
707 /// # fn test() {
708 /// use core::hash::{BuildHasher, Hash};
709 /// use hashbrown::raw::{Bucket, RawTable};
710 ///
711 /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
712 ///
713 /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
714 /// use core::hash::Hasher;
715 /// let mut state = hash_builder.build_hasher();
716 /// key.hash(&mut state);
717 /// state.finish()
718 /// }
719 ///
720 /// let hash_builder = NewHashBuilder::default();
721 /// let mut table = RawTable::new();
722 ///
723 /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
724 /// let hash = make_hash(&hash_builder, &value.0);
725 ///
726 /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
727 ///
728 /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
729 ///
730 /// unsafe {
731 /// bucket
732 /// .as_mut()
733 /// .1
734 /// .push_str(" less than 147 cm at the withers")
735 /// };
736 /// assert_eq!(
737 /// unsafe { bucket.as_ref() },
738 /// &(
739 /// "A pony",
740 /// "is a small horse less than 147 cm at the withers".to_owned()
741 /// )
742 /// );
743 /// # }
744 /// # fn main() {
745 /// # #[cfg(feature = "raw")]
746 /// # test()
747 /// # }
748 /// ```
749 #[inline]
750 pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
751 &mut *self.as_ptr()
752 }
753
    /// Copies `size_of::<T>()` bytes from `other` to `self`. The source
755 /// and destination may *not* overlap.
756 ///
757 /// # Safety
758 ///
759 /// See [`ptr::copy_nonoverlapping`] for safety concerns.
760 ///
761 /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
762 /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
763 /// in the region beginning at `*self` and the region beginning at `*other` can
764 /// [violate memory safety].
765 ///
766 /// # Note
767 ///
768 /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
769 /// those for the old `T` value, as the map will not re-evaluate where the new
770 /// value should go, meaning the value may become "lost" if their location
771 /// does not reflect their state.
772 ///
773 /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
774 /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
775 /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value
776 /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
777 /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
778 #[cfg(feature = "raw")]
779 #[inline]
780 pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
781 self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
782 }
783}
784
785/// A raw hash table with an unsafe API.
786pub struct RawTable<T, A: Allocator = Global> {
787 table: RawTableInner,
788 alloc: A,
789 // Tell dropck that we own instances of T.
790 marker: PhantomData<T>,
791}
792
793/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
794/// of how many different key-value types are used.
795struct RawTableInner {
796 // Mask to get an index from a hash value. The value is one less than the
797 // number of buckets in the table.
798 bucket_mask: usize,
799
800 // [Padding], T1, T2, ..., Tlast, C1, C2, ...
801 // ^ points here
802 ctrl: NonNull<u8>,
803
804 // Number of elements that can be inserted before we need to grow the table
805 growth_left: usize,
806
807 // Number of elements in the table, only really used by len()
808 items: usize,
809}
810
811impl<T> RawTable<T, Global> {
812 /// Creates a new empty hash table without allocating any memory.
813 ///
814 /// In effect this returns a table with exactly 1 bucket. However we can
815 /// leave the data pointer dangling since that bucket is never written to
816 /// due to our load factor forcing us to always have at least 1 free bucket.
817 #[inline]
818 pub const fn new() -> Self {
819 Self {
820 table: RawTableInner::NEW,
821 alloc: Global,
822 marker: PhantomData,
823 }
824 }
825
826 /// Attempts to allocate a new hash table with at least enough capacity
827 /// for inserting the given number of elements without reallocating.
828 #[cfg(feature = "raw")]
829 pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
830 Self::try_with_capacity_in(capacity, Global)
831 }
832
833 /// Allocates a new hash table with at least enough capacity for inserting
834 /// the given number of elements without reallocating.
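    ///
    /// # Examples
    ///
    /// A minimal usage sketch, in the same style as the other examples in
    /// this module (requires the `raw` feature):
    ///
    /// ```
    /// # #[cfg(feature = "raw")]
    /// # fn test() {
    /// use hashbrown::raw::RawTable;
    ///
    /// let table: RawTable<(&str, i32)> = RawTable::with_capacity(10);
    /// assert!(table.capacity() >= 10);
    /// assert_eq!(table.len(), 0);
    /// # }
    /// # fn main() {
    /// #     #[cfg(feature = "raw")]
    /// #     test()
    /// # }
    /// ```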
835 pub fn with_capacity(capacity: usize) -> Self {
836 Self::with_capacity_in(capacity, Global)
837 }
838}
839
840impl<T, A: Allocator> RawTable<T, A> {
841 const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();
842
843 /// Creates a new empty hash table without allocating any memory, using the
844 /// given allocator.
845 ///
846 /// In effect this returns a table with exactly 1 bucket. However we can
847 /// leave the data pointer dangling since that bucket is never written to
848 /// due to our load factor forcing us to always have at least 1 free bucket.
849 #[inline]
850 pub const fn new_in(alloc: A) -> Self {
851 Self {
852 table: RawTableInner::NEW,
853 alloc,
854 marker: PhantomData,
855 }
856 }
857
858 /// Allocates a new hash table with the given number of buckets.
859 ///
860 /// The control bytes are left uninitialized.
861 #[cfg_attr(feature = "inline-more", inline)]
862 unsafe fn new_uninitialized(
863 alloc: A,
864 buckets: usize,
865 fallibility: Fallibility,
866 ) -> Result<Self, TryReserveError> {
867 debug_assert!(buckets.is_power_of_two());
868
869 Ok(Self {
870 table: RawTableInner::new_uninitialized(
871 &alloc,
872 Self::TABLE_LAYOUT,
873 buckets,
874 fallibility,
875 )?,
876 alloc,
877 marker: PhantomData,
878 })
879 }
880
881 /// Attempts to allocate a new hash table using the given allocator, with at least enough
882 /// capacity for inserting the given number of elements without reallocating.
883 #[cfg(feature = "raw")]
884 pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
885 Ok(Self {
886 table: RawTableInner::fallible_with_capacity(
887 &alloc,
888 Self::TABLE_LAYOUT,
889 capacity,
890 Fallibility::Fallible,
891 )?,
892 alloc,
893 marker: PhantomData,
894 })
895 }
896
897 /// Allocates a new hash table using the given allocator, with at least enough capacity for
898 /// inserting the given number of elements without reallocating.
899 pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
900 Self {
901 table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
902 alloc,
903 marker: PhantomData,
904 }
905 }
906
907 /// Returns a reference to the underlying allocator.
908 #[inline]
909 pub fn allocator(&self) -> &A {
910 &self.alloc
911 }
912
    /// Returns a pointer to one past the last `data` element in the table as viewed from
914 /// the start point of the allocation.
915 ///
916 /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
917 /// otherwise using it may result in [`undefined behavior`].
918 ///
919 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
920 #[inline]
921 pub fn data_end(&self) -> NonNull<T> {
922 // SAFETY: `self.table.ctrl` is `NonNull`, so casting it is safe
923 //
924 // `self.table.ctrl.as_ptr().cast()` returns pointer that
925 // points here (to the end of `T0`)
926 // ∨
927 // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
928 // \________ ________/
929 // \/
930 // `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
931 //
932 // where: T0...T_n - our stored data;
933 // CT0...CT_n - control bytes or metadata for `data`.
934 // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
935 // with loading `Group` bytes from the heap works properly, even if the result
936 // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
937 // `RawTableInner::set_ctrl` function.
938 //
939 // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
940 // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
941 unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) }
942 }
943
    /// Returns a pointer to the start of the data table.
945 #[inline]
946 #[cfg(any(feature = "raw", feature = "nightly"))]
947 pub unsafe fn data_start(&self) -> NonNull<T> {
948 NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
949 }
950
    /// Returns information about the memory allocated by the table.
    ///
    /// `RawTable` allocates a single memory block to store both data and metadata.
    /// This function returns the beginning of that area along with its size and alignment.
    /// These are the arguments which will be passed to `dealloc` when the table is dropped.
956 ///
957 /// This function might be useful for memory profiling.
958 #[inline]
959 #[cfg(feature = "raw")]
960 pub fn allocation_info(&self) -> (NonNull<u8>, Layout) {
961 // SAFETY: We use the same `table_layout` that was used to allocate
962 // this table.
963 unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) }
964 }
965
966 /// Returns the index of a bucket from a `Bucket`.
967 #[inline]
968 pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
969 bucket.to_base_index(self.data_end())
970 }
971
972 /// Returns a pointer to an element in the table.
973 ///
974 /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
975 /// otherwise using it may result in [`undefined behavior`].
976 ///
977 /// # Safety
978 ///
979 /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
980 /// following safety rules:
981 ///
982 /// * The table must already be allocated;
983 ///
984 /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`]
985 /// function, i.e. `(index + 1) <= self.buckets()`.
986 ///
987 /// It is safe to call this function with index of zero (`index == 0`) on a table that has
988 /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
989 ///
990 /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
991 /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
992 /// `(index + 1) <= self.buckets()`.
993 ///
994 /// [`RawTable::buckets`]: RawTable::buckets
995 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
996 #[inline]
997 pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
998 // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
        // (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
1000 // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
1001 //
1002 // `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
1003 // part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
1004 // |
1005 // | `base = self.data_end()` points here
1006 // | (to the start of CT0 or to the end of T0)
1007 // v v
1008 // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
1009 // ^ \__________ __________/
1010 // `table.bucket(3)` returns a pointer that points \/
1011 // here in the `data` part of the `RawTable` (to additional control bytes
1012 // the end of T3) `m = Group::WIDTH - 1`
1013 //
1014 // where: T0...T_n - our stored data;
1015 // CT0...CT_n - control bytes or metadata for `data`;
1016 // CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
1017 // the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
1018 // is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
1019 //
1020 // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
1021 // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
1022 debug_assert_ne!(self.table.bucket_mask, 0);
1023 debug_assert!(index < self.buckets());
1024 Bucket::from_base_index(self.data_end(), index)
1025 }
1026
1027 /// Erases an element from the table without dropping it.
1028 #[cfg_attr(feature = "inline-more", inline)]
1029 unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
1030 let index = self.bucket_index(item);
1031 self.table.erase(index);
1032 }
1033
1034 /// Erases an element from the table, dropping it in place.
1035 #[cfg_attr(feature = "inline-more", inline)]
1036 #[allow(clippy::needless_pass_by_value)]
1037 pub unsafe fn erase(&mut self, item: Bucket<T>) {
1038 // Erase the element from the table first since drop might panic.
1039 self.erase_no_drop(&item);
1040 item.drop();
1041 }
1042
1043 /// Finds and erases an element from the table, dropping it in place.
1044 /// Returns true if an element was found.
1045 #[cfg(feature = "raw")]
1046 #[cfg_attr(feature = "inline-more", inline)]
1047 pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
1048 // Avoid `Option::map` because it bloats LLVM IR.
1049 if let Some(bucket) = self.find(hash, eq) {
1050 unsafe {
1051 self.erase(bucket);
1052 }
1053 true
1054 } else {
1055 false
1056 }
1057 }
1058
1059 /// Removes an element from the table, returning it.
1060 ///
1061 /// This also returns an `InsertSlot` pointing to the newly free bucket.
1062 #[cfg_attr(feature = "inline-more", inline)]
1063 #[allow(clippy::needless_pass_by_value)]
1064 pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
1065 self.erase_no_drop(&item);
1066 (
1067 item.read(),
1068 InsertSlot {
1069 index: self.bucket_index(&item),
1070 },
1071 )
1072 }
1073
1074 /// Finds and removes an element from the table, returning it.
1075 #[cfg_attr(feature = "inline-more", inline)]
1076 pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
1077 // Avoid `Option::map` because it bloats LLVM IR.
1078 match self.find(hash, eq) {
1079 Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
1080 None => None,
1081 }
1082 }
1083
1084 /// Marks all table buckets as empty without dropping their contents.
1085 #[cfg_attr(feature = "inline-more", inline)]
1086 pub fn clear_no_drop(&mut self) {
1087 self.table.clear_no_drop();
1088 }
1089
1090 /// Removes all elements from the table without freeing the backing memory.
1091 #[cfg_attr(feature = "inline-more", inline)]
1092 pub fn clear(&mut self) {
1093 if self.is_empty() {
1094 // Special case empty table to avoid surprising O(capacity) time.
1095 return;
1096 }
        // Ensure that the table is reset even if one of the drops panics.
1098 let mut self_ = guard(self, |self_| self_.clear_no_drop());
1099 unsafe {
1100 // SAFETY: ScopeGuard sets to zero the `items` field of the table
1101 // even in case of panic during the dropping of the elements so
1102 // that there will be no double drop of the elements.
1103 self_.table.drop_elements::<T>();
1104 }
1105 }
1106
1107 /// Shrinks the table to fit `max(self.len(), min_size)` elements.
1108 #[cfg_attr(feature = "inline-more", inline)]
1109 pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
1110 // Calculate the minimal number of elements that we need to reserve
1111 // space for.
1112 let min_size = usize::max(self.table.items, min_size);
1113 if min_size == 0 {
1114 let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
1115 unsafe {
1116 // SAFETY:
1117 // 1. We call the function only once;
1118 // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
1119 // and [`TableLayout`] that were used to allocate this table.
1120 // 3. If any elements' drop function panics, then there will only be a memory leak,
1121 // because we have replaced the inner table with a new one.
1122 old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
1123 }
1124 return;
1125 }
1126
1127 // Calculate the number of buckets that we need for this number of
1128 // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now and nothing needs to be
1130 // done.
1131 let min_buckets = match capacity_to_buckets(min_size) {
1132 Some(buckets) => buckets,
1133 None => return,
1134 };
1135
1136 // If we have more buckets than we need, shrink the table.
1137 if min_buckets < self.buckets() {
1138 // Fast path if the table is empty
1139 if self.table.items == 0 {
1140 let new_inner =
1141 RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
1142 let mut old_inner = mem::replace(&mut self.table, new_inner);
1143 unsafe {
1144 // SAFETY:
1145 // 1. We call the function only once;
1146 // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
1147 // and [`TableLayout`] that were used to allocate this table.
1148 // 3. If any elements' drop function panics, then there will only be a memory leak,
1149 // because we have replaced the inner table with a new one.
1150 old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
1151 }
1152 } else {
1153 // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1154 unsafe {
1155 // SAFETY:
1156 // 1. We know for sure that `min_size >= self.table.items`.
1157 // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1158 // we will never expose RawTable::new_uninitialized in a public API.
1159 if self
1160 .resize(min_size, hasher, Fallibility::Infallible)
1161 .is_err()
1162 {
1163 // SAFETY: The result of calling the `resize` function cannot be an error
1164 // because `fallibility == Fallibility::Infallible.
1165 hint::unreachable_unchecked()
1166 }
1167 }
1168 }
1169 }
1170 }
1171
1172 /// Ensures that at least `additional` items can be inserted into the table
1173 /// without reallocation.
1174 #[cfg_attr(feature = "inline-more", inline)]
1175 pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
1176 if unlikely(additional > self.table.growth_left) {
1177 // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1178 unsafe {
1179 // SAFETY: The [`RawTableInner`] must already have properly initialized control
1180 // bytes since we will never expose RawTable::new_uninitialized in a public API.
1181 if self
1182 .reserve_rehash(additional, hasher, Fallibility::Infallible)
1183 .is_err()
1184 {
1185 // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
1186 hint::unreachable_unchecked()
1187 }
1188 }
1189 }
1190 }
1191
1192 /// Tries to ensure that at least `additional` items can be inserted into
1193 /// the table without reallocation.
1194 #[cfg_attr(feature = "inline-more", inline)]
1195 pub fn try_reserve(
1196 &mut self,
1197 additional: usize,
1198 hasher: impl Fn(&T) -> u64,
1199 ) -> Result<(), TryReserveError> {
1200 if additional > self.table.growth_left {
1201 // SAFETY: The [`RawTableInner`] must already have properly initialized control
1202 // bytes since we will never expose RawTable::new_uninitialized in a public API.
1203 unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
1204 } else {
1205 Ok(())
1206 }
1207 }
1208
1209 /// Out-of-line slow path for `reserve` and `try_reserve`.
1210 ///
1211 /// # Safety
1212 ///
1213 /// The [`RawTableInner`] must have properly initialized control bytes,
1214 /// otherwise calling this function results in [`undefined behavior`]
1215 ///
1216 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1217 #[cold]
1218 #[inline(never)]
1219 unsafe fn reserve_rehash(
1220 &mut self,
1221 additional: usize,
1222 hasher: impl Fn(&T) -> u64,
1223 fallibility: Fallibility,
1224 ) -> Result<(), TryReserveError> {
1225 unsafe {
1226 // SAFETY:
1227 // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1228 // [`TableLayout`] that were used to allocate this table.
1229 // 2. The `drop` function is the actual drop function of the elements stored in
1230 // the table.
1231 // 3. The caller ensures that the control bytes of the `RawTableInner`
1232 // are already initialized.
1233 self.table.reserve_rehash_inner(
1234 &self.alloc,
1235 additional,
1236 &|table, index| hasher(table.bucket::<T>(index).as_ref()),
1237 fallibility,
1238 Self::TABLE_LAYOUT,
1239 if T::NEEDS_DROP {
1240 Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
1241 } else {
1242 None
1243 },
1244 )
1245 }
1246 }
1247
1248 /// Allocates a new table of a different size and moves the contents of the
1249 /// current table into it.
1250 ///
1251 /// # Safety
1252 ///
1253 /// The [`RawTableInner`] must have properly initialized control bytes,
1254 /// otherwise calling this function results in [`undefined behavior`]
1255 ///
1256 /// The caller of this function must ensure that `capacity >= self.table.items`
1257 /// otherwise:
1258 ///
1259 /// * If `self.table.items != 0`, calling of this function with `capacity`
1260 /// equal to 0 (`capacity == 0`) results in [`undefined behavior`].
1261 ///
1262 /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
1263 /// `self.table.items > capacity_to_buckets(capacity)`
1264 /// calling this function results in [`undefined behavior`].
1265 ///
1266 /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
1267 /// `self.table.items > capacity_to_buckets(capacity)`
    /// calling this function will never return (it will go into an
    /// infinite loop).
1270 ///
1271 /// See [`RawTableInner::find_insert_slot`] for more information.
1272 ///
1273 /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
1274 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1275 unsafe fn resize(
1276 &mut self,
1277 capacity: usize,
1278 hasher: impl Fn(&T) -> u64,
1279 fallibility: Fallibility,
1280 ) -> Result<(), TryReserveError> {
1281 // SAFETY:
1282 // 1. The caller of this function guarantees that `capacity >= self.table.items`.
1283 // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and
1284 // [`TableLayout`] that were used to allocate this table.
1285 // 3. The caller ensures that the control bytes of the `RawTableInner`
1286 // are already initialized.
1287 self.table.resize_inner(
1288 &self.alloc,
1289 capacity,
1290 &|table, index| hasher(table.bucket::<T>(index).as_ref()),
1291 fallibility,
1292 Self::TABLE_LAYOUT,
1293 )
1294 }
1295
1296 /// Inserts a new element into the table, and returns its raw bucket.
1297 ///
1298 /// This does not check if the given element already exists in the table.
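    ///
    /// # Examples
    ///
    /// A minimal usage sketch, in the same style as the other examples in
    /// this module (requires the `raw` feature):
    ///
    /// ```
    /// # #[cfg(feature = "raw")]
    /// # fn test() {
    /// use core::hash::{BuildHasher, Hash};
    /// use hashbrown::raw::RawTable;
    ///
    /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
    ///
    /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    ///     use core::hash::Hasher;
    ///     let mut state = hash_builder.build_hasher();
    ///     key.hash(&mut state);
    ///     state.finish()
    /// }
    ///
    /// let hash_builder = NewHashBuilder::default();
    /// let mut table = RawTable::new();
    ///
    /// let value = ("a", 100);
    /// let hash = make_hash(&hash_builder, &value.0);
    ///
    /// table.insert(hash, value, |val| make_hash(&hash_builder, &val.0));
    /// assert_eq!(table.len(), 1);
    /// # }
    /// # fn main() {
    /// #     #[cfg(feature = "raw")]
    /// #     test()
    /// # }
    /// ```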
1299 #[cfg_attr(feature = "inline-more", inline)]
1300 pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
1301 unsafe {
1302 // SAFETY:
1303 // 1. The [`RawTableInner`] must already have properly initialized control bytes since
1304 // we will never expose `RawTable::new_uninitialized` in a public API.
1305 //
1306 // 2. We reserve additional space (if necessary) right after calling this function.
1307 let mut slot = self.table.find_insert_slot(hash);
1308
1309 // We can avoid growing the table once we have reached our load factor if we are replacing
1310 // a tombstone. This works since the number of EMPTY slots does not change in this case.
1311 //
1312 // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index
1313 // in the range `0..=self.buckets()`.
1314 let old_ctrl = *self.table.ctrl(slot.index);
1315 if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
1316 self.reserve(1, hasher);
1317 // SAFETY: We know for sure that `RawTableInner` has control bytes
1318 // initialized and that there is extra space in the table.
1319 slot = self.table.find_insert_slot(hash);
1320 }
1321
1322 self.insert_in_slot(hash, slot, value)
1323 }
1324 }
1325
1326 /// Attempts to insert a new element without growing the table and return its raw bucket.
1327 ///
1328 /// Returns an `Err` containing the given element if inserting it would require growing the
1329 /// table.
1330 ///
1331 /// This does not check if the given element already exists in the table.
1332 #[cfg(feature = "raw")]
1333 #[cfg_attr(feature = "inline-more", inline)]
1334 pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
1335 unsafe {
1336 match self.table.prepare_insert_no_grow(hash) {
1337 Ok(index) => {
1338 let bucket = self.bucket(index);
1339 bucket.write(value);
1340 Ok(bucket)
1341 }
1342 Err(()) => Err(value),
1343 }
1344 }
1345 }
1346
1347 /// Inserts a new element into the table, and returns a mutable reference to it.
1348 ///
1349 /// This does not check if the given element already exists in the table.
1350 #[cfg_attr(feature = "inline-more", inline)]
1351 pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
1352 unsafe { self.insert(hash, value, hasher).as_mut() }
1353 }
1354
1355 /// Inserts a new element into the table, without growing the table.
1356 ///
1357 /// There must be enough space in the table to insert the new element.
1358 ///
1359 /// This does not check if the given element already exists in the table.
1360 #[cfg_attr(feature = "inline-more", inline)]
1361 #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
1362 pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
1363 let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
1364 let bucket = self.table.bucket(index);
1365
1366 // If we are replacing a DELETED entry then we don't need to update
1367 // the load counter.
1368 self.table.growth_left -= special_is_empty(old_ctrl) as usize;
1369
1370 bucket.write(value);
1371 self.table.items += 1;
1372 bucket
1373 }
1374
    /// Temporarily removes a bucket, applying the given function to the removed
    /// element and optionally putting the returned value back in the same bucket.
    ///
    /// Returns `true` if the bucket still contains an element.
1379 ///
1380 /// This does not check if the given bucket is actually occupied.
1381 #[cfg_attr(feature = "inline-more", inline)]
1382 pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1383 where
1384 F: FnOnce(T) -> Option<T>,
1385 {
1386 let index = self.bucket_index(&bucket);
1387 let old_ctrl = *self.table.ctrl(index);
1388 debug_assert!(self.is_bucket_full(index));
1389 let old_growth_left = self.table.growth_left;
1390 let item = self.remove(bucket).0;
1391 if let Some(new_item) = f(item) {
1392 self.table.growth_left = old_growth_left;
1393 self.table.set_ctrl(index, old_ctrl);
1394 self.table.items += 1;
1395 self.bucket(index).write(new_item);
1396 true
1397 } else {
1398 false
1399 }
1400 }
1401
1402 /// Searches for an element in the table. If the element is not found,
1403 /// returns `Err` with the position of a slot where an element with the
1404 /// same hash could be inserted.
1405 ///
1406 /// This function may resize the table if additional space is required for
1407 /// inserting an element.
1408 #[inline]
1409 pub fn find_or_find_insert_slot(
1410 &mut self,
1411 hash: u64,
1412 mut eq: impl FnMut(&T) -> bool,
1413 hasher: impl Fn(&T) -> u64,
1414 ) -> Result<Bucket<T>, InsertSlot> {
1415 self.reserve(1, hasher);
1416
1417 unsafe {
1418 // SAFETY:
1419 // 1. We know for sure that there is at least one empty `bucket` in the table.
1420 // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
1421 // never expose `RawTable::new_uninitialized` in a public API.
1422 // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
1423 // which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
1424 // the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
1425 match self
1426 .table
1427 .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
1428 {
1429 // SAFETY: See explanation above.
1430 Ok(index) => Ok(self.bucket(index)),
1431 Err(slot) => Err(slot),
1432 }
1433 }
1434 }
1435
1436 /// Inserts a new element into the table in the given slot, and returns its
1437 /// raw bucket.
1438 ///
1439 /// # Safety
1440 ///
1441 /// `slot` must point to a slot previously returned by
1442 /// `find_or_find_insert_slot`, and no mutation of the table must have
1443 /// occurred since that call.
1444 #[inline]
1445 pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
1446 let old_ctrl = *self.table.ctrl(slot.index);
1447 self.table.record_item_insert_at(slot.index, old_ctrl, hash);
1448
1449 let bucket = self.bucket(slot.index);
1450 bucket.write(value);
1451 bucket
1452 }
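
    // A sketch of the intended two-step pattern described in the safety
    // section above (pseudocode, not a doctest): look the key up and only
    // insert when it is absent, without mutating the table in between:
    //
    //     match table.find_or_find_insert_slot(hash, eq, hasher) {
    //         Ok(bucket) => { /* an equal element is already present */ }
    //         Err(slot) => unsafe {
    //             // No table mutation between the two calls, as required.
    //             table.insert_in_slot(hash, slot, value);
    //         },
    //     }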
1453
1454 /// Searches for an element in the table.
1455 #[inline]
1456 pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1457 unsafe {
1458 // SAFETY:
1459 // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
1460 // will never expose `RawTable::new_uninitialized` in a public API.
            // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
1462 // the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
1463 // is safe.
1464 let result = self
1465 .table
1466 .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
1467
1468 // Avoid `Option::map` because it bloats LLVM IR.
1469 match result {
1470 // SAFETY: See explanation above.
1471 Some(index) => Some(self.bucket(index)),
1472 None => None,
1473 }
1474 }
1475 }
1476
1477 /// Gets a reference to an element in the table.
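 ///
 /// A minimal lookup sketch (not compiled as a doctest; `key`, `hash`, the
 /// element type `(K, V)` and the `use_value` helper are assumptions for
 /// illustration):
 ///
 /// ```ignore
 /// if let Some(entry) = table.get(hash, |entry| entry.0 == key) {
 ///     // `entry` is a `&(K, V)` borrowed from the table.
 ///     use_value(&entry.1);
 /// }
 /// ```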
1478 #[inline]
1479 pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
1480 // Avoid `Option::map` because it bloats LLVM IR.
1481 match self.find(hash, eq) {
1482 Some(bucket) => Some(unsafe { bucket.as_ref() }),
1483 None => None,
1484 }
1485 }
1486
1487 /// Gets a mutable reference to an element in the table.
1488 #[inline]
1489 pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1490 // Avoid `Option::map` because it bloats LLVM IR.
1491 match self.find(hash, eq) {
1492 Some(bucket) => Some(unsafe { bucket.as_mut() }),
1493 None => None,
1494 }
1495 }
1496
1497 /// Attempts to get mutable references to `N` entries in the table at once.
1498 ///
1499 /// Returns an array of length `N` with the results of each query.
1500 ///
1501 /// At most one mutable reference will be returned to any entry. `None` will be returned if any
1502 /// of the hashes are duplicates or if any of the hashes is not found.
1503 ///
1504 /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
1505 /// the `i`th key to be looked up.
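 ///
 /// A sketch of fetching two distinct entries at once (not compiled as a
 /// doctest; the keys, hashes and the element type `(K, V)` are assumptions for
 /// illustration):
 ///
 /// ```ignore
 /// let keys = [&key_a, &key_b];
 /// if let Some([a, b]) = table.get_many_mut([hash_a, hash_b], |i, entry| &entry.0 == keys[i]) {
 ///     // `a` and `b` are `&mut (K, V)` and are guaranteed not to alias.
 ///     core::mem::swap(&mut a.1, &mut b.1);
 /// }
 /// ```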
1506 pub fn get_many_mut<const N: usize>(
1507 &mut self,
1508 hashes: [u64; N],
1509 eq: impl FnMut(usize, &T) -> bool,
1510 ) -> Option<[&'_ mut T; N]> {
1511 unsafe {
1512 let ptrs = self.get_many_mut_pointers(hashes, eq)?;
1513
1514 for (i, &cur) in ptrs.iter().enumerate() {
1515 if ptrs[..i].iter().any(|&prev| ptr::eq::<T>(prev, cur)) {
1516 return None;
1517 }
1518 }
1519 // All buckets are distinct from all previous buckets, so we're clear to return the result
1520 // of the lookup.
1521
1522 // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
1523 Some(mem::transmute_copy(&ptrs))
1524 }
1525 }
1526
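 /// Variant of [`RawTable::get_many_mut`] that skips the pairwise aliasing check
 /// on the returned pointers.
 ///
 /// # Safety
 ///
 /// The caller must ensure that the `hashes` (together with `eq`) select `N`
 /// distinct entries; otherwise multiple mutable references to the same element
 /// may be returned, which is undefined behavior.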
1527 pub unsafe fn get_many_unchecked_mut<const N: usize>(
1528 &mut self,
1529 hashes: [u64; N],
1530 eq: impl FnMut(usize, &T) -> bool,
1531 ) -> Option<[&'_ mut T; N]> {
1532 let ptrs = self.get_many_mut_pointers(hashes, eq)?;
1533 Some(mem::transmute_copy(&ptrs))
1534 }
1535
1536 unsafe fn get_many_mut_pointers<const N: usize>(
1537 &mut self,
1538 hashes: [u64; N],
1539 mut eq: impl FnMut(usize, &T) -> bool,
1540 ) -> Option<[*mut T; N]> {
1541 // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
1542 let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit();
1543 let outs_ptr = outs.as_mut_ptr();
1544
1545 for (i, &hash) in hashes.iter().enumerate() {
1546 let cur = self.find(hash, |k| eq(i, k))?;
1547 *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut();
1548 }
1549
1550 // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
1551 Some(outs.assume_init())
1552 }
1553
1554 /// Returns the number of elements the map can hold without reallocating.
1555 ///
1556 /// This number is a lower bound; the table might be able to hold
1557 /// more, but is guaranteed to be able to hold at least this many.
1558 #[inline]
1559 pub fn capacity(&self) -> usize {
1560 self.table.items + self.table.growth_left
1561 }
1562
1563 /// Returns the number of elements in the table.
1564 #[inline]
1565 pub fn len(&self) -> usize {
1566 self.table.items
1567 }
1568
1569 /// Returns `true` if the table contains no elements.
1570 #[inline]
1571 pub fn is_empty(&self) -> bool {
1572 self.len() == 0
1573 }
1574
1575 /// Returns the number of buckets in the table.
1576 #[inline]
1577 pub fn buckets(&self) -> usize {
1578 self.table.bucket_mask + 1
1579 }
1580
1581 /// Checks whether the bucket at `index` is full.
1582 ///
1583 /// # Safety
1584 ///
1585 /// The caller must ensure `index` is less than the number of buckets.
1586 #[inline]
1587 pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
1588 self.table.is_bucket_full(index)
1589 }
1590
1591 /// Returns an iterator over every element in the table. It is up to
1592 /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1593 /// Because we cannot make the `next` method unsafe on the `RawIter`
1594 /// struct, we have to make the `iter` method unsafe.
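 ///
 /// A sketch of iterating over all elements (not compiled as a doctest; the
 /// element type `(K, V)` and the `visit` helper are assumptions for
 /// illustration):
 ///
 /// ```ignore
 /// // The caller must keep `table` alive for the whole iteration.
 /// unsafe {
 ///     for bucket in table.iter() {
 ///         visit(bucket.as_ref());
 ///     }
 /// }
 /// ```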
1595 #[inline]
1596 pub unsafe fn iter(&self) -> RawIter<T> {
1597 // SAFETY:
1598 // 1. The caller must uphold the safety contract for `iter` method.
1599 // 2. The [`RawTableInner`] must already have properly initialized control bytes since
1600 // we will never expose RawTable::new_uninitialized in a public API.
1601 self.table.iter()
1602 }
1603
1604 /// Returns an iterator over occupied buckets that could match a given hash.
1605 ///
1606 /// `RawTable` only stores 7 bits of the hash value, so this iterator may
1607 /// return items that have a hash value different than the one provided. You
1608 /// should always validate the returned values before using them.
1609 ///
1610 /// It is up to the caller to ensure that the `RawTable` outlives the
1611 /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1612 /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
1613 #[cfg_attr(feature = "inline-more", inline)]
1614 #[cfg(feature = "raw")]
1615 pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
1616 RawIterHash::new(self, hash)
1617 }
1618
1619 /// Returns an iterator which removes all elements from the table without
1620 /// freeing the memory.
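 ///
 /// A sketch of draining a table while keeping its allocation for reuse (not
 /// compiled as a doctest; `consume` is an assumed sink for the moved-out
 /// values):
 ///
 /// ```ignore
 /// for value in table.drain() {
 ///     consume(value);
 /// }
 /// // The elements are gone, but the buckets are still allocated.
 /// assert!(table.is_empty());
 /// ```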
1621 #[cfg_attr(feature = "inline-more", inline)]
1622 pub fn drain(&mut self) -> RawDrain<'_, T, A> {
1623 unsafe {
1624 let iter = self.iter();
1625 self.drain_iter_from(iter)
1626 }
1627 }
1628
1629 /// Returns an iterator which removes all elements from the table without
1630 /// freeing the memory.
1631 ///
1632 /// Iteration starts at the provided iterator's current location.
1633 ///
1634 /// It is up to the caller to ensure that the iterator is valid for this
1635 /// `RawTable` and covers all items that remain in the table.
1636 #[cfg_attr(feature = "inline-more", inline)]
1637 pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
1638 debug_assert_eq!(iter.len(), self.len());
1639 RawDrain {
1640 iter,
1641 table: mem::replace(&mut self.table, RawTableInner::NEW),
1642 orig_table: NonNull::from(&mut self.table),
1643 marker: PhantomData,
1644 }
1645 }
1646
1647 /// Returns an iterator which consumes all elements from the table.
1648 ///
1649 /// Iteration starts at the provided iterator's current location.
1650 ///
1651 /// It is up to the caller to ensure that the iterator is valid for this
1652 /// `RawTable` and covers all items that remain in the table.
1653 pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
1654 debug_assert_eq!(iter.len(), self.len());
1655
1656 let allocation = self.into_allocation();
1657 RawIntoIter {
1658 iter,
1659 allocation,
1660 marker: PhantomData,
1661 }
1662 }
1663
1664 /// Converts the table into a raw allocation. The contents of the table
1665 /// should be dropped using a `RawIter` before freeing the allocation.
1666 #[cfg_attr(feature = "inline-more", inline)]
1667 pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
1668 let alloc = if self.table.is_empty_singleton() {
1669 None
1670 } else {
1671 // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1672 let (layout, ctrl_offset) =
1673 match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
1674 Some(lco) => lco,
1675 None => unsafe { hint::unreachable_unchecked() },
1676 };
1677 Some((
1678 unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
1679 layout,
1680 unsafe { ptr::read(&self.alloc) },
1681 ))
1682 };
1683 mem::forget(self);
1684 alloc
1685 }
1686}
1687
1688unsafe impl<T, A: Allocator> Send for RawTable<T, A>
1689where
1690 T: Send,
1691 A: Send,
1692{
1693}
1694unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
1695where
1696 T: Sync,
1697 A: Sync,
1698{
1699}
1700
1701impl RawTableInner {
1702 const NEW: Self = RawTableInner::new();
1703
1704 /// Creates a new empty hash table without allocating any memory.
1705 ///
1706 /// In effect this returns a table with exactly 1 bucket. However we can
1707 /// leave the data pointer dangling since that bucket is never accessed
1708 /// due to our load factor forcing us to always have at least 1 free bucket.
1709 #[inline]
1710 const fn new() -> Self {
1711 Self {
1712 // Be careful to cast the entire slice to a raw pointer.
1713 ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
1714 bucket_mask: 0,
1715 items: 0,
1716 growth_left: 0,
1717 }
1718 }
1719}
1720
1721impl RawTableInner {
1722 /// Allocates a new [`RawTableInner`] with the given number of buckets.
1723 /// The control bytes and buckets are left uninitialized.
1724 ///
1725 /// # Safety
1726 ///
1727 /// The caller of this function must ensure that `buckets` is a power of two
1728 /// and must also initialize all `self.bucket_mask + 1 + Group::WIDTH` control
1729 /// bytes with the [`EMPTY`] byte.
1730 ///
1731 /// See also [`Allocator`] API for other safety concerns.
1732 ///
1733 /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
1734 #[cfg_attr(feature = "inline-more", inline)]
1735 unsafe fn new_uninitialized<A>(
1736 alloc: &A,
1737 table_layout: TableLayout,
1738 buckets: usize,
1739 fallibility: Fallibility,
1740 ) -> Result<Self, TryReserveError>
1741 where
1742 A: Allocator,
1743 {
1744 debug_assert!(buckets.is_power_of_two());
1745
1746 // Avoid `Option::ok_or_else` because it bloats LLVM IR.
1747 let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
1748 Some(lco) => lco,
1749 None => return Err(fallibility.capacity_overflow()),
1750 };
1751
1752 let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
1753 Ok(block) => block.cast(),
1754 Err(_) => return Err(fallibility.alloc_err(layout)),
1755 };
1756
1757 // SAFETY: a null pointer would have been caught by the allocation check above
1758 let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
1759 Ok(Self {
1760 ctrl,
1761 bucket_mask: buckets - 1,
1762 items: 0,
1763 growth_left: bucket_mask_to_capacity(buckets - 1),
1764 })
1765 }
1766
1767 /// Attempts to allocate a new [`RawTableInner`] with at least enough
1768 /// capacity for inserting the given number of elements without reallocating.
1769 ///
1770 /// All the control bytes are initialized with the [`EMPTY`] bytes.
1771 #[inline]
1772 fn fallible_with_capacity<A>(
1773 alloc: &A,
1774 table_layout: TableLayout,
1775 capacity: usize,
1776 fallibility: Fallibility,
1777 ) -> Result<Self, TryReserveError>
1778 where
1779 A: Allocator,
1780 {
1781 if capacity == 0 {
1782 Ok(Self::NEW)
1783 } else {
1784 // SAFETY: We checked that we could successfully allocate the new table, and then
1785 // initialized all control bytes with the constant `EMPTY` byte.
1786 unsafe {
1787 let buckets =
1788 capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
1789
1790 let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
1791 // SAFETY: We checked that the table is allocated and therefore the table already has
1792 // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
1793 // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
1794 result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
1795
1796 Ok(result)
1797 }
1798 }
1799 }
1800
1801 /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
1802 /// the given number of elements without reallocating.
1803 ///
1804 /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the program
1805 /// in case of an allocation error. Use [`fallible_with_capacity`] instead if you want to
1806 /// handle memory allocation failure.
1807 ///
1808 /// All the control bytes are initialized with the [`EMPTY`] bytes.
1809 ///
1810 /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
1811 /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
1812 fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
1813 where
1814 A: Allocator,
1815 {
1816 // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1817 match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
1818 Ok(table_inner) => table_inner,
1819 // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
1820 Err(_) => unsafe { hint::unreachable_unchecked() },
1821 }
1822 }
1823
1824 /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
1825 ///
1826 /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
1827 /// bytes outside the range of the table are filled with [`EMPTY`] entries. These will unfortunately
1828 /// trigger a match in the [`RawTableInner::find_insert_slot_in_group`] function. This is because
1829 /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking
1830 /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied.
1831 /// We detect this situation here and perform a second scan starting at the beginning of the table.
1832 /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
1833 /// trailing control bytes (containing [`EMPTY`] bytes).
1834 ///
1835 /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
1836 /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
1837 /// `Safety`).
1838 ///
1839 /// # Warning
1840 ///
1841 /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
1842 /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
1843 /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
1844 /// index will cause immediate [`undefined behavior`].
1845 ///
1846 /// # Safety
1847 ///
1848 /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
1849 /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work
1850 /// of this crate, the following rules are necessary and sufficient:
1851 ///
1852 /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
1853 /// function results in [`undefined behavior`].
1854 ///
1855 /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
1856 /// (after the `find_insert_slot_in_group` function, but before insertion into the table).
1857 ///
1858 /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
1859 /// (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
1860 ///
1861 /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
1862 /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
1863 /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
1864 ///
1865 /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
1866 /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
1867 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1868 #[inline]
1869 unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
1870 // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
1871 if unlikely(self.is_bucket_full(index)) {
1872 debug_assert!(self.bucket_mask < Group::WIDTH);
1873 // SAFETY:
1874 //
1875 // * Since the caller of this function ensures that the control bytes are properly
1876 // initialized and `ptr = self.ctrl(0)` points to the start of the array of control
1877 // bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
1878 // and points to the properly initialized control bytes (see also
1879 // `TableLayout::calculate_layout_for` and `ptr::read`);
1880 //
1881 // * Because the caller of this function ensures that the index was provided by the
1882 // `self.find_insert_slot_in_group()` function, so for tables larger than the
1883 // group width (self.buckets() >= Group::WIDTH), we will never end up in the given
1884 // branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
1885 // cannot return a full bucket index. For tables smaller than the group width, calling
1886 // the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
1887 // the range of the table are filled with EMPTY bytes (and we know for sure that there
1888 // is at least one FULL bucket), so this second scan either finds an empty slot (due to
1889 // the load factor) or hits the trailing control bytes (containing EMPTY).
1890 index = Group::load_aligned(self.ctrl(0))
1891 .match_empty_or_deleted()
1892 .lowest_set_bit()
1893 .unwrap_unchecked();
1894 }
1895 InsertSlot { index }
1896 }
1897
1898 /// Finds the position to insert something in a group.
1899 ///
1900 /// **This may have false positives and must be fixed up with `fix_insert_slot`
1901 /// before it's used.**
1902 ///
1903 /// The returned index is guaranteed to be in the range `0..self.buckets()`
1904 /// (`0..=self.bucket_mask`), but for tables smaller than the group width it may point to a full bucket.
1905 #[inline]
1906 fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
1907 let bit = group.match_empty_or_deleted().lowest_set_bit();
1908
1909 if likely(bit.is_some()) {
1910 // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
1911 // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
1912 Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
1913 } else {
1914 None
1915 }
1916 }
1917
1918 /// Searches for an element in the table, or a potential slot where that element could
1919 /// be inserted (an empty or deleted [`Bucket`] index).
1920 ///
1921 /// This uses dynamic dispatch to reduce the amount of code generated, but that is
1922 /// eliminated by LLVM optimizations.
1923 ///
1924 /// This function does not make any changes to the `data` part of the table, or any
1925 /// changes to the `items` or `growth_left` field of the table.
1926 ///
1927 /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
1928 /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
1929 /// will never return (will go into an infinite loop) for tables larger than the group
1930 /// width, or return an index outside of the table indices range if the table is less
1931 /// than the group width.
1932 ///
1933 /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
1934 /// function with only `FULL` buckets' indices and return the `index` of the found
1935 /// element (as `Ok(index)`). If the element is not found and there is at least 1
1936 /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
1937 /// [InsertSlot] with an index in the range `0..self.buckets()`, but in any case,
1938 /// if this function returns [`InsertSlot`], it will contain an index in the range
1939 /// `0..=self.buckets()`.
1940 ///
1941 /// # Safety
1942 ///
1943 /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
1944 /// this function results in [`undefined behavior`].
1945 ///
1946 /// Attempting to write data at the [`InsertSlot`] returned by this function when the table is
1947 /// less than the group width and if there was not at least one empty or deleted bucket in
1948 /// the table will cause immediate [`undefined behavior`]. This is because in this case the
1949 /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
1950 /// control bytes outside the table range.
1951 ///
1952 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
1953 #[inline]
1954 unsafe fn find_or_find_insert_slot_inner(
1955 &self,
1956 hash: u64,
1957 eq: &mut dyn FnMut(usize) -> bool,
1958 ) -> Result<usize, InsertSlot> {
1959 let mut insert_slot = None;
1960
1961 let h2_hash = h2(hash);
1962 let mut probe_seq = self.probe_seq(hash);
1963
1964 loop {
1965 // SAFETY:
1966 // * Caller of this function ensures that the control bytes are properly initialized.
1967 //
1968 // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
1969 // of the table due to masking with `self.bucket_mask` and also because number of
1970 // buckets is a power of two (see `self.probe_seq` function).
1971 //
1972 // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
1973 // call `Group::load` due to the extended control bytes range, which is
1974 // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
1975 // byte will never be read for the allocated table);
1976 //
1977 // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
1978 // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
1979 // bytes, which is safe (see RawTableInner::new).
1980 let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
1981
1982 for bit in group.match_byte(h2_hash) {
1983 let index = (probe_seq.pos + bit) & self.bucket_mask;
1984
1985 if likely(eq(index)) {
1986 return Ok(index);
1987 }
1988 }
1989
1990 // We didn't find the element we were looking for in the group, try to get an
1991 // insertion slot from the group if we don't have one yet.
1992 if likely(insert_slot.is_none()) {
1993 insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
1994 }
1995
1996 // Only stop the search if the group contains at least one empty element.
1997 // Otherwise, the element that we are looking for might be in a following group.
1998 if likely(group.match_empty().any_bit_set()) {
1999 // We must have found an insert slot by now, since the current group contains at
2000 // least one. For tables smaller than the group width, there will still be an
2001 // empty element in the current (and only) group due to the load factor.
2002 unsafe {
2003 // SAFETY:
2004 // * Caller of this function ensures that the control bytes are properly initialized.
2005 //
2006 // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
2007 return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()));
2008 }
2009 }
2010
2011 probe_seq.move_next(self.bucket_mask);
2012 }
2013 }
2014
2015 /// Searches for an empty or deleted bucket which is suitable for inserting a new
2016 /// element and sets the hash for that slot. Returns an index of that slot and the
2017 /// old control byte stored in the found index.
2018 ///
2019 /// This function does not check if the given element exists in the table. Also,
2020 /// this function does not check if there is enough space in the table to insert
2021 /// a new element. The caller of the function must ensure that the table has at
2022 /// least 1 empty or deleted `bucket`, otherwise this function will never return
2023 /// (will go into an infinite loop) for tables larger than the group width, or
2024 /// return an index outside of the table indices range if the table is less than
2025 /// the group width.
2026 ///
2027 /// If there is at least 1 empty or deleted `bucket` in the table, the function is
2028 /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
2029 /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
2030 ///
2031 /// This function does not make any changes to the `data` parts of the table,
2032 /// or any changes to the `items` or `growth_left` field of the table.
2033 ///
2034 /// # Safety
2035 ///
2036 /// The safety rules are directly derived from the safety rules for the
2037 /// [`RawTableInner::set_ctrl_h2`] and [`RawTableInner::find_insert_slot`] methods.
2038 /// Thus, in order to uphold the safety contracts for those methods, as well as for
2039 /// the correct logic of the work of this crate, you must observe the following rules
2040 /// when calling this function:
2041 ///
2042 /// * The [`RawTableInner`] has already been allocated and has properly initialized
2043 /// control bytes otherwise calling this function results in [`undefined behavior`].
2044 ///
2045 /// * The caller of this function must ensure that the "data" parts of the table
2046 /// will have an entry in the returned index (matching the given hash) right
2047 /// after calling this function.
2048 ///
2049 /// Attempting to write data at the `index` returned by this function when the table is
2050 /// less than the group width and if there was not at least one empty or deleted bucket in
2051 /// the table will cause immediate [`undefined behavior`]. This is because in this case the
2052 /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
2053 /// control bytes outside the table range.
2054 ///
2055 /// The caller must independently increase the `items` field of the table, and also,
2056 /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left`
2057 /// field, and do not change it if the old control byte was [`DELETED`].
2058 ///
2059 /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2060 /// or saving an `element` from / into the [`RawTable`] / [`RawTableInner`].
2061 ///
2062 /// [`Bucket::as_ptr`]: Bucket::as_ptr
2063 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2064 /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
2065 /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
2066 /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
2067 #[inline]
2068 unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) {
2069 // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
2070 let index: usize = self.find_insert_slot(hash).index;
2071 // SAFETY:
2072 // 1. The `find_insert_slot` function either returns an `index` less than or
2073 // equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
2074 // returns if it cannot find an empty or deleted slot.
2075 // 2. The caller of this function guarantees that the table has already been
2076 // allocated
2077 let old_ctrl = *self.ctrl(index);
2078 self.set_ctrl_h2(index, hash);
2079 (index, old_ctrl)
2080 }
2081
2082 /// Searches for an empty or deleted bucket which is suitable for inserting
2083 /// a new element, returning the `index` for the new [`Bucket`].
2084 ///
2085 /// This function does not make any changes to the `data` part of the table, or any
2086 /// changes to the `items` or `growth_left` field of the table.
2087 ///
2088 /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
2089 /// will never return (will go into an infinite loop) for tables larger than the group
2090 /// width, or return an index outside of the table indices range if the table is less
2091 /// than the group width.
2092 ///
2093 /// If there is at least 1 empty or deleted `bucket` in the table, the function is
2094 /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`,
2095 /// but in any case, if this function returns [`InsertSlot`], it will contain an index
2096 /// in the range `0..=self.buckets()`.
2097 ///
2098 /// # Safety
2099 ///
2100 /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
2101 /// this function results in [`undefined behavior`].
2102 ///
2103 /// Attempting to write data at the [`InsertSlot`] returned by this function when the table is
2104 /// less than the group width and if there was not at least one empty or deleted bucket in
2105 /// the table will cause immediate [`undefined behavior`]. This is because in this case the
2106 /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
2107 /// control bytes outside the table range.
2108 ///
2109 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2110 #[inline]
2111 unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot {
2112 let mut probe_seq = self.probe_seq(hash);
2113 loop {
2114 // SAFETY:
2115 // * Caller of this function ensures that the control bytes are properly initialized.
2116 //
2117 // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
2118 // of the table due to masking with `self.bucket_mask` and also because number of
2119 // buckets is a power of two (see `self.probe_seq` function).
2120 //
2121 // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
2122 // call `Group::load` due to the extended control bytes range, which is
2123 // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
2124 // byte will never be read for the allocated table);
2125 //
2126 // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
2127 // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
2128 // bytes, which is safe (see RawTableInner::new).
2129 let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
2130
2131 let index = self.find_insert_slot_in_group(&group, &probe_seq);
2132 if likely(index.is_some()) {
2133 // SAFETY:
2134 // * Caller of this function ensures that the control bytes are properly initialized.
2135 //
2136 // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
2137 unsafe {
2138 return self.fix_insert_slot(index.unwrap_unchecked());
2139 }
2140 }
2141 probe_seq.move_next(self.bucket_mask);
2142 }
2143 }
2144
2145 /// Searches for an element in a table, returning the `index` of the found element.
2146 /// This uses dynamic dispatch to reduce the amount of code generated, but it is
2147 /// eliminated by LLVM optimizations.
2148 ///
2149 /// This function does not make any changes to the `data` part of the table, or any
2150 /// changes to the `items` or `growth_left` field of the table.
2151 ///
2152 /// The table must have at least 1 empty `bucket`, otherwise, if the
2153 /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
2154 /// this function will also never return (will go into an infinite loop).
2155 ///
2156 /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
2157 /// function with only `FULL` buckets' indices and return the `index` of the found
2158 /// element as `Some(index)`, so the index will always be in the range
2159 /// `0..self.buckets()`.
2160 ///
2161 /// # Safety
2162 ///
2163 /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
2164 /// this function results in [`undefined behavior`].
2165 ///
2166 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2167 #[inline(always)]
2168 unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
2169 let h2_hash = h2(hash);
2170 let mut probe_seq = self.probe_seq(hash);
2171
2172 loop {
2173 // SAFETY:
2174 // * Caller of this function ensures that the control bytes are properly initialized.
2175 //
2176 // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
2177 // of the table due to masking with `self.bucket_mask`.
2178 //
2179 // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
2180 // call `Group::load` due to the extended control bytes range, which is
2181 // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
2182 // byte will never be read for the allocated table);
2183 //
2184 // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
2185 // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
2186 // bytes, which is safe (see RawTableInner::new).
2187 let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
2188
2189 for bit in group.match_byte(h2_hash) {
2190 // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
2191 // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2192 let index = (probe_seq.pos + bit) & self.bucket_mask;
2193
2194 if likely(eq(index)) {
2195 return Some(index);
2196 }
2197 }
2198
2199 if likely(group.match_empty().any_bit_set()) {
2200 return None;
2201 }
2202
2203 probe_seq.move_next(self.bucket_mask);
2204 }
2205 }
2206
2207 /// Prepares for rehashing data in place (that is, without allocating new memory).
2208 /// Converts all `FULL` control bytes to `DELETED` and all `DELETED` control
2209 /// bytes to `EMPTY`, i.e. performs the following conversion:
2210 ///
2211 /// - `EMPTY` control bytes -> `EMPTY`;
2212 /// - `DELETED` control bytes -> `EMPTY`;
2213 /// - `FULL` control bytes -> `DELETED`.
2214 ///
2215 /// This function does not make any changes to the `data` parts of the table,
2216 /// or any changes to the `items` or `growth_left` field of the table.
2217 ///
2218 /// # Safety
2219 ///
2220 /// You must observe the following safety rules when calling this function:
2221 ///
2222 /// * The [`RawTableInner`] has already been allocated;
2223 ///
2224 /// * The caller of this function must convert the `DELETED` bytes back to `FULL`
2225 /// bytes when re-inserting them into their ideal position (which was impossible
2226 /// to do during the first insert due to tombstones). If the caller does not do
2227 /// this, then calling this function may result in a memory leak.
2228 ///
2229 /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
2230 /// calling this function results in [`undefined behavior`].
2231 ///
2232 /// Calling this function on a table that has not been allocated results in
2233 /// [`undefined behavior`].
2234 ///
2235 /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2236 /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2237 ///
2238 /// [`Bucket::as_ptr`]: Bucket::as_ptr
2239 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2240 #[allow(clippy::mut_mut)]
2241 #[inline]
2242 unsafe fn prepare_rehash_in_place(&mut self) {
2243 // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
2244 // This effectively frees up all buckets containing a DELETED entry.
2245 //
2246 // SAFETY:
2247 // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
2248 // 2. Even if `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
2249 // due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
2250 // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
2251 // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
2252 // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
2253 for i in (0..self.buckets()).step_by(Group::WIDTH) {
2254 let group = Group::load_aligned(self.ctrl(i));
2255 let group = group.convert_special_to_empty_and_full_to_deleted();
2256 group.store_aligned(self.ctrl(i));
2257 }
2258
2259 // Fix up the trailing control bytes. See the comments in set_ctrl
2260 // for the handling of tables smaller than the group width.
2261 //
2262 // SAFETY: The caller of this function guarantees that [`RawTableInner`]
2263 // has already been allocated
2264 if unlikely(self.buckets() < Group::WIDTH) {
2265 // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
2266 // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
2267 // `Group::WIDTH` is safe
2268 self.ctrl(0)
2269 .copy_to(self.ctrl(Group::WIDTH), self.buckets());
2270 } else {
2271 // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
2272 // control bytes,so copying `Group::WIDTH` bytes with offset equal
2273 // to `self.buckets() == self.bucket_mask + 1` is safe
2274 self.ctrl(0)
2275 .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
2276 }
2277 }
2278
2279 /// Returns an iterator over every element in the table.
2280 ///
2281 /// # Safety
2282 ///
2283 /// If any of the following conditions are violated, the result
2284 /// is [`undefined behavior`]:
2285 ///
2286 /// * The caller has to ensure that the `RawTableInner` outlives the
2287 /// `RawIter`. Because we cannot make the `next` method unsafe on
2288 /// the `RawIter` struct, we have to make the `iter` method unsafe.
2289 ///
2290 /// * The [`RawTableInner`] must have properly initialized control bytes.
2291 ///
2292 /// The type `T` must be the actual type of the elements stored in the table,
2293 /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
2294 ///
2295 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2296 #[inline]
2297 unsafe fn iter<T>(&self) -> RawIter<T> {
2298 // SAFETY:
2299 // 1. Since the caller of this function ensures that the control bytes
2300 // are properly initialized and `self.data_end()` points to the start
2301 // of the array of control bytes, therefore: `ctrl` is valid for reads,
2302 // properly aligned to `Group::WIDTH` and points to the properly initialized
2303 // control bytes.
2304 // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
2305 // equal to zero).
2306 // 3. We pass the exact value of buckets of the table to the function.
2307 //
2308 // `ctrl` points here (to the start
2309 // of the first control byte `CT0`)
2310 // ∨
2311 // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2312 // \________ ________/
2313 // \/
2314 // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2315 //
2316 // where: T0...T_n - our stored data;
2317 // CT0...CT_n - control bytes or metadata for `data`.
2318 // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2319 // with loading `Group` bytes from the heap works properly, even if the result
2320 // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2321 // `RawTableInner::set_ctrl` function.
2322 //
2323 // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2324 // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2325 let data = Bucket::from_base_index(self.data_end(), 0);
2326 RawIter {
2327 // SAFETY: See explanation above
2328 iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
2329 items: self.items,
2330 }
2331 }
2332
2333 /// Executes the destructors (if any) of the values stored in the table.
2334 ///
2335 /// # Note
2336 ///
2337 /// This function does not erase the control bytes of the table and does
2338 /// not make any changes to the `items` or `growth_left` fields of the
2339 /// table. If necessary, the caller of this function must manually set
2340 /// up these table fields, for example using the [`clear_no_drop`] function.
2341 ///
2342 /// Be careful when calling this function, because the drop function of
2343 /// the elements can panic, and this can leave the table in an inconsistent
2344 /// state.
2345 ///
2346 /// # Safety
2347 ///
2348 /// The type `T` must be the actual type of the elements stored in the table,
2349 /// otherwise calling this function may result in [`undefined behavior`].
2350 ///
2351 /// If `T` is a type that should be dropped and **the table is not empty**,
2352 /// calling this function more than once results in [`undefined behavior`].
2353 ///
2354 /// If `T` is not [`Copy`], attempting to use values stored in the table after
2355 /// calling this function may result in [`undefined behavior`].
2356 ///
2357 /// It is safe to call this function on a table that has not been allocated,
2358 /// on a table with uninitialized control bytes, and on a table with no actual
2359 /// data but with `Full` control bytes if `self.items == 0`.
2360 ///
2361 /// See also the [`Bucket::drop`] / [`Bucket::as_ptr`] methods for more information
2362 /// about properly removing or saving an `element` from / into the [`RawTable`] /
2363 /// [`RawTableInner`].
2364 ///
2365 /// [`Bucket::drop`]: Bucket::drop
2366 /// [`Bucket::as_ptr`]: Bucket::as_ptr
2367 /// [`clear_no_drop`]: RawTableInner::clear_no_drop
2368 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2369 unsafe fn drop_elements<T>(&mut self) {
2370 // Check that `self.items != 0`. Protects against the possibility
2371 // of creating an iterator on a table with uninitialized control bytes.
2372 if T::NEEDS_DROP && self.items != 0 {
2373 // SAFETY: We know for sure that RawTableInner will outlive the
2374 // returned `RawIter` iterator, and the caller of this function
2375 // must uphold the safety contract for `drop_elements` method.
2376 for item in self.iter::<T>() {
2377 // SAFETY: The caller must uphold the safety contract for
2378 // `drop_elements` method.
2379 item.drop();
2380 }
2381 }
2382 }
2383
2384 /// Executes the destructors (if any) of the values stored in the table and then
2385 /// deallocates the table.
2386 ///
2387 /// # Note
2388 ///
2389 /// Calling this function automatically makes invalid (dangling) all instances of
2390 /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
2391 ///
2392 /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
2393 /// fields of the table. If necessary, the caller of this function must manually set
2394 /// up these table fields.
2395 ///
2396 /// # Safety
2397 ///
2398 /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2399 ///
2400 /// * Calling this function more than once;
2401 ///
2402 /// * The type `T` must be the actual type of the elements stored in the table.
2403 ///
2404 /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
2405 /// to allocate this table.
2406 ///
2407 /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
2408 /// was used to allocate this table.
2409 ///
2410 /// The caller of this function should pay attention to the possibility of the
2411 /// elements' drop function panicking, because this:
2412 ///
2413 /// * May leave the table in an inconsistent state;
2414 ///
2415 /// * Means the memory is never deallocated, so a memory leak may occur.
2416 ///
2417 /// Attempting to use the `ctrl` field of the table (dereferencing it) after calling this
2418 /// function results in [`undefined behavior`].
2419 ///
2420 /// It is safe to call this function on a table that has not been allocated,
2421 /// on a table with uninitialized control bytes, and on a table with no actual
2422 /// data but with `Full` control bytes if `self.items == 0`.
2423 ///
2424 /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
2425 /// for more information.
2426 ///
2427 /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
2428 /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
2429 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2430 unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
2431 if !self.is_empty_singleton() {
2432 unsafe {
2433 // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
2434 self.drop_elements::<T>();
2435 // SAFETY:
2436 // 1. We have checked that our table is allocated.
2437 // 2. The caller must uphold the safety contract for `drop_inner_table` method.
2438 self.free_buckets(alloc, table_layout);
2439 }
2440 }
2441 }
2442
2443 /// Returns a pointer to an element in the table (convenience for
2444 /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
2445 ///
2446 /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
2447 /// otherwise using it may result in [`undefined behavior`].
2448 ///
2449 /// # Safety
2450 ///
2451 /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
2452 /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
2453 /// this function, the following safety rules must be observed:
2454 ///
2455 /// * The table must already be allocated;
2456 ///
2457 /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
2458 /// function, i.e. `(index + 1) <= self.buckets()`.
2459 ///
2460 /// * The type `T` must be the actual type of the elements stored in the table, otherwise
2461 /// using the returned [`Bucket`] may result in [`undefined behavior`].
2462 ///
2463 /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
2464 /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
2465 ///
2466 /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
2467 /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
2468 /// `(index + 1) <= self.buckets()`.
2469 ///
2470 /// ```none
2471 /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2472 /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2473 /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2474 ///
2475 /// `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
2476 /// part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
2477 /// |
2478 /// | `base = table.data_end::<T>()` points here
2479 /// | (to the start of CT0 or to the end of T0)
2480 /// v v
2481 /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2482 /// ^ \__________ __________/
2483 /// `table.bucket(3)` returns a pointer that points \/
2484 /// here in the `data` part of the `RawTableInner` additional control bytes
2485 /// (to the end of T3) `m = Group::WIDTH - 1`
2486 ///
2487 /// where: T0...T_n - our stored data;
2488 /// CT0...CT_n - control bytes or metadata for `data`;
2489 /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2490 /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2491 /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2492 ///
2493 /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2494 /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2495 /// ```
2496 ///
2497 /// [`Bucket::from_base_index`]: Bucket::from_base_index
2498 /// [`RawTableInner::buckets`]: RawTableInner::buckets
2499 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2500 #[inline]
2501 unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
2502 debug_assert_ne!(self.bucket_mask, 0);
2503 debug_assert!(index < self.buckets());
2504 Bucket::from_base_index(self.data_end(), index)
2505 }
2506
2507 /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
2508 /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
2509 ///
2510 /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
2511 /// otherwise using it may result in [`undefined behavior`].
2512 ///
2513 /// # Safety
2514 ///
2515 /// If any of the following conditions are violated, the result is [`undefined behavior`]:
2516 ///
2517 /// * The table must already be allocated;
2518 ///
2519 /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
2520 /// function, i.e. `(index + 1) <= self.buckets()`;
2521 ///
2522 /// * The `size_of` must be equal to the size of the elements stored in the table;
2523 ///
2524 /// ```none
2525 /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
2526 /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
2527 /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
2528 ///
2529 /// `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
2530 /// `data` part of the `RawTableInner`, i.e. to the start of T3
2531 /// |
2532 /// | `base = table.data_end::<u8>()` points here
2533 /// | (to the start of CT0 or to the end of T0)
2534 /// v v
2535 /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
2536 /// \__________ __________/
2537 /// \/
2538 /// additional control bytes
2539 /// `m = Group::WIDTH - 1`
2540 ///
2541 /// where: T0...T_n - our stored data;
2542 /// CT0...CT_n - control bytes or metadata for `data`;
2543 /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
2544 /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
2545 /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
2546 ///
2547 /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2548 /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2549 /// ```
2550 ///
2551 /// [`RawTableInner::buckets`]: RawTableInner::buckets
2552 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2553 #[inline]
2554 unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
2555 debug_assert_ne!(self.bucket_mask, 0);
2556 debug_assert!(index < self.buckets());
2557 let base: *mut u8 = self.data_end().as_ptr();
2558 base.sub((index + 1) * size_of)
2559 }
2560
2561 /// Returns a pointer to one past the last `data` element in the table as viewed from
2562 /// the start point of the allocation (convenience for `self.ctrl.cast()`).
2563 ///
2564 /// This function actually returns a pointer to the end of the `data element` at
2565 /// index "0" (zero).
2566 ///
2567 /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
2568 /// otherwise using it may result in [`undefined behavior`].
2569 ///
2570 /// # Note
2571 ///
2572 /// The type `T` must be the actual type of the elements stored in the table, otherwise
2573 /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
2574 ///
2575 /// ```none
2576 /// `table.data_end::<T>()` returns pointer that points here
2577 /// (to the end of `T0`)
2578 /// ∨
2579 /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
2580 /// \________ ________/
2581 /// \/
2582 /// `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2583 ///
2584 /// where: T0...T_n - our stored data;
2585 /// CT0...CT_n - control bytes or metadata for `data`.
2586 /// CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
2587 /// with loading `Group` bytes from the heap works properly, even if the result
2588 /// of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
2589 /// `RawTableInner::set_ctrl` function.
2590 ///
2591 /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
2592 /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2593 /// ```
2594 ///
2595 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2596 #[inline]
2597 fn data_end<T>(&self) -> NonNull<T> {
2598 unsafe {
2599 // SAFETY: `self.ctrl` is `NonNull`, so casting it is safe
2600 NonNull::new_unchecked(self.ctrl.as_ptr().cast())
2601 }
2602 }
2603
2604 /// Returns an iterator-like object for a probe sequence on the table.
2605 ///
2606 /// This iterator never terminates, but is guaranteed to visit each bucket
2607 /// group exactly once. The loop using `probe_seq` must terminate upon
2608 /// reaching a group containing an empty bucket.
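 ///
 /// A condensed sketch of how callers in this module consume the sequence
 /// (see `find_inner` for the full version):
 ///
 /// ```ignore
 /// let mut probe_seq = self.probe_seq(hash);
 /// loop {
 ///     let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
 ///     // ... inspect `h2(hash)` matches / empty bits inside `group` ...
 ///     if group.match_empty().any_bit_set() {
 ///         break; // an empty bucket terminates the probe
 ///     }
 ///     probe_seq.move_next(self.bucket_mask);
 /// }
 /// ```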
2609 #[inline]
2610 fn probe_seq(&self, hash: u64) -> ProbeSeq {
2611 ProbeSeq {
2612 // This is the same as `hash as usize % self.buckets()` because the number
2613 // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
2614 pos: h1(hash) & self.bucket_mask,
2615 stride: 0,
2616 }
2617 }
2618
2619 /// Returns the index of a bucket at which a value must then be inserted if there is enough room
2620 /// in the table; otherwise returns an error.
2621 #[cfg(feature = "raw")]
2622 #[inline]
2623 unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
2624 let index = self.find_insert_slot(hash).index;
2625 let old_ctrl = *self.ctrl(index);
2626 if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
2627 Err(())
2628 } else {
2629 self.record_item_insert_at(index, old_ctrl, hash);
2630 Ok(index)
2631 }
2632 }
2633
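 /// Marks the bucket at `index` as full for an element with the given `hash`:
 /// decrements `growth_left` if the old control byte was [`EMPTY`], writes
 /// `h2(hash)` into the control byte (and its replicated copy), and increments
 /// `items`.
 ///
 /// # Safety
 ///
 /// Same requirements as `RawTableInner::set_ctrl_h2`: the table must be
 /// allocated and `index` must be in the range `0..self.buckets()`.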
2634 #[inline]
2635 unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
2636 self.growth_left -= usize::from(special_is_empty(old_ctrl));
2637 self.set_ctrl_h2(index, hash);
2638 self.items += 1;
2639 }
2640
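 /// Returns whether the buckets at indices `i` and `new_i` fall within the same
 /// `Group::WIDTH`-sized window of the probe sequence for `hash`, i.e. whether
 /// their offsets from the probe origin (`self.probe_seq(hash).pos`), taken
 /// modulo the table size, land in the same group. Used during in-place
 /// rehashing to decide whether an element can stay where it is.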
2641 #[inline]
2642 fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
2643 let probe_seq_pos = self.probe_seq(hash).pos;
2644 let probe_index =
2645 |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
2646 probe_index(i) == probe_index(new_i)
2647 }
2648
2649 /// Sets a control byte to the hash, and possibly also the replicated control byte at
2650 /// the end of the array.
2651 ///
2652 /// This function does not make any changes to the `data` parts of the table,
2653 /// or any changes to the `items` or `growth_left` field of the table.
2654 ///
2655 /// # Safety
2656 ///
2657 /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
2658 /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
2659 /// following rules when calling this function:
2660 ///
2661 /// * The [`RawTableInner`] has already been allocated;
2662 ///
2663 /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2664 /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2665 /// be no greater than the number returned by the function [`RawTableInner::buckets`].
2666 ///
2667 /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2668 ///
2669 /// See also the [`Bucket::as_ptr`] method for more information about properly removing
2670 /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2671 ///
2672 /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
2673 /// [`RawTableInner::buckets`]: RawTableInner::buckets
2674 /// [`Bucket::as_ptr`]: Bucket::as_ptr
2675 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2676 #[inline]
2677 unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) {
2678 // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`]
2679 self.set_ctrl(index, h2(hash));
2680 }
2681
2682 /// Replaces the hash in the control byte at the given index with the provided one,
2683 /// and possibly also replicates the new control byte at the end of the array of control
2684 /// bytes, returning the old control byte.
2685 ///
2686 /// This function does not make any changes to the `data` parts of the table,
2687 /// or any changes to the `items` or `growth_left` field of the table.
2688 ///
2689 /// # Safety
2690 ///
2691 /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`]
2692 /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
2693 /// methods, you must observe the following rules when calling this function:
2694 ///
2695 /// * The [`RawTableInner`] has already been allocated;
2696 ///
2697 /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2698 /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2699 /// be no greater than the number returned by the function [`RawTableInner::buckets`].
2700 ///
2701 /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2702 ///
    /// See also the [`Bucket::as_ptr`] method for more information about how to properly remove
    /// or save a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2705 ///
2706 /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
2707 /// [`RawTableInner::buckets`]: RawTableInner::buckets
2708 /// [`Bucket::as_ptr`]: Bucket::as_ptr
2709 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2710 #[inline]
2711 unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 {
2712 // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`]
2713 let prev_ctrl = *self.ctrl(index);
2714 self.set_ctrl_h2(index, hash);
2715 prev_ctrl
2716 }
2717
2718 /// Sets a control byte, and possibly also the replicated control byte at
2719 /// the end of the array.
2720 ///
2721 /// This function does not make any changes to the `data` parts of the table,
    /// or any changes to the `items` or `growth_left` fields of the table.
2723 ///
2724 /// # Safety
2725 ///
2726 /// You must observe the following safety rules when calling this function:
2727 ///
2728 /// * The [`RawTableInner`] has already been allocated;
2729 ///
2730 /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
2731 /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
2732 /// be no greater than the number returned by the function [`RawTableInner::buckets`].
2733 ///
2734 /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
2735 ///
    /// See also the [`Bucket::as_ptr`] method for more information about how to properly remove
    /// or save a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2738 ///
2739 /// [`RawTableInner::buckets`]: RawTableInner::buckets
2740 /// [`Bucket::as_ptr`]: Bucket::as_ptr
2741 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2742 #[inline]
2743 unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch. If the table is smaller than
        // the group width (self.buckets() < Group::WIDTH),
        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
2748 //
2749 // - If index >= Group::WIDTH then index == index2.
2750 // - Otherwise index2 == self.bucket_mask + 1 + index.
2751 //
2752 // The very last replicated control byte is never actually read because
2753 // we mask the initial index for unaligned loads, but we write it
2754 // anyways because it makes the set_ctrl implementation simpler.
2755 //
2756 // If there are fewer buckets than Group::WIDTH then this code will
2757 // replicate the buckets at the end of the trailing group. For example
2758 // with 2 buckets and a group size of 4, the control bytes will look
2759 // like this:
2760 //
2761 // Real | Replicated
2762 // ---------------------------------------------
2763 // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
2764 // ---------------------------------------------
2765
2766 // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
2767 // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
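        //
        // Illustrative example (assuming `Group::WIDTH == 4` and 8 buckets, so
        // `bucket_mask == 7`): `index == 2` gives
        // `index2 == ((2 - 4) & 7) + 4 == 6 + 4 == 10`, the replicated copy of byte 2,
        // while `index == 5` gives `index2 == ((5 - 4) & 7) + 4 == 5 == index`,
        // so the same real byte is simply written twice.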
2768 let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
2769
2770 // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
2771 *self.ctrl(index) = ctrl;
2772 *self.ctrl(index2) = ctrl;
2773 }
2774
2775 /// Returns a pointer to a control byte.
2776 ///
2777 /// # Safety
2778 ///
    /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
    /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
    /// Calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
    /// returns a pointer one past the end of the allocated table, which is useless on its own.
    ///
    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
    /// table that has not been allocated results in [`Undefined Behavior`].
    ///
    /// So to satisfy both requirements you should always follow the rule that
    /// `index < self.bucket_mask + 1 + Group::WIDTH`.
    ///
    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
    /// for read-only purposes.
2792 ///
    /// See also the [`Bucket::as_ptr()`] method for more information about how to properly remove
    /// or save a `data element` from / into the [`RawTable`] / [`RawTableInner`].
2795 ///
2796 /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
2797 /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2798 #[inline]
2799 unsafe fn ctrl(&self, index: usize) -> *mut u8 {
2800 debug_assert!(index < self.num_ctrl_bytes());
2801 // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
2802 self.ctrl.as_ptr().add(index)
2803 }
2804
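    /// Returns the total number of buckets in the table (`self.bucket_mask + 1`).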
2805 #[inline]
2806 fn buckets(&self) -> usize {
2807 self.bucket_mask + 1
2808 }
2809
2810 /// Checks whether the bucket at `index` is full.
2811 ///
2812 /// # Safety
2813 ///
2814 /// The caller must ensure `index` is less than the number of buckets.
2815 #[inline]
2816 unsafe fn is_bucket_full(&self, index: usize) -> bool {
2817 debug_assert!(index < self.buckets());
2818 is_full(*self.ctrl(index))
2819 }
2820
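    /// Returns the total number of control bytes: one byte per bucket plus
    /// `Group::WIDTH` trailing bytes that replicate the first group.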
2821 #[inline]
2822 fn num_ctrl_bytes(&self) -> usize {
2823 self.bucket_mask + 1 + Group::WIDTH
2824 }
2825
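    /// Returns `true` if `bucket_mask` is zero, i.e. the table is the unallocated
    /// empty singleton with zero capacity.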
2826 #[inline]
2827 fn is_empty_singleton(&self) -> bool {
2828 self.bucket_mask == 0
2829 }
2830
    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating,
    /// and returns it inside a ScopeGuard to protect against panics in the hash
    /// function.
2835 ///
2836 /// # Note
2837 ///
2838 /// It is recommended (but not required):
2839 ///
2840 /// * That the new table's `capacity` be greater than or equal to `self.items`.
2841 ///
    /// * That `alloc` be the same [`Allocator`] as the `Allocator` used
    /// to allocate this table.
    ///
    /// * That `table_layout` be the same [`TableLayout`] as the `TableLayout` used
    /// to allocate this table.
2847 ///
2848 /// If `table_layout` does not match the `TableLayout` that was used to allocate
    /// this table, then using `mem::swap` with `self` and the new table returned
2850 /// by this function results in [`undefined behavior`].
2851 ///
2852 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2853 #[allow(clippy::mut_mut)]
2854 #[inline]
2855 fn prepare_resize<'a, A>(
2856 &self,
2857 alloc: &'a A,
2858 table_layout: TableLayout,
2859 capacity: usize,
2860 fallibility: Fallibility,
2861 ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
2862 where
2863 A: Allocator,
2864 {
2865 debug_assert!(self.items <= capacity);
2866
2867 // Allocate and initialize the new table.
2868 let new_table =
2869 RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
2870
2871 // The hash function may panic, in which case we simply free the new
2872 // table without dropping any elements that may have been copied into
2873 // it.
2874 //
2875 // This guard is also used to free the old table on success, see
2876 // the comment at the bottom of this function.
2877 Ok(guard(new_table, move |self_| {
2878 if !self_.is_empty_singleton() {
2879 // SAFETY:
2880 // 1. We have checked that our table is allocated.
2881 // 2. We know for sure that the `alloc` and `table_layout` matches the
2882 // [`Allocator`] and [`TableLayout`] used to allocate this table.
2883 unsafe { self_.free_buckets(alloc, table_layout) };
2884 }
2885 }))
2886 }
2887
2888 /// Reserves or rehashes to make room for `additional` more elements.
2889 ///
2890 /// This uses dynamic dispatch to reduce the amount of
2891 /// code generated, but it is eliminated by LLVM optimizations when inlined.
2892 ///
2893 /// # Safety
2894 ///
2895 /// If any of the following conditions are violated, the result is
2896 /// [`undefined behavior`]:
2897 ///
2898 /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
2899 /// to allocate this table.
2900 ///
2901 /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
2902 /// used to allocate this table.
2903 ///
2904 /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
2905 /// the elements stored in the table.
2906 ///
2907 /// * The [`RawTableInner`] must have properly initialized control bytes.
2908 ///
2909 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
2910 #[allow(clippy::inline_always)]
2911 #[inline(always)]
2912 unsafe fn reserve_rehash_inner<A>(
2913 &mut self,
2914 alloc: &A,
2915 additional: usize,
2916 hasher: &dyn Fn(&mut Self, usize) -> u64,
2917 fallibility: Fallibility,
2918 layout: TableLayout,
2919 drop: Option<fn(*mut u8)>,
2920 ) -> Result<(), TryReserveError>
2921 where
2922 A: Allocator,
2923 {
2924 // Avoid `Option::ok_or_else` because it bloats LLVM IR.
2925 let new_items = match self.items.checked_add(additional) {
2926 Some(new_items) => new_items,
2927 None => return Err(fallibility.capacity_overflow()),
2928 };
2929 let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
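        // Illustrative example (assuming the usual 7/8 load factor, under which a
        // 16-bucket table has `full_capacity == 14`): with `items == 3` and
        // `additional == 4`, `new_items == 7 <= 14 / 2`, so we rehash in place;
        // with `additional == 9`, `new_items == 12 > 7`, so we resize to hold at
        // least `full_capacity + 1 == 15` elements.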
2930 if new_items <= full_capacity / 2 {
2931 // Rehash in-place without re-allocating if we have plenty of spare
2932 // capacity that is locked up due to DELETED entries.
2933
2934 // SAFETY:
2935 // 1. We know for sure that `[`RawTableInner`]` has already been allocated
2936 // (since new_items <= full_capacity / 2);
2937 // 2. The caller ensures that `drop` function is the actual drop function of
2938 // the elements stored in the table.
2939 // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
2940 // used to allocate this table.
2941 // 4. The caller ensures that the control bytes of the `RawTableInner`
2942 // are already initialized.
2943 self.rehash_in_place(hasher, layout.size, drop);
2944 Ok(())
2945 } else {
2946 // Otherwise, conservatively resize to at least the next size up
2947 // to avoid churning deletes into frequent rehashes.
2948 //
2949 // SAFETY:
2950 // 1. We know for sure that `capacity >= self.items`.
2951 // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
2952 // [`TableLayout`] that were used to allocate this table.
2953 // 3. The caller ensures that the control bytes of the `RawTableInner`
2954 // are already initialized.
2955 self.resize_inner(
2956 alloc,
2957 usize::max(new_items, full_capacity + 1),
2958 hasher,
2959 fallibility,
2960 layout,
2961 )
2962 }
2963 }
2964
    /// Returns an iterator over the indices of full buckets in the table.
2966 ///
2967 /// # Safety
2968 ///
2969 /// Behavior is undefined if any of the following conditions are violated:
2970 ///
2971 /// * The caller has to ensure that the `RawTableInner` outlives the
2972 /// `FullBucketsIndices`. Because we cannot make the `next` method
2973 /// unsafe on the `FullBucketsIndices` struct, we have to make the
2974 /// `full_buckets_indices` method unsafe.
2975 ///
2976 /// * The [`RawTableInner`] must have properly initialized control bytes.
2977 #[inline(always)]
2978 unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
2979 // SAFETY:
        // 1. The caller of this function ensures that the control bytes
        //    are properly initialized and `self.ctrl(0)` points to the start
        //    of the array of control bytes. Therefore `ctrl` is valid for reads,
        //    properly aligned to `Group::WIDTH` and points to the properly initialized
        //    control bytes.
2985 // 2. The value of `items` is equal to the amount of data (values) added
2986 // to the table.
2987 //
2988 // `ctrl` points here (to the start
2989 // of the first control byte `CT0`)
2990 // ∨
2991 // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
2992 // \________ ________/
2993 // \/
2994 // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
2995 //
2996 // where: T0...T_n - our stored data;
2997 // CT0...CT_n - control bytes or metadata for `data`.
2998 let ctrl = NonNull::new_unchecked(self.ctrl(0));
2999
3000 FullBucketsIndices {
3001 // Load the first group
3002 // SAFETY: See explanation above.
3003 current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(),
3004 group_first_index: 0,
3005 ctrl,
3006 items: self.items,
3007 }
3008 }
3009
3010 /// Allocates a new table of a different size and moves the contents of the
3011 /// current table into it.
3012 ///
3013 /// This uses dynamic dispatch to reduce the amount of
3014 /// code generated, but it is eliminated by LLVM optimizations when inlined.
3015 ///
3016 /// # Safety
3017 ///
3018 /// If any of the following conditions are violated, the result is
3019 /// [`undefined behavior`]:
3020 ///
3021 /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
3022 /// to allocate this table;
3023 ///
3024 /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
3025 /// used to allocate this table;
3026 ///
3027 /// * The [`RawTableInner`] must have properly initialized control bytes.
3028 ///
    /// The caller of this function must ensure that `capacity >= self.items`,
    /// otherwise:
    ///
    /// * If `self.items != 0`, calling this function with `capacity == 0`
    /// results in [`undefined behavior`].
3034 ///
    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
    /// `self.items > capacity_to_buckets(capacity)`, calling this function
    /// results in [`undefined behavior`].
    ///
    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
    /// `self.items > capacity_to_buckets(capacity)`, this function will
    /// never return (it will go into an infinite loop).
3042 ///
    /// Note: It is recommended (but not required) that the new table's `capacity`
    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
    /// this function may never return. See [`RawTableInner::find_insert_slot`] for
    /// more information.
3047 ///
3048 /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
3049 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3050 #[allow(clippy::inline_always)]
3051 #[inline(always)]
3052 unsafe fn resize_inner<A>(
3053 &mut self,
3054 alloc: &A,
3055 capacity: usize,
3056 hasher: &dyn Fn(&mut Self, usize) -> u64,
3057 fallibility: Fallibility,
3058 layout: TableLayout,
3059 ) -> Result<(), TryReserveError>
3060 where
3061 A: Allocator,
3062 {
3063 // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
3064 // that were used to allocate this table.
3065 let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
3066
3067 // SAFETY: We know for sure that RawTableInner will outlive the
3068 // returned `FullBucketsIndices` iterator, and the caller of this
3069 // function ensures that the control bytes are properly initialized.
3070 for full_byte_index in self.full_buckets_indices() {
3071 // This may panic.
3072 let hash = hasher(self, full_byte_index);
3073
3074 // SAFETY:
3075 // We can use a simpler version of insert() here since:
3076 // 1. There are no DELETED entries.
3077 // 2. We know there is enough space in the table.
3078 // 3. All elements are unique.
3079 // 4. The caller of this function guarantees that `capacity > 0`
3080 // so `new_table` must already have some allocated memory.
3081 // 5. We set `growth_left` and `items` fields of the new table
3082 // after the loop.
3083 // 6. We insert into the table, at the returned index, the data
3084 // matching the given hash immediately after calling this function.
3085 let (new_index, _) = new_table.prepare_insert_slot(hash);
3086
3087 // SAFETY:
3088 //
3089 // * `src` is valid for reads of `layout.size` bytes, since the
3090 // table is alive and the `full_byte_index` is guaranteed to be
3091 // within bounds (see `FullBucketsIndices::next_impl`);
3092 //
3093 // * `dst` is valid for writes of `layout.size` bytes, since the
3094 // caller ensures that `table_layout` matches the [`TableLayout`]
3095 // that was used to allocate old table and we have the `new_index`
3096 // returned by `prepare_insert_slot`.
3097 //
3098 // * Both `src` and `dst` are properly aligned.
3099 //
        // * Both `src` and `dst` point to different, non-overlapping regions of memory.
3101 ptr::copy_nonoverlapping(
3102 self.bucket_ptr(full_byte_index, layout.size),
3103 new_table.bucket_ptr(new_index, layout.size),
3104 layout.size,
3105 );
3106 }
3107
3108 // The hash function didn't panic, so we can safely set the
3109 // `growth_left` and `items` fields of the new table.
3110 new_table.growth_left -= self.items;
3111 new_table.items = self.items;
3112
3113 // We successfully copied all elements without panicking. Now replace
3114 // self with the new table. The old table will have its memory freed but
3115 // the items will not be dropped (since they have been moved into the
3116 // new table).
3117 // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
3118 // that was used to allocate this table.
3119 mem::swap(self, &mut new_table);
3120
3121 Ok(())
3122 }
3123
3124 /// Rehashes the contents of the table in place (i.e. without changing the
3125 /// allocation).
3126 ///
    /// If `hasher` panics, then some of the table's contents may be lost.
3128 ///
3129 /// This uses dynamic dispatch to reduce the amount of
3130 /// code generated, but it is eliminated by LLVM optimizations when inlined.
3131 ///
3132 /// # Safety
3133 ///
3134 /// If any of the following conditions are violated, the result is [`undefined behavior`]:
3135 ///
3136 /// * The `size_of` must be equal to the size of the elements stored in the table;
3137 ///
3138 /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
3139 /// the elements stored in the table.
3140 ///
3141 /// * The [`RawTableInner`] has already been allocated;
3142 ///
3143 /// * The [`RawTableInner`] must have properly initialized control bytes.
3144 ///
3145 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3146 #[allow(clippy::inline_always)]
3147 #[cfg_attr(feature = "inline-more", inline(always))]
3148 #[cfg_attr(not(feature = "inline-more"), inline)]
3149 unsafe fn rehash_in_place(
3150 &mut self,
3151 hasher: &dyn Fn(&mut Self, usize) -> u64,
3152 size_of: usize,
3153 drop: Option<fn(*mut u8)>,
3154 ) {
        // If the hash function panics then properly clean up any elements
        // that we haven't rehashed yet. Unfortunately we can't preserve the
        // elements since we have lost their hashes and have no way of recovering
        // them without risking another panic.
3159 self.prepare_rehash_in_place();
3160
3161 let mut guard = guard(self, move |self_| {
3162 if let Some(drop) = drop {
3163 for i in 0..self_.buckets() {
3164 if *self_.ctrl(i) == DELETED {
3165 self_.set_ctrl(i, EMPTY);
3166 drop(self_.bucket_ptr(i, size_of));
3167 self_.items -= 1;
3168 }
3169 }
3170 }
3171 self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
3172 });
3173
3174 // At this point, DELETED elements are elements that we haven't
3175 // rehashed yet. Find them and re-insert them at their ideal
3176 // position.
3177 'outer: for i in 0..guard.buckets() {
3178 if *guard.ctrl(i) != DELETED {
3179 continue;
3180 }
3181
3182 let i_p = guard.bucket_ptr(i, size_of);
3183
3184 'inner: loop {
3185 // Hash the current item
3186 let hash = hasher(*guard, i);
3187
3188 // Search for a suitable place to put it
3189 //
3190 // SAFETY: Caller of this function ensures that the control bytes
3191 // are properly initialized.
3192 let new_i = guard.find_insert_slot(hash).index;
3193
3194 // Probing works by scanning through all of the control
3195 // bytes in groups, which may not be aligned to the group
3196 // size. If both the new and old position fall within the
3197 // same unaligned group, then there is no benefit in moving
3198 // it and we can just continue to the next item.
3199 if likely(guard.is_in_same_group(i, new_i, hash)) {
3200 guard.set_ctrl_h2(i, hash);
3201 continue 'outer;
3202 }
3203
3204 let new_i_p = guard.bucket_ptr(new_i, size_of);
3205
3206 // We are moving the current item to a new position. Write
3207 // our H2 to the control byte of the new position.
3208 let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
3209 if prev_ctrl == EMPTY {
3210 guard.set_ctrl(i, EMPTY);
3211 // If the target slot is empty, simply move the current
3212 // element into the new slot and clear the old control
3213 // byte.
3214 ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
3215 continue 'outer;
3216 } else {
3217 // If the target slot is occupied, swap the two elements
3218 // and then continue processing the element that we just
3219 // swapped into the old slot.
3220 debug_assert_eq!(prev_ctrl, DELETED);
3221 ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
3222 continue 'inner;
3223 }
3224 }
3225 }
3226
3227 guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
3228
3229 mem::forget(guard);
3230 }
3231
3232 /// Deallocates the table without dropping any entries.
3233 ///
3234 /// # Note
3235 ///
    /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements),
    /// otherwise it can lead to a memory leak. Calling this function also makes all
    /// instances of buckets ([`Bucket`]) and the `ctrl` field of the table invalid (dangling).
3240 ///
3241 /// # Safety
3242 ///
3243 /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
3244 ///
3245 /// * The [`RawTableInner`] has already been allocated;
3246 ///
3247 /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
3248 /// to allocate this table.
3249 ///
3250 /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
3251 /// to allocate this table.
3252 ///
3253 /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3254 ///
3255 /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3256 /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3257 /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3258 #[inline]
3259 unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
3260 where
3261 A: Allocator,
3262 {
3263 // SAFETY: The caller must uphold the safety contract for `free_buckets`
3264 // method.
3265 let (ptr, layout) = self.allocation_info(table_layout);
3266 alloc.deallocate(ptr, layout);
3267 }
3268
3269 /// Returns a pointer to the allocated memory and the layout that was used to
3270 /// allocate the table.
3271 ///
3272 /// # Safety
3273 ///
3274 /// Caller of this function must observe the following safety rules:
3275 ///
3276 /// * The [`RawTableInner`] has already been allocated, otherwise
3277 /// calling this function results in [`undefined behavior`]
3278 ///
3279 /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3280 /// that was used to allocate this table. Failure to comply with this condition
3281 /// may result in [`undefined behavior`].
3282 ///
3283 /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3284 ///
3285 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3286 /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3287 /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3288 #[inline]
3289 unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
3290 debug_assert!(
3291 !self.is_empty_singleton(),
3292 "this function can only be called on non-empty tables"
3293 );
3294
3295 // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
3296 let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
3297 Some(lco) => lco,
3298 None => unsafe { hint::unreachable_unchecked() },
3299 };
3300 (
3301 // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
3302 unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
3303 layout,
3304 )
3305 }
3306
    /// Returns a pointer to the allocated memory and the layout that was used to
    /// allocate the table. If the [`RawTableInner`] has not been allocated, this
    /// function returns a dangling pointer and a zero-sized `()` (unit) layout.
3310 ///
3311 /// # Safety
3312 ///
3313 /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
3314 /// that was used to allocate this table. Failure to comply with this condition
3315 /// may result in [`undefined behavior`].
3316 ///
3317 /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
3318 ///
3319 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3320 /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
3321 /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
3322 #[cfg(feature = "raw")]
3323 unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
3324 if self.is_empty_singleton() {
3325 (NonNull::dangling(), Layout::new::<()>())
3326 } else {
3327 // SAFETY:
3328 // 1. We have checked that our table is allocated.
3329 // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
3330 // that was used to allocate this table.
3331 unsafe { self.allocation_info(table_layout) }
3332 }
3333 }
3334
3335 /// Marks all table buckets as empty without dropping their contents.
3336 #[inline]
3337 fn clear_no_drop(&mut self) {
3338 if !self.is_empty_singleton() {
3339 unsafe {
3340 self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
3341 }
3342 }
3343 self.items = 0;
3344 self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
3345 }
3346
    /// Erases the [`Bucket`]'s control byte at the given index so that the bucket is
    /// no longer reported as full, decreases the `items` count of the table and, if
    /// possible, increases `self.growth_left`.
3350 ///
3351 /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
3352 /// does not make any changes to the `data` parts of the table. The caller of this
3353 /// function must take care to properly drop the `data`, otherwise calling this
3354 /// function may result in a memory leak.
3355 ///
3356 /// # Safety
3357 ///
3358 /// You must observe the following safety rules when calling this function:
3359 ///
3360 /// * The [`RawTableInner`] has already been allocated;
3361 ///
    /// * The control byte at the given `index` must be full;
3363 ///
3364 /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
3365 /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
3366 /// be no greater than the number returned by the function [`RawTableInner::buckets`].
3367 ///
3368 /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
3369 ///
    /// Calling this function on a table with no elements is unspecified, but calling subsequent
    /// functions is likely to result in [`undefined behavior`] due to an overflowing subtraction
    /// (`self.items -= 1` overflows when `self.items == 0`).
3373 ///
    /// See also the [`Bucket::as_ptr`] method for more information about how to properly remove
    /// or save a `data element` from / into the [`RawTable`] / [`RawTableInner`].
3376 ///
3377 /// [`RawTableInner::buckets`]: RawTableInner::buckets
3378 /// [`Bucket::as_ptr`]: Bucket::as_ptr
3379 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3380 #[inline]
3381 unsafe fn erase(&mut self, index: usize) {
3382 debug_assert!(self.is_bucket_full(index));
3383
3384 // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
3385 // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
3386 let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
3387 // SAFETY:
3388 // - The caller must uphold the safety contract for `erase` method;
3389 // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
3390 let empty_before = Group::load(self.ctrl(index_before)).match_empty();
3391 let empty_after = Group::load(self.ctrl(index)).match_empty();
3392
3393 // Inserting and searching in the map is performed by two key functions:
3394 //
3395 // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED`
3396 // slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED`
3397 // slot immediately in the first group, it jumps to the next `Group` looking for it,
3398 // and so on until it has gone through all the groups in the control bytes.
3399 //
3400 // - The `find_inner` function that looks for the index of the desired element by looking
3401 // at all the `FULL` bytes in the group. If it did not find the element right away, and
3402 // there is no `EMPTY` byte in the group, then this means that the `find_insert_slot`
3403 // function may have found a suitable slot in the next group. Therefore, `find_inner`
3404 // jumps further, and if it does not find the desired element and again there is no `EMPTY`
3405 // byte, then it jumps further, and so on. The search stops only if `find_inner` function
3406 // finds the desired element or hits an `EMPTY` slot/byte.
3407 //
3408 // Accordingly, this leads to two consequences:
3409 //
3410 // - The map must have `EMPTY` slots (bytes);
3411 //
3412 // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner`
3413 // function may stumble upon an `EMPTY` byte before finding the desired element and stop
3414 // searching.
3415 //
3416 // Thus it is necessary to check all bytes after and before the erased element. If we are in
3417 // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes
3418 // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
3419 // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there
3420 // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
3421 // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well.
3422 //
        // Finally, for tables smaller than the group width (self.buckets() < Group::WIDTH) we have
        // `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`, and given
        // all of the above, such tables can never have `DELETED` bytes.
3426 //
3427 // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
3428 // `trailing_zeros` refers to the bytes at the beginning of a group.
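        //
        // Illustrative example (assuming `Group::WIDTH == 8`): if the 5 bytes just before
        // `index` and the 4 bytes starting at `index` (including `index` itself) are all
        // `FULL` or `DELETED`, then 5 + 4 >= 8 and some probe window covering `index`
        // contains no `EMPTY` byte, so we must write `DELETED`. If instead only 3 + 2 = 5 < 8
        // such bytes surround `index`, every probe window covering `index` still contains an
        // `EMPTY` byte, so we can write `EMPTY` and reclaim one unit of `growth_left`.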
3429 let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
3430 DELETED
3431 } else {
3432 self.growth_left += 1;
3433 EMPTY
3434 };
3435 // SAFETY: the caller must uphold the safety contract for `erase` method.
3436 self.set_ctrl(index, ctrl);
3437 self.items -= 1;
3438 }
3439}
3440
3441impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
3442 fn clone(&self) -> Self {
3443 if self.table.is_empty_singleton() {
3444 Self::new_in(self.alloc.clone())
3445 } else {
3446 unsafe {
3447 // Avoid `Result::ok_or_else` because it bloats LLVM IR.
3448 //
3449 // SAFETY: This is safe as we are taking the size of an already allocated table
                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
                // of two, and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
3452 let mut new_table = match Self::new_uninitialized(
3453 self.alloc.clone(),
3454 self.table.buckets(),
3455 Fallibility::Infallible,
3456 ) {
3457 Ok(table) => table,
3458 Err(_) => hint::unreachable_unchecked(),
3459 };
3460
3461 // Cloning elements may fail (the clone function may panic). But we don't
3462 // need to worry about uninitialized control bits, since:
                // 1. The number of items (elements) in the table is zero, which means that
                //    the control bits will not be read by the Drop function.
                // 2. The `clone_from_spec` method will first copy all control bits from
                //    `self` (thus initializing them). But this will not affect the `Drop`
                //    function, since the `clone_from_spec` function sets `items` only after
                //    successfully cloning all elements.
3469 new_table.clone_from_spec(self);
3470 new_table
3471 }
3472 }
3473 }
3474
3475 fn clone_from(&mut self, source: &Self) {
3476 if source.table.is_empty_singleton() {
3477 let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
3478 unsafe {
3479 // SAFETY:
3480 // 1. We call the function only once;
3481 // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3482 // and [`TableLayout`] that were used to allocate this table.
3483 // 3. If any elements' drop function panics, then there will only be a memory leak,
3484 // because we have replaced the inner table with a new one.
3485 old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3486 }
3487 } else {
3488 unsafe {
                // Make sure that if any panic occurs, we clear the table and
                // leave it in an empty state.
3491 let mut self_ = guard(self, |self_| {
3492 self_.clear_no_drop();
3493 });
3494
3495 // First, drop all our elements without clearing the control
3496 // bytes. If this panics then the scope guard will clear the
3497 // table, leaking any elements that were not dropped yet.
3498 //
3499 // This leak is unavoidable: we can't try dropping more elements
3500 // since this could lead to another panic and abort the process.
3501 //
                // SAFETY: If something goes wrong we clear our table right after
                // dropping the elements, so there is no double drop, since `items`
                // will be equal to zero.
3505 self_.table.drop_elements::<T>();
3506
3507 // If necessary, resize our table to match the source.
3508 if self_.buckets() != source.buckets() {
3509 let new_inner = match RawTableInner::new_uninitialized(
3510 &self_.alloc,
3511 Self::TABLE_LAYOUT,
3512 source.buckets(),
3513 Fallibility::Infallible,
3514 ) {
3515 Ok(table) => table,
3516 Err(_) => hint::unreachable_unchecked(),
3517 };
                    // Replace the old inner with the new uninitialized one. This is OK, since if
                    // something goes wrong the `ScopeGuard` will initialize all control bytes and
                    // leave an empty table.
3520 let mut old_inner = mem::replace(&mut self_.table, new_inner);
3521 if !old_inner.is_empty_singleton() {
3522 // SAFETY:
3523 // 1. We have checked that our table is allocated.
3524 // 2. We know for sure that `alloc` and `table_layout` matches
3525 // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
3526 old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
3527 }
3528 }
3529
3530 // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
3531 // inside the `clone_from_impl` function will take care of that, dropping all
3532 // cloned elements if necessary. Our `ScopeGuard` will clear the table.
3533 self_.clone_from_spec(source);
3534
3535 // Disarm the scope guard if cloning was successful.
3536 ScopeGuard::into_inner(self_);
3537 }
3538 }
3539 }
3540}
3541
3542/// Specialization of `clone_from` for `Copy` types
3543trait RawTableClone {
3544 unsafe fn clone_from_spec(&mut self, source: &Self);
3545}
3546impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3547 default_fn! {
3548 #[cfg_attr(feature = "inline-more", inline)]
3549 unsafe fn clone_from_spec(&mut self, source: &Self) {
3550 self.clone_from_impl(source);
3551 }
3552 }
3553}
3554#[cfg(feature = "nightly")]
3555impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
3556 #[cfg_attr(feature = "inline-more", inline)]
3557 unsafe fn clone_from_spec(&mut self, source: &Self) {
3558 source
3559 .table
3560 .ctrl(0)
3561 .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3562 source
3563 .data_start()
3564 .as_ptr()
3565 .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
3566
3567 self.table.items = source.table.items;
3568 self.table.growth_left = source.table.growth_left;
3569 }
3570}
3571
3572impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
3573 /// Common code for clone and clone_from. Assumes:
3574 /// - `self.buckets() == source.buckets()`.
3575 /// - Any existing elements have been dropped.
3576 /// - The control bytes are not initialized yet.
3577 #[cfg_attr(feature = "inline-more", inline)]
3578 unsafe fn clone_from_impl(&mut self, source: &Self) {
3579 // Copy the control bytes unchanged. We do this in a single pass
3580 source
3581 .table
3582 .ctrl(0)
3583 .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
3584
3585 // The cloning of elements may panic, in which case we need
3586 // to make sure we drop only the elements that have been
3587 // cloned so far.
3588 let mut guard = guard((0, &mut *self), |(index, self_)| {
3589 if T::NEEDS_DROP {
3590 for i in 0..=*index {
3591 if self_.is_bucket_full(i) {
3592 self_.bucket(i).drop();
3593 }
3594 }
3595 }
3596 });
3597
3598 for from in source.iter() {
3599 let index = source.bucket_index(&from);
3600 let to = guard.1.bucket(index);
3601 to.write(from.as_ref().clone());
3602
3603 // Update the index in case we need to unwind.
3604 guard.0 = index;
3605 }
3606
3607 // Successfully cloned all items, no need to clean up.
3608 mem::forget(guard);
3609
3610 self.table.items = source.table.items;
3611 self.table.growth_left = source.table.growth_left;
3612 }
3613
3614 /// Variant of `clone_from` to use when a hasher is available.
3615 #[cfg(feature = "raw")]
3616 pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
3617 // If we have enough capacity in the table, just clear it and insert
3618 // elements one by one. We don't do this if we have the same number of
3619 // buckets as the source since we can just copy the contents directly
3620 // in that case.
3621 if self.table.buckets() != source.table.buckets()
3622 && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len()
3623 {
3624 self.clear();
3625
3626 let mut guard_self = guard(&mut *self, |self_| {
3627 // Clear the partially copied table if a panic occurs, otherwise
3628 // items and growth_left will be out of sync with the contents
3629 // of the table.
3630 self_.clear();
3631 });
3632
3633 unsafe {
3634 for item in source.iter() {
3635 // This may panic.
3636 let item = item.as_ref().clone();
3637 let hash = hasher(&item);
3638
3639 // We can use a simpler version of insert() here since:
3640 // - there are no DELETED entries.
3641 // - we know there is enough space in the table.
3642 // - all elements are unique.
3643 let (index, _) = guard_self.table.prepare_insert_slot(hash);
3644 guard_self.bucket(index).write(item);
3645 }
3646 }
3647
3648 // Successfully cloned all items, no need to clean up.
3649 mem::forget(guard_self);
3650
3651 self.table.items = source.table.items;
3652 self.table.growth_left -= source.table.items;
3653 } else {
3654 self.clone_from(source);
3655 }
3656 }
3657}
3658
3659impl<T, A: Allocator + Default> Default for RawTable<T, A> {
3660 #[inline]
3661 fn default() -> Self {
        Self::new_in(Default::default())
3663 }
3664}
3665
3666#[cfg(feature = "nightly")]
3667unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
3668 #[cfg_attr(feature = "inline-more", inline)]
3669 fn drop(&mut self) {
3670 unsafe {
3671 // SAFETY:
3672 // 1. We call the function only once;
3673 // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3674 // and [`TableLayout`] that were used to allocate this table.
3675 // 3. If the drop function of any elements fails, then only a memory leak will occur,
3676 // and we don't care because we are inside the `Drop` function of the `RawTable`,
3677 // so there won't be any table left in an inconsistent state.
3678 self.table
3679 .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3680 }
3681 }
3682}
3683#[cfg(not(feature = "nightly"))]
3684impl<T, A: Allocator> Drop for RawTable<T, A> {
3685 #[cfg_attr(feature = "inline-more", inline)]
3686 fn drop(&mut self) {
3687 unsafe {
3688 // SAFETY:
3689 // 1. We call the function only once;
3690 // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`]
3691 // and [`TableLayout`] that were used to allocate this table.
3692 // 3. If the drop function of any elements fails, then only a memory leak will occur,
3693 // and we don't care because we are inside the `Drop` function of the `RawTable`,
3694 // so there won't be any table left in an inconsistent state.
3695 self.table
3696 .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
3697 }
3698 }
3699}
3700
3701impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
3702 type Item = T;
3703 type IntoIter = RawIntoIter<T, A>;
3704
3705 #[cfg_attr(feature = "inline-more", inline)]
3706 fn into_iter(self) -> RawIntoIter<T, A> {
3707 unsafe {
            let iter = self.iter();
3709 self.into_iter_from(iter)
3710 }
3711 }
3712}
3713
3714/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
3715/// not track an item count.
3716pub(crate) struct RawIterRange<T> {
3717 // Mask of full buckets in the current group. Bits are cleared from this
3718 // mask as each element is processed.
3719 current_group: BitMaskIter,
3720
3721 // Pointer to the buckets for the current group.
3722 data: Bucket<T>,
3723
3724 // Pointer to the next group of control bytes,
3725 // Must be aligned to the group size.
3726 next_ctrl: *const u8,
3727
3728 // Pointer one past the last control byte of this range.
3729 end: *const u8,
3730}
3731
3732impl<T> RawIterRange<T> {
3733 /// Returns a `RawIterRange` covering a subset of a table.
3734 ///
3735 /// # Safety
3736 ///
3737 /// If any of the following conditions are violated, the result is
3738 /// [`undefined behavior`]:
3739 ///
3740 /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`;
3741 ///
3742 /// * `ctrl` must be properly aligned to the group size (Group::WIDTH);
3743 ///
3744 /// * `ctrl` must point to the array of properly initialized control bytes;
3745 ///
3746 /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
3747 ///
3748 /// * the value of `len` must be less than or equal to the number of table buckets,
3749 /// and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
3750 /// must be positive.
3751 ///
3752 /// * The `ctrl.add(len)` pointer must be either in bounds or one
3753 /// byte past the end of the same [allocated table].
3754 ///
3755 /// * The `len` must be a power of two.
3756 ///
3757 /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
3758 /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3759 #[cfg_attr(feature = "inline-more", inline)]
3760 unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
3761 debug_assert_ne!(len, 0);
3762 debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
3763 // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3764 let end = ctrl.add(len);
3765
3766 // Load the first group and advance ctrl to point to the next group
3767 // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
3768 let current_group = Group::load_aligned(ctrl).match_full();
3769 let next_ctrl = ctrl.add(Group::WIDTH);
3770
3771 Self {
3772 current_group: current_group.into_iter(),
3773 data,
3774 next_ctrl,
3775 end,
3776 }
3777 }
3778
3779 /// Splits a `RawIterRange` into two halves.
3780 ///
    /// The second half is `None` if the remaining range is smaller than or equal to
    /// the group width.
3783 #[cfg_attr(feature = "inline-more", inline)]
3784 #[cfg(feature = "rayon")]
3785 pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
3786 unsafe {
3787 if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently processing
                // is the last one.
3790 (self, None)
3791 } else {
3792 // len is the remaining number of elements after the group that
3793 // we are currently processing. It must be a multiple of the
3794 // group size (small tables are caught by the check above).
3795 let len = offset_from(self.end, self.next_ctrl);
3796 debug_assert_eq!(len % Group::WIDTH, 0);
3797
3798 // Split the remaining elements into two halves, but round the
3799 // midpoint down in case there is an odd number of groups
3800 // remaining. This ensures that:
3801 // - The tail is at least 1 group long.
3802 // - The split is roughly even considering we still have the
3803 // current group to process.
3804 let mid = (len / 2) & !(Group::WIDTH - 1);
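                // Illustrative example (assuming `Group::WIDTH == 16`): if three groups remain
                // after the current one (`len == 48`), then `mid == (48 / 2) & !15 == 16`, so
                // `self` keeps the current group plus one more, and the tail gets the remaining
                // two groups.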
3805
3806 let tail = Self::new(
3807 self.next_ctrl.add(mid),
3808 self.data.next_n(Group::WIDTH).next_n(mid),
3809 len - mid,
3810 );
3811 debug_assert_eq!(
3812 self.data.next_n(Group::WIDTH).next_n(mid).ptr,
3813 tail.data.ptr
3814 );
3815 debug_assert_eq!(self.end, tail.end);
3816 self.end = self.next_ctrl.add(mid);
3817 debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
3818 (self, Some(tail))
3819 }
3820 }
3821 }
3822
3823 /// # Safety
3824 /// If DO_CHECK_PTR_RANGE is false, caller must ensure that we never try to iterate
3825 /// after yielding all elements.
3826 #[cfg_attr(feature = "inline-more", inline)]
3827 unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
3828 loop {
3829 if let Some(index) = self.current_group.next() {
3830 return Some(self.data.next_n(index));
3831 }
3832
3833 if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end {
3834 return None;
3835 }
3836
3837 // We might read past self.end up to the next group boundary,
3838 // but this is fine because it only occurs on tables smaller
3839 // than the group size where the trailing control bytes are all
3840 // EMPTY. On larger tables self.end is guaranteed to be aligned
3841 // to the group size (since tables are power-of-two sized).
3842 self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
3843 self.data = self.data.next_n(Group::WIDTH);
3844 self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3845 }
3846 }
3847
3848 /// Folds every element into an accumulator by applying an operation,
3849 /// returning the final result.
3850 ///
3851 /// `fold_impl()` takes three arguments: the number of items remaining in
3852 /// the iterator, an initial value, and a closure with two arguments: an
3853 /// 'accumulator', and an element. The closure returns the value that the
3854 /// accumulator should have for the next iteration.
3855 ///
3856 /// The initial value is the value the accumulator will have on the first call.
3857 ///
3858 /// After applying this closure to every element of the iterator, `fold_impl()`
3859 /// returns the accumulator.
3860 ///
3861 /// # Safety
3862 ///
3863 /// If any of the following conditions are violated, the result is
3864 /// [`Undefined Behavior`]:
3865 ///
3866 /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
3867 /// i.e. table outlives the `RawIterRange`;
3868 ///
3869 /// * The provided `n` value must match the actual number of items
3870 /// in the table.
3871 ///
3872 /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
3873 #[allow(clippy::while_let_on_iterator)]
3874 #[cfg_attr(feature = "inline-more", inline)]
3875 unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
3876 where
3877 F: FnMut(B, Bucket<T>) -> B,
3878 {
3879 loop {
3880 while let Some(index) = self.current_group.next() {
3881 // The returned `index` will always be in the range `0..Group::WIDTH`,
3882 // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
3883 debug_assert!(n != 0);
3884 let bucket = self.data.next_n(index);
3885 acc = f(acc, bucket);
3886 n -= 1;
3887 }
3888
3889 if n == 0 {
3890 return acc;
3891 }
3892
3893 // SAFETY: The caller of this function ensures that:
3894 //
3895 // 1. The provided `n` value matches the actual number of items in the table;
3896 // 2. The table is alive and did not moved.
3897 //
3898 // Taking the above into account, we always stay within the bounds, because:
3899 //
3900 // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
3901 // we will never end up in the given branch, since we should have already
3902 // yielded all the elements of the table.
3903 //
            // 2. For tables larger than the group width: the number of buckets is a
            //    power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k). Since
            //    `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`. As we start
            //    from the start of the array of control bytes, and never try to iterate after
            //    getting all the elements, the last `self.current_group` will read bytes
            //    from the `self.buckets() - Group::WIDTH` index. We also know that
            //    `self.current_group.next()` will always return indices within the range
            //    `0..Group::WIDTH`.
3912 //
3913 // Knowing all of the above and taking into account that we are synchronizing
3914 // the `self.data` index with the index we used to read the `self.current_group`,
3915 // the subsequent `self.data.next_n(index)` will always return a bucket with
3916 // an index number less than `self.buckets()`.
3917 //
3918 // The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
3919 // actually be read, since we should have already yielded all the elements of
3920 // the table.
3921 self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
3922 self.data = self.data.next_n(Group::WIDTH);
3923 self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
3924 }
3925 }
3926}
3927
3928// We make raw iterators unconditionally Send and Sync, and let the PhantomData
3929// in the actual iterator implementations determine the real Send/Sync bounds.
3930unsafe impl<T> Send for RawIterRange<T> {}
3931unsafe impl<T> Sync for RawIterRange<T> {}
3932
3933impl<T> Clone for RawIterRange<T> {
3934 #[cfg_attr(feature = "inline-more", inline)]
3935 fn clone(&self) -> Self {
3936 Self {
3937 data: self.data.clone(),
3938 next_ctrl: self.next_ctrl,
3939 current_group: self.current_group,
3940 end: self.end,
3941 }
3942 }
3943}
3944
3945impl<T> Iterator for RawIterRange<T> {
3946 type Item = Bucket<T>;
3947
3948 #[cfg_attr(feature = "inline-more", inline)]
3949 fn next(&mut self) -> Option<Bucket<T>> {
3950 unsafe {
            // SAFETY: We set the `DO_CHECK_PTR_RANGE` flag to true, so the iterator
            // checks the pointer range and stops at the end of it.
3952 self.next_impl::<true>()
3953 }
3954 }
3955
3956 #[inline]
3957 fn size_hint(&self) -> (usize, Option<usize>) {
3958 // We don't have an item count, so just guess based on the range size.
3959 let remaining_buckets: usize = if self.end > self.next_ctrl {
3960 unsafe { offset_from(self.end, self.next_ctrl) }
3961 } else {
3962 0
3963 };
3964
3965 // Add a group width to include the group we are currently processing.
3966 (0, Some(Group::WIDTH + remaining_buckets))
3967 }
3968}
3969
3970impl<T> FusedIterator for RawIterRange<T> {}
3971
3972/// Iterator which returns a raw pointer to every full bucket in the table.
3973///
3974/// For maximum flexibility this iterator is not bound by a lifetime, but you
3975/// must observe several rules when using it:
3976/// - You must not free the hash table while iterating (including via growing/shrinking).
3977/// - It is fine to erase a bucket that has been yielded by the iterator.
3978/// - Erasing a bucket that has not yet been yielded by the iterator may still
3979/// result in the iterator yielding that bucket (unless `reflect_remove` is called).
3980/// - It is unspecified whether an element inserted after the iterator was
3981/// created will be yielded by that iterator (unless `reflect_insert` is called).
/// - The order in which the iterator yields buckets is unspecified and may
3983/// change in the future.
3984pub struct RawIter<T> {
3985 pub(crate) iter: RawIterRange<T>,
3986 items: usize,
3987}
3988
3989impl<T> RawIter<T> {
3990 /// Refresh the iterator so that it reflects a removal from the given bucket.
3991 ///
3992 /// For the iterator to remain valid, this method must be called once
3993 /// for each removed bucket before `next` is called again.
3994 ///
3995 /// This method should be called _before_ the removal is made. It is not necessary to call this
3996 /// method if you are removing an item that this iterator yielded in the past.
3997 #[cfg(feature = "raw")]
3998 pub unsafe fn reflect_remove(&mut self, b: &Bucket<T>) {
3999 self.reflect_toggle_full(b, false);
4000 }
4001
4002 /// Refresh the iterator so that it reflects an insertion into the given bucket.
4003 ///
4004 /// For the iterator to remain valid, this method must be called once
4005 /// for each insert before `next` is called again.
4006 ///
4007 /// This method does not guarantee that an insertion of a bucket with a greater
4008 /// index than the last one yielded will be reflected in the iterator.
4009 ///
4010 /// This method should be called _after_ the given insert is made.
4011 #[cfg(feature = "raw")]
4012 pub unsafe fn reflect_insert(&mut self, b: &Bucket<T>) {
4013 self.reflect_toggle_full(b, true);
4014 }
4015
4016 /// Refresh the iterator so that it reflects a change to the state of the given bucket.
4017 #[cfg(feature = "raw")]
4018 unsafe fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
4019 if b.as_ptr() > self.iter.data.as_ptr() {
4020 // The iterator has already passed the bucket's group.
4021 // So the toggle isn't relevant to this iterator.
4022 return;
4023 }
4024
4025 if self.iter.next_ctrl < self.iter.end
4026 && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
4027 {
4028 // The iterator has not yet reached the bucket's group.
4029 // We don't need to reload anything, but we do need to adjust the item count.
4030
4031 if cfg!(debug_assertions) {
4032 // Double-check that the user isn't lying to us by checking the bucket state.
4033 // To do that, we need to find its control byte. We know that self.iter.data is
4034 // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
4035 let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
4036 let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
4037 // This method should be called _before_ a removal, or _after_ an insert,
4038 // so in both cases the ctrl byte should indicate that the bucket is full.
4039 assert!(is_full(*ctrl));
4040 }
4041
4042 if is_insert {
4043 self.items += 1;
4044 } else {
4045 self.items -= 1;
4046 }
4047
4048 return;
4049 }
4050
4051 // The iterator is at the bucket group that the toggled bucket is in.
4052 // We need to do two things:
4053 //
4054 // - Determine if the iterator already yielded the toggled bucket.
4055 // If it did, we're done.
4056 // - Otherwise, update the iterator cached group so that it won't
4057 // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
4058 // We'll also need to update the item count accordingly.
4059 if let Some(index) = self.iter.current_group.0.lowest_set_bit() {
4060 let next_bucket = self.iter.data.next_n(index);
4061 if b.as_ptr() > next_bucket.as_ptr() {
4062 // The toggled bucket is "before" the bucket the iterator would yield next. We
4063 // therefore don't need to do anything --- the iterator has already passed the
4064 // bucket in question.
4065 //
4066 // The item count must already be correct, since a removal or insert "prior" to
4067 // the iterator's position wouldn't affect the item count.
4068 } else {
                // The toggled bucket is an upcoming bucket. For a removal we need to make
                // sure it does _not_ get yielded and is no longer included in the item
                // count; for an insert, the reverse.
4071 //
4072 // NOTE: We can't just reload the group here, both since that might reflect
4073 // inserts we've already passed, and because that might inadvertently unset the
4074 // bits for _other_ removals. If we do that, we'd have to also decrement the
4075 // item count for those other bits that we unset. But the presumably subsequent
4076 // call to reflect for those buckets might _also_ decrement the item count.
4077 // Instead, we _just_ flip the bit for the particular bucket the caller asked
4078 // us to reflect.
4079 let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
4080 let was_full = self.iter.current_group.flip(our_bit);
4081 debug_assert_ne!(was_full, is_insert);
4082
4083 if is_insert {
4084 self.items += 1;
4085 } else {
4086 self.items -= 1;
4087 }
4088
4089 if cfg!(debug_assertions) {
4090 if b.as_ptr() == next_bucket.as_ptr() {
4091 // The removed bucket should no longer be next
4092 debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index));
4093 } else {
4094 // We should not have changed what bucket comes next.
4095 debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index));
4096 }
4097 }
4098 }
4099 } else {
4100 // We must have already iterated past the removed item.
4101 }
4102 }
4103
4104 unsafe fn drop_elements(&mut self) {
4105 if T::NEEDS_DROP && self.items != 0 {
4106 for item in self {
4107 item.drop();
4108 }
4109 }
4110 }
4111}
4112
4113impl<T> Clone for RawIter<T> {
4114 #[cfg_attr(feature = "inline-more", inline)]
4115 fn clone(&self) -> Self {
4116 Self {
4117 iter: self.iter.clone(),
4118 items: self.items,
4119 }
4120 }
4121}
4122
4123impl<T> Iterator for RawIter<T> {
4124 type Item = Bucket<T>;
4125
4126 #[cfg_attr(feature = "inline-more", inline)]
4127 fn next(&mut self) -> Option<Bucket<T>> {
        // The inner iterator iterates over buckets, so it could do unnecessary
        // work if we have already yielded all of the items.
4130 if self.items == 0 {
4131 return None;
4132 }
4133
4134 let nxt = unsafe {
4135 // SAFETY: We check number of items to yield using `items` field.
4136 self.iter.next_impl::<false>()
4137 };
4138
4139 debug_assert!(nxt.is_some());
4140 self.items -= 1;
4141
4142 nxt
4143 }
4144
4145 #[inline]
4146 fn size_hint(&self) -> (usize, Option<usize>) {
4147 (self.items, Some(self.items))
4148 }
4149
4150 #[inline]
4151 fn fold<B, F>(self, init: B, f: F) -> B
4152 where
4153 Self: Sized,
4154 F: FnMut(B, Self::Item) -> B,
4155 {
4156 unsafe { self.iter.fold_impl(self.items, init, f) }
4157 }
4158}
4159
4160impl<T> ExactSizeIterator for RawIter<T> {}
4161impl<T> FusedIterator for RawIter<T> {}
4162
4163/// Iterator which returns an index of every full bucket in the table.
4164///
4165/// For maximum flexibility this iterator is not bound by a lifetime, but you
4166/// must observe several rules when using it:
4167/// - You must not free the hash table while iterating (including via growing/shrinking).
4168/// - It is fine to erase a bucket that has been yielded by the iterator.
4169/// - Erasing a bucket that has not yet been yielded by the iterator may still
///   result in the iterator yielding the index of that bucket.
4171/// - It is unspecified whether an element inserted after the iterator was
4172/// created will be yielded by that iterator.
4173/// - The order in which the iterator yields indices of the buckets is unspecified
4174/// and may change in the future.
4175pub(crate) struct FullBucketsIndices {
4176 // Mask of full buckets in the current group. Bits are cleared from this
4177 // mask as each element is processed.
4178 current_group: BitMaskIter,
4179
4180 // Initial value of the bytes' indices of the current group (relative
4181 // to the start of the control bytes).
4182 group_first_index: usize,
4183
4184 // Pointer to the current group of control bytes,
4185 // Must be aligned to the group size (Group::WIDTH).
4186 ctrl: NonNull<u8>,
4187
4188 // Number of elements in the table.
4189 items: usize,
4190}
4191
4192impl FullBucketsIndices {
4193 /// Advances the iterator and returns the next value.
4194 ///
4195 /// # Safety
4196 ///
4197 /// If any of the following conditions are violated, the result is
4198 /// [`Undefined Behavior`]:
4199 ///
    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
    ///   i.e. the table outlives the `FullBucketsIndices`;
    ///
    /// * The caller never tries to iterate again after all the elements have
    ///   been yielded.
4204 ///
4205 /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
4206 #[inline(always)]
4207 unsafe fn next_impl(&mut self) -> Option<usize> {
4208 loop {
4209 if let Some(index) = self.current_group.next() {
4210 // The returned `self.group_first_index + index` will always
4211 // be in the range `0..self.buckets()`. See explanation below.
4212 return Some(self.group_first_index + index);
4213 }
4214
4215 // SAFETY: The caller of this function ensures that:
4216 //
4217 // 1. It never tries to iterate after getting all the elements;
            // 2. The table is alive and has not been moved;
4219 // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
4220 //
4221 // Taking the above into account, we always stay within the bounds, because:
4222 //
4223 // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
4224 // we will never end up in the given branch, since we should have already
4225 // yielded all the elements of the table.
4226 //
            // 2. For tables larger than the group width: the number of buckets is a
            //    power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k).
            //    Since `(2 ^ n) > (2 ^ k)`, then `(2 ^ n) % (2 ^ k) = 0`. As we start
            //    from the start of the array of control bytes, and never try to iterate
            //    after getting all the elements, the last group we load starts at offset
            //    `self.buckets() - Group::WIDTH`, so `self.current_group.next()` will
            //    always yield indices within the range `0..Group::WIDTH`, and the
            //    subsequent `self.group_first_index + index` will always be a number
            //    less than `self.buckets()`.
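            //
            // For example, with `self.buckets() == 32` and `Group::WIDTH == 16`,
            // `self.group_first_index` only ever takes the values 0 and 16, the
            // last group load starts at offset `32 - 16 = 16`, and every yielded
            // index is at most `16 + 15 = 31`, i.e. less than `self.buckets()`.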
4236 self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
4237
4238 // SAFETY: See explanation above.
4239 self.current_group = Group::load_aligned(self.ctrl.as_ptr())
4240 .match_full()
4241 .into_iter();
4242 self.group_first_index += Group::WIDTH;
4243 }
4244 }
4245}
4246
4247impl Iterator for FullBucketsIndices {
4248 type Item = usize;
4249
4250 /// Advances the iterator and returns the next value. It is up to
4251 /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
4252 /// because we cannot make the `next` method unsafe.
4253 #[inline(always)]
4254 fn next(&mut self) -> Option<usize> {
4255 // Return if we already yielded all items.
4256 if self.items == 0 {
4257 return None;
4258 }
4259
4260 let nxt = unsafe {
4261 // SAFETY:
4262 // 1. We check number of items to yield using `items` field.
4263 // 2. The caller ensures that the table is alive and has not moved.
4264 self.next_impl()
4265 };
4266
4267 debug_assert!(nxt.is_some());
4268 self.items -= 1;
4269
4270 nxt
4271 }
4272
4273 #[inline(always)]
4274 fn size_hint(&self) -> (usize, Option<usize>) {
4275 (self.items, Some(self.items))
4276 }
4277}
4278
4279impl ExactSizeIterator for FullBucketsIndices {}
4280impl FusedIterator for FullBucketsIndices {}
4281
4282/// Iterator which consumes a table and returns elements.
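///
/// A minimal sketch of how this iterator is typically driven (illustrative
/// only; `table` is assumed to be a populated `RawTable<u64>` and `process`
/// is a stand-in for arbitrary per-element work):
///
/// ```ignore
/// for value in table.into_iter() {
///     // Each element is moved out of the table; the backing storage is
///     // freed once the iterator is dropped.
///     process(value);
/// }
/// ```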
4283pub struct RawIntoIter<T, A: Allocator = Global> {
4284 iter: RawIter<T>,
4285 allocation: Option<(NonNull<u8>, Layout, A)>,
4286 marker: PhantomData<T>,
4287}
4288
4289impl<T, A: Allocator> RawIntoIter<T, A> {
4290 #[cfg_attr(feature = "inline-more", inline)]
4291 pub fn iter(&self) -> RawIter<T> {
4292 self.iter.clone()
4293 }
4294}
4295
4296unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
4297where
4298 T: Send,
4299 A: Send,
4300{
4301}
4302unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
4303where
4304 T: Sync,
4305 A: Sync,
4306{
4307}
4308
4309#[cfg(feature = "nightly")]
4310unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
4311 #[cfg_attr(feature = "inline-more", inline)]
4312 fn drop(&mut self) {
4313 unsafe {
4314 // Drop all remaining elements
4315 self.iter.drop_elements();
4316
4317 // Free the table
4318 if let Some((ptr, layout, ref alloc)) = self.allocation {
4319 alloc.deallocate(ptr, layout);
4320 }
4321 }
4322 }
4323}
4324#[cfg(not(feature = "nightly"))]
4325impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
4326 #[cfg_attr(feature = "inline-more", inline)]
4327 fn drop(&mut self) {
4328 unsafe {
4329 // Drop all remaining elements
4330 self.iter.drop_elements();
4331
4332 // Free the table
            if let Some((ptr, layout, ref alloc)) = self.allocation {
4334 alloc.deallocate(ptr, layout);
4335 }
4336 }
4337 }
4338}
4339
4340impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
4341 type Item = T;
4342
4343 #[cfg_attr(feature = "inline-more", inline)]
4344 fn next(&mut self) -> Option<T> {
4345 unsafe { Some(self.iter.next()?.read()) }
4346 }
4347
4348 #[inline]
4349 fn size_hint(&self) -> (usize, Option<usize>) {
4350 self.iter.size_hint()
4351 }
4352}
4353
4354impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
4355impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
4356
4357/// Iterator which consumes elements without freeing the table storage.
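///
/// A minimal sketch (illustrative only; `table` is assumed to be a populated
/// `RawTable<u64>` and `process` a stand-in for arbitrary per-element work):
///
/// ```ignore
/// for value in table.drain() {
///     // Elements are moved out one by one; once the drain is dropped the
///     // table is left empty but keeps its allocation.
///     process(value);
/// }
/// assert_eq!(table.len(), 0);
/// ```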
4358pub struct RawDrain<'a, T, A: Allocator = Global> {
4359 iter: RawIter<T>,
4360
4361 // The table is moved into the iterator for the duration of the drain. This
4362 // ensures that an empty table is left if the drain iterator is leaked
4363 // without dropping.
4364 table: RawTableInner,
4365 orig_table: NonNull<RawTableInner>,
4366
4367 // We don't use a &'a mut RawTable<T> because we want RawDrain to be
4368 // covariant over T.
4369 marker: PhantomData<&'a RawTable<T, A>>,
4370}
4371
4372impl<T, A: Allocator> RawDrain<'_, T, A> {
4373 #[cfg_attr(feature = "inline-more", inline)]
4374 pub fn iter(&self) -> RawIter<T> {
4375 self.iter.clone()
4376 }
4377}
4378
4379unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
4380where
4381 T: Send,
4382 A: Send,
4383{
4384}
4385unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
4386where
4387 T: Sync,
4388 A: Sync,
4389{
4390}
4391
4392impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
4393 #[cfg_attr(feature = "inline-more", inline)]
4394 fn drop(&mut self) {
4395 unsafe {
4396 // Drop all remaining elements. Note that this may panic.
4397 self.iter.drop_elements();
4398
4399 // Reset the contents of the table now that all elements have been
4400 // dropped.
4401 self.table.clear_no_drop();
4402
4403 // Move the now empty table back to its original location.
4404 self.orig_table
4405 .as_ptr()
                .copy_from_nonoverlapping(&self.table, 1);
4407 }
4408 }
4409}
4410
4411impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
4412 type Item = T;
4413
4414 #[cfg_attr(feature = "inline-more", inline)]
4415 fn next(&mut self) -> Option<T> {
4416 unsafe {
            let item = self.iter.next()?;
4418 Some(item.read())
4419 }
4420 }
4421
4422 #[inline]
4423 fn size_hint(&self) -> (usize, Option<usize>) {
4424 self.iter.size_hint()
4425 }
4426}
4427
4428impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
4429impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
4430
4431/// Iterator over occupied buckets that could match a given hash.
4432///
4433/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
4434/// items that have a hash value different than the one provided. You should
4435/// always validate the returned values before using them.
4436///
4437/// For maximum flexibility this iterator is not bound by a lifetime, but you
4438/// must observe several rules when using it:
4439/// - You must not free the hash table while iterating (including via growing/shrinking).
4440/// - It is fine to erase a bucket that has been yielded by the iterator.
4441/// - Erasing a bucket that has not yet been yielded by the iterator may still
4442/// result in the iterator yielding that bucket.
4443/// - It is unspecified whether an element inserted after the iterator was
4444/// created will be yielded by that iterator.
4445/// - The order in which the iterator yields buckets is unspecified and may
4446/// change in the future.
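///
/// A minimal sketch of a lookup over the candidate buckets (illustrative only;
/// it assumes the `raw` feature, a `RawTable<(u64, V)>`, and `hash`/`key`
/// computed elsewhere):
///
/// ```ignore
/// // SAFETY: `table` outlives the iterator and is not resized while iterating.
/// let found = unsafe { table.iter_hash(hash) }
///     // The control bytes only store a 7-bit tag, so false positives are
///     // possible: re-check the full key for every candidate bucket.
///     .find(|bucket| unsafe { bucket.as_ref() }.0 == key);
/// ```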
4447pub struct RawIterHash<T> {
4448 inner: RawIterHashInner,
4449 _marker: PhantomData<T>,
4450}
4451
4452struct RawIterHashInner {
4453 // See `RawTableInner`'s corresponding fields for details.
4454 // We can't store a `*const RawTableInner` as it would get
4455 // invalidated by the user calling `&mut` methods on `RawTable`.
4456 bucket_mask: usize,
4457 ctrl: NonNull<u8>,
4458
4459 // The top 7 bits of the hash.
4460 h2_hash: u8,
4461
4462 // The sequence of groups to probe in the search.
4463 probe_seq: ProbeSeq,
4464
4465 group: Group,
4466
4467 // The elements within the group with a matching h2-hash.
4468 bitmask: BitMaskIter,
4469}
4470
4471impl<T> RawIterHash<T> {
4472 #[cfg_attr(feature = "inline-more", inline)]
4473 #[cfg(feature = "raw")]
4474 unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
4475 RawIterHash {
4476 inner: RawIterHashInner::new(&table.table, hash),
4477 _marker: PhantomData,
4478 }
4479 }
4480}
4481impl RawIterHashInner {
4482 #[cfg_attr(feature = "inline-more", inline)]
4483 #[cfg(feature = "raw")]
4484 unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
4485 let h2_hash: u8 = h2(hash);
4486 let probe_seq: ProbeSeq = table.probe_seq(hash);
        let group = Group::load(table.ctrl(probe_seq.pos));
4488 let bitmask: BitMaskIter = group.match_byte(h2_hash).into_iter();
4489
4490 RawIterHashInner {
4491 bucket_mask: table.bucket_mask,
4492 ctrl: table.ctrl,
4493 h2_hash,
4494 probe_seq,
4495 group,
4496 bitmask,
4497 }
4498 }
4499}
4500
4501impl<T> Iterator for RawIterHash<T> {
4502 type Item = Bucket<T>;
4503
4504 fn next(&mut self) -> Option<Bucket<T>> {
4505 unsafe {
4506 match self.inner.next() {
                Some(index) => {
4508 // Can't use `RawTable::bucket` here as we don't have
4509 // an actual `RawTable` reference to use.
4510 debug_assert!(index <= self.inner.bucket_mask);
                    let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
4512 Some(bucket)
4513 }
4514 None => None,
4515 }
4516 }
4517 }
4518}
4519
4520impl Iterator for RawIterHashInner {
4521 type Item = usize;
4522
4523 fn next(&mut self) -> Option<Self::Item> {
4524 unsafe {
4525 loop {
4526 if let Some(bit) = self.bitmask.next() {
4527 let index = (self.probe_seq.pos + bit) & self.bucket_mask;
4528 return Some(index);
4529 }
4530 if likely(self.group.match_empty().any_bit_set()) {
4531 return None;
4532 }
4533 self.probe_seq.move_next(self.bucket_mask);
4534
4535 // Can't use `RawTableInner::ctrl` here as we don't have
4536 // an actual `RawTableInner` reference to use.
4537 let index = self.probe_seq.pos;
4538 debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
4539 let group_ctrl = self.ctrl.as_ptr().add(index);
4540
4541 self.group = Group::load(group_ctrl);
4542 self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
4543 }
4544 }
4545 }
4546}
4547
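/// Helper used to implement `extract_if`-style draining: it walks the table
/// with a `RawIter` and removes exactly those elements for which the supplied
/// predicate returns `true`, leaving the rest in place.
///
/// A minimal sketch of the intended usage (illustrative only; the predicate
/// and the `RawTable<u64>` are assumptions):
///
/// ```ignore
/// let mut extract = RawExtractIf {
///     // SAFETY: `table` outlives `iter` and is not resized while draining.
///     iter: unsafe { table.iter() },
///     table: &mut table,
/// };
/// // Remove (and receive) every even element; odd elements stay in the table.
/// while let Some(value) = extract.next(|v| *v % 2 == 0) {
///     drop(value);
/// }
/// ```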
4548pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
4549 pub iter: RawIter<T>,
4550 pub table: &'a mut RawTable<T, A>,
4551}
4552
4553impl<T, A: Allocator> RawExtractIf<'_, T, A> {
4554 #[cfg_attr(feature = "inline-more", inline)]
4555 pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
4556 where
4557 F: FnMut(&mut T) -> bool,
4558 {
4559 unsafe {
            for item in &mut self.iter {
4561 if f(item.as_mut()) {
4562 return Some(self.table.remove(item).0);
4563 }
4564 }
4565 }
4566 None
4567 }
4568}
4569
4570#[cfg(test)]
4571mod test_map {
4572 use super::*;
4573
4574 fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
4575 unsafe {
4576 table.table.rehash_in_place(
4577 &|table, index| hasher(table.bucket::<T>(index).as_ref()),
4578 mem::size_of::<T>(),
4579 if mem::needs_drop::<T>() {
4580 Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
4581 } else {
4582 None
4583 },
4584 );
4585 }
4586 }
4587
4588 #[test]
4589 fn rehash() {
4590 let mut table = RawTable::new();
4591 let hasher = |i: &u64| *i;
4592 for i in 0..100 {
4593 table.insert(i, i, hasher);
4594 }
4595
4596 for i in 0..100 {
4597 unsafe {
4598 assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4599 }
4600 assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4601 }
4602
4603 rehash_in_place(&mut table, hasher);
4604
4605 for i in 0..100 {
4606 unsafe {
4607 assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
4608 }
4609 assert!(table.find(i + 100, |x| *x == i + 100).is_none());
4610 }
4611 }
4612
4613 /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
4614 /// AN UNINITIALIZED TABLE DURING THE DROP
4615 #[test]
4616 fn test_drop_uninitialized() {
4617 use ::alloc::vec::Vec;
4618
4619 let table = unsafe {
4620 // SAFETY: The `buckets` is power of two and we're not
4621 // trying to actually use the returned RawTable.
4622 RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4623 .unwrap()
4624 };
4625 drop(table);
4626 }
4627
4628 /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
4629 /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
4630 #[test]
4631 fn test_drop_zero_items() {
4632 use ::alloc::vec::Vec;
4633 unsafe {
4634 // SAFETY: The `buckets` is power of two and we're not
4635 // trying to actually use the returned RawTable.
4636 let table =
4637 RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
4638 .unwrap();
4639
4640 // WE SIMULATE, AS IT WERE, A FULL TABLE.
4641
4642 // SAFETY: We checked that the table is allocated and therefore the table already has
4643 // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
4644 // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
4645 table
4646 .table
4647 .ctrl(0)
4648 .write_bytes(EMPTY, table.table.num_ctrl_bytes());
4649
4650 // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
4651 table.table.ctrl(0).write_bytes(0, table.capacity());
4652
4653 // Fix up the trailing control bytes. See the comments in set_ctrl
4654 // for the handling of tables smaller than the group width.
4655 if table.buckets() < Group::WIDTH {
4656 // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
4657 // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
4658 // `Group::WIDTH` is safe
4659 table
4660 .table
4661 .ctrl(0)
4662 .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
4663 } else {
4664 // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
                // control bytes, so copying `Group::WIDTH` bytes with offset equal
4666 // to `self.buckets() == self.bucket_mask + 1` is safe
4667 table
4668 .table
4669 .ctrl(0)
4670 .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
4671 }
4672 drop(table);
4673 }
4674 }
4675
    /// CHECKING THAT A PANIC DURING `clone_from` LEAVES THE DESTINATION TABLE
    /// EMPTY AND DOES NOT LEAK OR DOUBLE-DROP ANY ELEMENTS.
4678 #[test]
4679 fn test_catch_panic_clone_from() {
4680 use ::alloc::sync::Arc;
4681 use ::alloc::vec::Vec;
4682 use allocator_api2::alloc::{AllocError, Allocator, Global};
4683 use core::sync::atomic::{AtomicI8, Ordering};
4684 use std::thread;
4685
4686 struct MyAllocInner {
4687 drop_count: Arc<AtomicI8>,
4688 }
4689
4690 #[derive(Clone)]
4691 struct MyAlloc {
4692 _inner: Arc<MyAllocInner>,
4693 }
4694
4695 impl Drop for MyAllocInner {
4696 fn drop(&mut self) {
4697 println!("MyAlloc freed.");
4698 self.drop_count.fetch_sub(1, Ordering::SeqCst);
4699 }
4700 }
4701
4702 unsafe impl Allocator for MyAlloc {
4703 fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
4704 let g = Global;
4705 g.allocate(layout)
4706 }
4707
4708 unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
4709 let g = Global;
4710 g.deallocate(ptr, layout)
4711 }
4712 }
4713
4714 const DISARMED: bool = false;
4715 const ARMED: bool = true;
4716
4717 struct CheckedCloneDrop {
4718 panic_in_clone: bool,
4719 dropped: bool,
4720 need_drop: Vec<u64>,
4721 }
4722
4723 impl Clone for CheckedCloneDrop {
4724 fn clone(&self) -> Self {
4725 if self.panic_in_clone {
4726 panic!("panic in clone")
4727 }
4728 Self {
4729 panic_in_clone: self.panic_in_clone,
4730 dropped: self.dropped,
4731 need_drop: self.need_drop.clone(),
4732 }
4733 }
4734 }
4735
4736 impl Drop for CheckedCloneDrop {
4737 fn drop(&mut self) {
4738 if self.dropped {
4739 panic!("double drop");
4740 }
4741 self.dropped = true;
4742 }
4743 }
4744
4745 let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
4746
4747 let mut table = RawTable::new_in(MyAlloc {
4748 _inner: Arc::new(MyAllocInner {
4749 drop_count: dropped.clone(),
4750 }),
4751 });
4752
4753 for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
4754 let idx = idx as u64;
4755 table.insert(
4756 idx,
4757 (
4758 idx,
4759 CheckedCloneDrop {
4760 panic_in_clone,
4761 dropped: false,
4762 need_drop: vec![idx],
4763 },
4764 ),
4765 |(k, _)| *k,
4766 );
4767 }
4768
4769 assert_eq!(table.len(), 7);
4770
4771 thread::scope(|s| {
4772 let result = s.spawn(|| {
4773 let armed_flags = [
4774 DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
4775 ];
4776 let mut scope_table = RawTable::new_in(MyAlloc {
4777 _inner: Arc::new(MyAllocInner {
4778 drop_count: dropped.clone(),
4779 }),
4780 });
4781 for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
4782 let idx = idx as u64;
4783 scope_table.insert(
4784 idx,
4785 (
4786 idx,
4787 CheckedCloneDrop {
4788 panic_in_clone,
4789 dropped: false,
4790 need_drop: vec![idx + 100],
4791 },
4792 ),
4793 |(k, _)| *k,
4794 );
4795 }
4796 table.clone_from(&scope_table);
4797 });
4798 assert!(result.join().is_err());
4799 });
4800
4801 // Let's check that all iterators work fine and do not return elements
4802 // (especially `RawIterRange`, which does not depend on the number of
4803 // elements in the table, but looks directly at the control bytes)
4804 //
4805 // SAFETY: We know for sure that `RawTable` will outlive
4806 // the returned `RawIter / RawIterRange` iterator.
4807 assert_eq!(table.len(), 0);
4808 assert_eq!(unsafe { table.iter().count() }, 0);
4809 assert_eq!(unsafe { table.iter().iter.count() }, 0);
4810
4811 for idx in 0..table.buckets() {
4812 let idx = idx as u64;
4813 assert!(
4814 table.find(idx, |(k, _)| *k == idx).is_none(),
4815 "Index: {idx}"
4816 );
4817 }
4818
        // Only the outer table's allocator should still be alive at this point:
        // the scope table's allocator was dropped when the spawned thread
        // panicked, taking the counter from 2 down to 1.
4820 assert_eq!(dropped.load(Ordering::SeqCst), 1);
4821 }
4822}
4823