1use super::bitmask::BitMask;
2use super::EMPTY;
3use core::{mem, ptr};
4
5// Use the native word size as the group size. Using a 64-bit group size on
6// a 32-bit architecture will just end up being more expensive because
7// shifts and multiplies will need to be emulated.
8
cfg_if! {
    if #[cfg(any(
        target_pointer_width = "64",
        target_arch = "aarch64",
        target_arch = "x86_64",
        target_arch = "wasm32",
    ))] {
        // 64-bit word: scan 8 control bytes per group.
        // NOTE(review): aarch64/x86_64 are listed in addition to
        // `target_pointer_width = "64"`, presumably so ILP32 ABIs on
        // 64-bit hardware (and wasm32, which has 32-bit pointers but
        // native 64-bit integers) still get the wider group — confirm.
        type GroupWord = u64;
        type NonZeroGroupWord = core::num::NonZeroU64;
    } else {
        // 32-bit fallback: scan 4 control bytes per group, avoiding
        // emulated 64-bit shifts/multiplies (see comment above).
        type GroupWord = u32;
        type NonZeroGroupWord = core::num::NonZeroU32;
    }
}
23
/// Integer word backing a `BitMask` for this implementation.
pub(crate) type BitMaskWord = GroupWord;
/// Non-zero counterpart of [`BitMaskWord`], for masks known to be non-empty.
pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord;
/// Distance in bits between consecutive slots in a `BitMask`: the `match_*`
/// methods below set only the high bit of each byte, so slot `i` lives at
/// bit `i * 8`.
pub(crate) const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each byte for the mask.
#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
pub(crate) const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
/// Mask applied when iterating a `BitMask`; all bits participate here.
pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
31
32/// Helper function to replicate a byte across a `GroupWord`.
33#[inline]
34fn repeat(byte: u8) -> GroupWord {
35 GroupWord::from_ne_bytes([byte; Group::WIDTH])
36}
37
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a word-sized integer: each of the word's
/// bytes holds one control byte, giving `WIDTH` bytes per group.
#[derive(Copy, Clone)]
pub(crate) struct Group(GroupWord);
44
// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
// enable the compiler to eliminate unnecessary byte swaps if we are
// only checking whether a BitMask is empty.
#[allow(clippy::use_self)]
impl Group {
    /// Number of bytes in the group.
    pub(crate) const WIDTH: usize = mem::size_of::<Self>();

    /// Returns a full group of empty bytes, suitable for use as the initial
    /// value for an empty hash table.
    ///
    /// This is guaranteed to be aligned to the group size.
    #[inline]
    pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
        // `repr(C)` plus a leading zero-sized `[Group; 0]` field forces the
        // byte array to `Group`'s alignment without adding any size, so the
        // returned reference satisfies `load_aligned`'s requirement.
        #[repr(C)]
        struct AlignedBytes {
            _align: [Group; 0],
            bytes: [u8; Group::WIDTH],
        }
        const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
            _align: [],
            bytes: [EMPTY; Group::WIDTH],
        };
        // Referencing a `const` here is fine: the value is promoted to a
        // `'static` allocation.
        &ALIGNED_BYTES.bytes
    }

    /// Loads a group of bytes starting at the given address.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for reads of [`Group::WIDTH`] bytes. No alignment
    /// is required; an unaligned read is performed.
    #[inline]
    #[allow(clippy::cast_ptr_alignment)] // unaligned load
    pub(crate) unsafe fn load(ptr: *const u8) -> Self {
        Group(ptr::read_unaligned(ptr.cast()))
    }

    /// Loads a group of bytes starting at the given address, which must be
    /// aligned to `mem::align_of::<Group>()`.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for reads of [`Group::WIDTH`] bytes and aligned
    /// to `mem::align_of::<Group>()` (checked only in debug builds).
    #[inline]
    #[allow(clippy::cast_ptr_alignment)]
    pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
        // FIXME: use align_offset once it stabilizes
        debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
        Group(ptr::read(ptr.cast()))
    }

    /// Stores the group of bytes to the given address, which must be
    /// aligned to `mem::align_of::<Group>()`.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for writes of [`Group::WIDTH`] bytes and aligned
    /// to `mem::align_of::<Group>()` (checked only in debug builds).
    #[inline]
    #[allow(clippy::cast_ptr_alignment)]
    pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
        // FIXME: use align_offset once it stabilizes
        debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
        ptr::write(ptr.cast(), self.0);
    }

    /// Returns a `BitMask` indicating all bytes in the group which *may*
    /// have the given value.
    ///
    /// This function may return a false positive in certain cases where
    /// the byte in the group differs from the searched value only in its
    /// lowest bit. This is fine because:
    /// - This never happens for `EMPTY` and `DELETED`, only full entries.
    /// - The check for key equality will catch these.
    /// - This only happens if there is at least 1 true match.
    /// - The chance of this happening is very low (< 1% chance per byte).
    #[inline]
    pub(crate) fn match_byte(self, byte: u8) -> BitMask {
        // This algorithm is derived from
        // https://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
        //
        // `cmp` has a zero byte exactly where `self` matched `byte`; the
        // subtract/and-not trick then sets the high bit of each (mostly)
        // zero byte. `to_le` makes bit position independent of endianness.
        let cmp = self.0 ^ repeat(byte);
        BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le())
    }

    /// Returns a `BitMask` indicating all bytes in the group which are
    /// `EMPTY`.
    #[inline]
    pub(crate) fn match_empty(self) -> BitMask {
        // If the high bit is set, then the byte must be either:
        // 1111_1111 (EMPTY) or 1000_0000 (DELETED).
        // So we can just check if the top two bits are 1 by ANDing them.
        // (The shift moves bit 6 of each byte into bit 7; bits spilling
        // across byte boundaries are discarded by the 0x80 mask.)
        BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le())
    }

    /// Returns a `BitMask` indicating all bytes in the group which are
    /// `EMPTY` or `DELETED`.
    #[inline]
    pub(crate) fn match_empty_or_deleted(self) -> BitMask {
        // A byte is EMPTY or DELETED iff the high bit is set
        BitMask((self.0 & repeat(0x80)).to_le())
    }

    /// Returns a `BitMask` indicating all bytes in the group which are full.
    #[inline]
    pub(crate) fn match_full(self) -> BitMask {
        self.match_empty_or_deleted().invert()
    }

    /// Performs the following transformation on all bytes in the group:
    /// - `EMPTY => EMPTY`
    /// - `DELETED => EMPTY`
    /// - `FULL => DELETED`
    #[inline]
    pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
        // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
        // and high_bit = 0 (FULL) to 1000_0000
        //
        // Here's this logic expanded to concrete values:
        // let full = 1000_0000 (true) or 0000_0000 (false)
        // !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry)
        // !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry)
        //
        // Since no byte's addition carries out of the byte, the plain `+`
        // cannot overflow even in debug builds.
        let full = !self.0 & repeat(0x80);
        Group(!full + (full >> 7))
    }
}
158