//! This is a copy of the `rustc_hash` crate, adapted to work as a module.
//!
//! If in the future it becomes more reasonable to add dependencies to
//! `proc_macro`, this module should be removed and replaced with a dependency
//! on the `rustc_hash` crate.

use std::collections::HashMap;
use std::hash::{BuildHasherDefault, Hasher};
use std::mem::size_of;
use std::ops::BitXor;

/// Type alias for a hashmap using the `fx` hash algorithm.
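///
/// Because the alias pins a non-default `BuildHasher`, maps are created with
/// `Default` rather than `HashMap::new`. A small usage sketch:
///
/// ```
/// let mut map: FxHashMap<&str, u32> = FxHashMap::default();
/// map.insert("ident", 1);
/// assert_eq!(map.get("ident"), Some(&1));
/// ```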
pub(super) type FxHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FxHasher>>;

/// A speedy hash algorithm for use within rustc. The hashmap in `alloc` by
/// default uses SipHash, which isn't quite as speedy as we want. In the
/// compiler we're not really worried about DoS attempts, so we use a fast
/// non-cryptographic hash.
///
/// This is the same as the algorithm used by Firefox -- which is a homespun
/// one not based on any widely-known algorithm -- though modified to produce
/// 64-bit hash values instead of 32-bit hash values. It consistently
/// outperforms an FNV-based hash within rustc itself -- the collision rate is
/// similar or slightly worse than FNV, but the speed of the hash function
/// itself is much higher because it works on up to 8 bytes at a time.
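///
/// A minimal sketch of driving the hasher by hand through the `Hasher` trait:
///
/// ```
/// let mut hasher = FxHasher::default();
/// hasher.write(b"proc_macro");
/// let _hash: u64 = hasher.finish();
/// ```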
#[derive(Default)]
pub(super) struct FxHasher {
    hash: usize,
}

// Multiplicative constant used by `add_to_hash`, chosen per pointer width.
#[cfg(target_pointer_width = "32")]
const K: usize = 0x9e3779b9;
#[cfg(target_pointer_width = "64")]
const K: usize = 0x517cc1b727220a95;

impl FxHasher {
    /// Folds one machine word into the hash state: rotate the current value
    /// left, XOR in the new word, then multiply by `K`.
    #[inline]
    fn add_to_hash(&mut self, i: usize) {
        self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K);
    }
}

impl Hasher for FxHasher {
    #[inline]
    fn write(&mut self, mut bytes: &[u8]) {
        // Read one native-endian machine word from the front of `bytes`.
        #[cfg(target_pointer_width = "32")]
        let read_usize = |bytes: &[u8]| u32::from_ne_bytes(bytes[..4].try_into().unwrap());
        #[cfg(target_pointer_width = "64")]
        let read_usize = |bytes: &[u8]| u64::from_ne_bytes(bytes[..8].try_into().unwrap());

        let mut hash = FxHasher { hash: self.hash };
        assert!(size_of::<usize>() <= 8);
        // Hash whole machine words first, then fold in any remaining
        // 4-, 2-, and 1-byte tail.
        while bytes.len() >= size_of::<usize>() {
            hash.add_to_hash(read_usize(bytes) as usize);
            bytes = &bytes[size_of::<usize>()..];
        }
        if (size_of::<usize>() > 4) && (bytes.len() >= 4) {
            hash.add_to_hash(u32::from_ne_bytes(bytes[..4].try_into().unwrap()) as usize);
            bytes = &bytes[4..];
        }
        if (size_of::<usize>() > 2) && bytes.len() >= 2 {
            hash.add_to_hash(u16::from_ne_bytes(bytes[..2].try_into().unwrap()) as usize);
            bytes = &bytes[2..];
        }
        if (size_of::<usize>() > 1) && !bytes.is_empty() {
            hash.add_to_hash(bytes[0] as usize);
        }
        self.hash = hash.hash;
    }

    #[inline]
    fn write_u8(&mut self, i: u8) {
        self.add_to_hash(i as usize);
    }

    #[inline]
    fn write_u16(&mut self, i: u16) {
        self.add_to_hash(i as usize);
    }

    #[inline]
    fn write_u32(&mut self, i: u32) {
        self.add_to_hash(i as usize);
    }

    #[cfg(target_pointer_width = "32")]
    #[inline]
    fn write_u64(&mut self, i: u64) {
        // On 32-bit targets, fold in the low and high halves as two words.
        self.add_to_hash(i as usize);
        self.add_to_hash((i >> 32) as usize);
    }

    #[cfg(target_pointer_width = "64")]
    #[inline]
    fn write_u64(&mut self, i: u64) {
        self.add_to_hash(i as usize);
    }

    #[inline]
    fn write_usize(&mut self, i: usize) {
        self.add_to_hash(i);
    }

    #[inline]
    fn finish(&self) -> u64 {
        self.hash as u64
    }
}