1 | //! This is a copy of the `rustc_hash` crate, adapted to work as a module. |
2 | //! |
3 | //! If in the future it becomes more reasonable to add dependencies to |
4 | //! `proc_macro`, this module should be removed and replaced with a dependency |
5 | //! on the `rustc_hash` crate. |
6 | |
7 | use std::collections::HashMap; |
8 | use std::hash::BuildHasherDefault; |
9 | use std::hash::Hasher; |
10 | use std::mem::size_of; |
11 | use std::ops::BitXor; |
12 | |
/// Type alias for a hashmap using the `fx` hash algorithm.
///
/// Drop-in replacement for `std::collections::HashMap` that swaps the
/// default DoS-resistant SipHash for the much faster, non-cryptographic
/// [`FxHasher`]. Only use where keys are not attacker-controlled.
pub type FxHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FxHasher>>;
15 | |
/// A speedy hash algorithm for use within rustc. The hashmap in alloc by
/// default uses SipHash which isn't quite as speedy as we want. In the compiler
/// we're not really worried about DOS attempts, so we use a fast
/// non-cryptographic hash.
///
/// This is the same as the algorithm used by Firefox -- which is a homespun
/// one not based on any widely-known algorithm -- though modified to produce
/// 64-bit hash values instead of 32-bit hash values. It consistently
/// out-performs an FNV-based hash within rustc itself -- the collision rate is
/// similar or slightly worse than FNV, but the speed of the hash function
/// itself is much higher because it works on up to 8 bytes at a time.
pub struct FxHasher {
    // Running hash state; one machine word folded in per `add_to_hash` call.
    // Starts at 0 (see the `Default` impl) and is widened to u64 in `finish`.
    hash: usize,
}
30 | |
// Word-sized multiplier used to diffuse bits after each rotate/XOR step in
// `add_to_hash`. 0x9e3779b9 is the classic 32-bit golden-ratio ("Fibonacci
// hashing") constant; the 64-bit value is the constant used by the upstream
// `rustc_hash` crate — presumably chosen for similar bit-mixing quality.
#[cfg (target_pointer_width = "32" )]
const K: usize = 0x9e3779b9;
#[cfg (target_pointer_width = "64" )]
const K: usize = 0x517cc1b727220a95;
35 | |
36 | impl Default for FxHasher { |
37 | #[inline ] |
38 | fn default() -> FxHasher { |
39 | FxHasher { hash: 0 } |
40 | } |
41 | } |
42 | |
43 | impl FxHasher { |
44 | #[inline ] |
45 | fn add_to_hash(&mut self, i: usize) { |
46 | self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K); |
47 | } |
48 | } |
49 | |
50 | impl Hasher for FxHasher { |
51 | #[inline ] |
52 | fn write(&mut self, mut bytes: &[u8]) { |
53 | #[cfg (target_pointer_width = "32" )] |
54 | let read_usize = |bytes: &[u8]| u32::from_ne_bytes(bytes[..4].try_into().unwrap()); |
55 | #[cfg (target_pointer_width = "64" )] |
56 | let read_usize = |bytes: &[u8]| u64::from_ne_bytes(bytes[..8].try_into().unwrap()); |
57 | |
58 | let mut hash = FxHasher { hash: self.hash }; |
59 | assert!(size_of::<usize>() <= 8); |
60 | while bytes.len() >= size_of::<usize>() { |
61 | hash.add_to_hash(read_usize(bytes) as usize); |
62 | bytes = &bytes[size_of::<usize>()..]; |
63 | } |
64 | if (size_of::<usize>() > 4) && (bytes.len() >= 4) { |
65 | hash.add_to_hash(u32::from_ne_bytes(bytes[..4].try_into().unwrap()) as usize); |
66 | bytes = &bytes[4..]; |
67 | } |
68 | if (size_of::<usize>() > 2) && bytes.len() >= 2 { |
69 | hash.add_to_hash(u16::from_ne_bytes(bytes[..2].try_into().unwrap()) as usize); |
70 | bytes = &bytes[2..]; |
71 | } |
72 | if (size_of::<usize>() > 1) && bytes.len() >= 1 { |
73 | hash.add_to_hash(bytes[0] as usize); |
74 | } |
75 | self.hash = hash.hash; |
76 | } |
77 | |
78 | #[inline ] |
79 | fn write_u8(&mut self, i: u8) { |
80 | self.add_to_hash(i as usize); |
81 | } |
82 | |
83 | #[inline ] |
84 | fn write_u16(&mut self, i: u16) { |
85 | self.add_to_hash(i as usize); |
86 | } |
87 | |
88 | #[inline ] |
89 | fn write_u32(&mut self, i: u32) { |
90 | self.add_to_hash(i as usize); |
91 | } |
92 | |
93 | #[cfg (target_pointer_width = "32" )] |
94 | #[inline ] |
95 | fn write_u64(&mut self, i: u64) { |
96 | self.add_to_hash(i as usize); |
97 | self.add_to_hash((i >> 32) as usize); |
98 | } |
99 | |
100 | #[cfg (target_pointer_width = "64" )] |
101 | #[inline ] |
102 | fn write_u64(&mut self, i: u64) { |
103 | self.add_to_hash(i as usize); |
104 | } |
105 | |
106 | #[inline ] |
107 | fn write_usize(&mut self, i: usize) { |
108 | self.add_to_hash(i); |
109 | } |
110 | |
111 | #[inline ] |
112 | fn finish(&self) -> u64 { |
113 | self.hash as u64 |
114 | } |
115 | } |
116 | |