1 | //! Caches run-time feature detection so that it only needs to be computed |
2 | //! once. |
3 | |
4 | #![allow (dead_code)] // not used on all platforms |
5 | |
6 | use core::sync::atomic::Ordering; |
7 | |
8 | use core::sync::atomic::AtomicUsize; |
9 | |
/// Returns `x` with its `bit`-th bit set.
#[inline]
const fn set_bit(x: u128, bit: u32) -> u128 {
    let mask = 1u128 << bit;
    x | mask
}
15 | |
/// Returns `true` if the `bit`-th bit of `x` is set.
#[inline]
const fn test_bit(x: u128, bit: u32) -> bool {
    (x >> bit) & 1 == 1
}
21 | |
/// Returns `x` with its `bit`-th bit cleared.
#[inline]
const fn unset_bit(x: u128, bit: u32) -> u128 {
    let mask = !(1u128 << bit);
    x & mask
}
27 | |
/// Maximum number of features that can be cached.
// There are 3 cache slots (see `CACHE`), each holding
// `size_of::<usize>() * 8 - 1` feature bits (see `Cache::CAPACITY`);
// 93 = 3 * 31, the limit on 32-bit targets, so it is portable everywhere.
const CACHE_CAPACITY: u32 = 93;
30 | |
/// This type is used to initialize the cache: a 128-bit bitset in which
/// bit `i` records whether feature `i` is enabled (see the bit operations
/// in the `impl` below).
// The derived `Default` implementation will initialize the field to zero,
// which is what we want.
#[derive(Copy, Clone, Default)]
pub(crate) struct Initializer(u128);
36 | |
// NOTE: the `debug_assert!`s below catch attempts to add more features
// than fit in the cache.
39 | impl Initializer { |
40 | /// Tests the `bit` of the cache. |
41 | #[inline ] |
42 | pub(crate) fn test(self, bit: u32) -> bool { |
43 | debug_assert!( |
44 | bit < CACHE_CAPACITY, |
45 | "too many features, time to increase the cache size!" |
46 | ); |
47 | test_bit(self.0, bit) |
48 | } |
49 | |
50 | /// Sets the `bit` of the cache. |
51 | #[inline ] |
52 | pub(crate) fn set(&mut self, bit: u32) { |
53 | debug_assert!( |
54 | bit < CACHE_CAPACITY, |
55 | "too many features, time to increase the cache size!" |
56 | ); |
57 | let v = self.0; |
58 | self.0 = set_bit(v, bit); |
59 | } |
60 | |
61 | /// Unsets the `bit` of the cache. |
62 | #[inline ] |
63 | pub(crate) fn unset(&mut self, bit: u32) { |
64 | debug_assert!( |
65 | bit < CACHE_CAPACITY, |
66 | "too many features, time to increase the cache size!" |
67 | ); |
68 | let v = self.0; |
69 | self.0 = unset_bit(v, bit); |
70 | } |
71 | } |
72 | |
/// This global variable is a cache of the features supported by the CPU.
// Note: the third slot is only used on x86.
// Another slot can be added if needed without any change to `Initializer`.
// All slots start out uninitialized (inner value 0 — see `Cache` docs).
static CACHE: [Cache; 3] = [
    Cache::uninitialized(),
    Cache::uninitialized(),
    Cache::uninitialized(),
];
81 | |
/// Feature cache with capacity for `size_of::<usize>() * 8 - 1` features.
///
/// Note: 0 is used to represent an uninitialized cache, and (at least) the most
/// significant bit is set on any cache which has been initialized (see
/// `Cache::INITIALIZED_BIT`), so an initialized cache is never 0.
///
/// Note: we use `Relaxed` atomic operations, because we are only interested in
/// the effects of operations on a single memory location. That is, we only need
/// "modification order", and not the full-blown "happens before".
struct Cache(AtomicUsize);
91 | |
92 | impl Cache { |
93 | const CAPACITY: u32 = (core::mem::size_of::<usize>() * 8 - 1) as u32; |
94 | const MASK: usize = (1 << Cache::CAPACITY) - 1; |
95 | const INITIALIZED_BIT: usize = 1usize << Cache::CAPACITY; |
96 | |
97 | /// Creates an uninitialized cache. |
98 | #[allow (clippy::declare_interior_mutable_const)] |
99 | const fn uninitialized() -> Self { |
100 | Cache(AtomicUsize::new(0)) |
101 | } |
102 | |
103 | /// Is the `bit` in the cache set? Returns `None` if the cache has not been initialized. |
104 | #[inline ] |
105 | pub(crate) fn test(&self, bit: u32) -> Option<bool> { |
106 | let cached = self.0.load(Ordering::Relaxed); |
107 | if cached == 0 { |
108 | None |
109 | } else { |
110 | Some(test_bit(cached as u128, bit)) |
111 | } |
112 | } |
113 | |
114 | /// Initializes the cache. |
115 | #[inline ] |
116 | fn initialize(&self, value: usize) -> usize { |
117 | debug_assert_eq!((value & !Cache::MASK), 0); |
118 | self.0 |
119 | .store(value | Cache::INITIALIZED_BIT, Ordering::Relaxed); |
120 | value |
121 | } |
122 | } |
123 | |
cfg_if::cfg_if! {
    if #[cfg(feature = "std_detect_env_override")] {
        /// Interprets `disable` (the raw bytes of the override environment
        /// variable) as a space-separated list of feature names and unsets
        /// each recognized feature in `value`.
        ///
        /// Non-UTF-8 input and unrecognized names are silently ignored.
        #[inline]
        fn disable_features(disable: &[u8], value: &mut Initializer) {
            if let Ok(disable) = core::str::from_utf8(disable) {
                for v in disable.split(" ") {
                    let _ = super::Feature::from_str(v).map(|v| value.unset(v as u32));
                }
            }
        }

        /// Applies the `RUST_STD_DETECT_UNSTABLE` environment override (if the
        /// variable is set, the features it lists are disabled), publishes the
        /// result to the global `CACHE`, and returns it.
        #[inline]
        fn initialize(mut value: Initializer) -> Initializer {
            use core::ffi::CStr;
            const RUST_STD_DETECT_UNSTABLE: &CStr = c"RUST_STD_DETECT_UNSTABLE";
            cfg_if::cfg_if! {
                if #[cfg(windows)] {
                    use alloc::vec;
                    // Read the variable through the Win32 API directly rather
                    // than `std::env` — NOTE(review): presumably because this
                    // crate must work without `std`; confirm against the build.
                    #[link(name = "kernel32")]
                    unsafe extern "system" {
                        fn GetEnvironmentVariableA(name: *const u8, buffer: *mut u8, size: u32) -> u32;
                    }
                    // With a null buffer, the call returns the length required
                    // to hold the value (or 0 if the variable is unset).
                    let len = unsafe { GetEnvironmentVariableA(RUST_STD_DETECT_UNSTABLE.as_ptr().cast::<u8>(), core::ptr::null_mut(), 0) };
                    if len > 0 {
                        // +1 to include the null terminator.
                        let mut env = vec![0; len as usize + 1];
                        // Second call fills the buffer; on success it returns
                        // the length written, which we use to slice the value.
                        let len = unsafe { GetEnvironmentVariableA(RUST_STD_DETECT_UNSTABLE.as_ptr().cast::<u8>(), env.as_mut_ptr(), len + 1) };
                        if len > 0 {
                            disable_features(&env[..len as usize], &mut value);
                        }
                    }
                } else {
                    // SAFETY: the argument is a valid, nul-terminated C string
                    // (it comes from a `CStr` constant), as `getenv` requires.
                    let env = unsafe {
                        libc::getenv(RUST_STD_DETECT_UNSTABLE.as_ptr())
                    };
                    if !env.is_null() {
                        let len = unsafe { libc::strlen(env) };
                        // SAFETY: `env` is non-null and `strlen` guarantees
                        // `len` readable bytes before the terminating nul.
                        let env = unsafe { core::slice::from_raw_parts(env as *const u8, len) };
                        disable_features(env, &mut value);
                    }
                }
            }
            do_initialize(value);
            value
        }
    } else {
        /// Publishes `value` to the global `CACHE` and returns it (no
        /// environment override in this configuration).
        #[inline]
        fn initialize(value: Initializer) -> Initializer {
            do_initialize(value);
            value
        }
    }
}
177 | |
178 | #[inline ] |
179 | fn do_initialize(value: Initializer) { |
180 | CACHE[0].initialize((value.0) as usize & Cache::MASK); |
181 | CACHE[1].initialize((value.0 >> Cache::CAPACITY) as usize & Cache::MASK); |
182 | CACHE[2].initialize((value.0 >> (2 * Cache::CAPACITY)) as usize & Cache::MASK); |
183 | } |
184 | |
// We only have to detect features once, and it's fairly costly, so hint to LLVM
// that it should assume that cache hits are more common than misses (which is
// the point of caching). It's possibly unfortunate that this function needs to
// reach across modules like this to call `os::detect_features`, but it produces
// the best code out of several attempted variants.
//
// The `Initializer` that the cache was initialized with is returned, so that
// the caller can call `test()` on it without having to load the value from the
// cache again.
#[cold]
fn detect_and_initialize() -> Initializer {
    // Runs platform feature detection (slow) and publishes the result to `CACHE`.
    initialize(super::os::detect_features())
}
198 | |
199 | /// Tests the `bit` of the storage. If the storage has not been initialized, |
200 | /// initializes it with the result of `os::detect_features()`. |
201 | /// |
202 | /// On its first invocation, it detects the CPU features and caches them in the |
203 | /// `CACHE` global variable as an `AtomicU64`. |
204 | /// |
205 | /// It uses the `Feature` variant to index into this variable as a bitset. If |
206 | /// the bit is set, the feature is enabled, and otherwise it is disabled. |
207 | /// |
208 | /// If the feature `std_detect_env_override` is enabled looks for the env |
209 | /// variable `RUST_STD_DETECT_UNSTABLE` and uses its content to disable |
210 | /// Features that would had been otherwise detected. |
211 | #[inline ] |
212 | pub(crate) fn test(bit: u32) -> bool { |
213 | let (relative_bit: u32, idx: usize) = if bit < Cache::CAPACITY { |
214 | (bit, 0) |
215 | } else if bit < 2 * Cache::CAPACITY { |
216 | (bit - Cache::CAPACITY, 1) |
217 | } else { |
218 | (bit - 2 * Cache::CAPACITY, 2) |
219 | }; |
220 | CACHEOption[idx] |
221 | .test(relative_bit) |
222 | .unwrap_or_else(|| detect_and_initialize().test(bit)) |
223 | } |
224 | |