1//! Caches run-time feature detection so that it only needs to be computed
2//! once.
3
4#![allow(dead_code)] // not used on all platforms
5
6use core::sync::atomic::Ordering;
7
8use core::sync::atomic::AtomicUsize;
9
/// Returns `x` with its `bit`-th bit set to 1.
#[inline]
const fn set_bit(x: u64, bit: u32) -> u64 {
    let mask = 1u64 << bit;
    x | mask
}
15
/// Returns `true` when the `bit`-th bit of `x` is set.
#[inline]
const fn test_bit(x: u64, bit: u32) -> bool {
    (x >> bit) & 1 != 0
}
21
/// Returns `x` with its `bit`-th bit cleared.
#[inline]
const fn unset_bit(x: u64, bit: u32) -> u64 {
    let mask = !(1u64 << bit);
    x & mask
}
27
/// Maximum number of features that can be cached; the `debug_assert!`s in
/// `Initializer`'s methods enforce that no feature bit index reaches this
/// value.
const CACHE_CAPACITY: u32 = 62;

/// This type is used to initialize the cache: a 64-bit bitset where bit `i`
/// records whether feature `i` was detected.
#[derive(Copy, Clone)]
pub(crate) struct Initializer(u64);
34
35#[allow(clippy::use_self)]
36impl Default for Initializer {
37 fn default() -> Self {
38 Initializer(0)
39 }
40}
41
42// NOTE: the `debug_assert!` would catch that we do not add more Features than
43// the one fitting our cache.
44impl Initializer {
45 /// Tests the `bit` of the cache.
46 #[inline]
47 pub(crate) fn test(self, bit: u32) -> bool {
48 debug_assert!(
49 bit < CACHE_CAPACITY,
50 "too many features, time to increase the cache size!"
51 );
52 test_bit(self.0, bit)
53 }
54
55 /// Sets the `bit` of the cache.
56 #[inline]
57 pub(crate) fn set(&mut self, bit: u32) {
58 debug_assert!(
59 bit < CACHE_CAPACITY,
60 "too many features, time to increase the cache size!"
61 );
62 let v = self.0;
63 self.0 = set_bit(v, bit);
64 }
65
66 /// Unsets the `bit` of the cache.
67 #[inline]
68 pub(crate) fn unset(&mut self, bit: u32) {
69 debug_assert!(
70 bit < CACHE_CAPACITY,
71 "too many features, time to increase the cache size!"
72 );
73 let v = self.0;
74 self.0 = unset_bit(v, bit);
75 }
76}
77
/// This global variable is a cache of the features supported by the CPU.
///
/// Slot 0 holds feature bits `0..Cache::CAPACITY`; slot 1 holds the higher
/// bits (see `do_initialize` and `test` below for the split).
// Note: on x64, we only use the first slot
static CACHE: [Cache; 2] = [Cache::uninitialized(), Cache::uninitialized()];
81
/// Feature cache with capacity for `size_of::<usize>() * 8 - 1` features.
///
/// Note: 0 is used to represent an uninitialized cache, and (at least) the most
/// significant bit is set on any cache which has been initialized.
///
/// Note: we use `Relaxed` atomic operations, because we are only interested in
/// the effects of operations on a single memory location. That is, we only need
/// "modification order", and not the full-blown "happens before".
struct Cache(AtomicUsize);
91
92impl Cache {
93 const CAPACITY: u32 = (core::mem::size_of::<usize>() * 8 - 1) as u32;
94 const MASK: usize = (1 << Cache::CAPACITY) - 1;
95 const INITIALIZED_BIT: usize = 1usize << Cache::CAPACITY;
96
97 /// Creates an uninitialized cache.
98 #[allow(clippy::declare_interior_mutable_const)]
99 const fn uninitialized() -> Self {
100 Cache(AtomicUsize::new(0))
101 }
102
103 /// Is the `bit` in the cache set? Returns `None` if the cache has not been initialized.
104 #[inline]
105 pub(crate) fn test(&self, bit: u32) -> Option<bool> {
106 let cached = self.0.load(Ordering::Relaxed);
107 if cached == 0 {
108 None
109 } else {
110 Some(test_bit(cached as u64, bit))
111 }
112 }
113
114 /// Initializes the cache.
115 #[inline]
116 fn initialize(&self, value: usize) -> usize {
117 debug_assert_eq!((value & !Cache::MASK), 0);
118 self.0
119 .store(value | Cache::INITIALIZED_BIT, Ordering::Relaxed);
120 value
121 }
122}
123
cfg_if::cfg_if! {
    if #[cfg(feature = "std_detect_env_override")] {
        /// Writes `value` into the global cache after applying the
        /// `RUST_STD_DETECT_UNSTABLE` environment-variable override, and
        /// returns the (possibly modified) value.
        ///
        /// The variable is read as a space-separated list of feature names;
        /// each name recognized by `Feature::from_str` is *disabled* (unset)
        /// in `value`. Unrecognized or empty tokens are silently ignored.
        #[inline]
        fn initialize(mut value: Initializer) -> Initializer {
            // SAFETY: the argument is a valid pointer to a NUL-terminated
            // byte string.
            let env = unsafe {
                libc::getenv(b"RUST_STD_DETECT_UNSTABLE\0".as_ptr() as *const libc::c_char)
            };
            if !env.is_null() {
                // SAFETY: `env` is non-null here, and `getenv` returns a
                // pointer to a NUL-terminated string.
                let len = unsafe { libc::strlen(env) };
                // SAFETY: `strlen` guarantees `len` readable bytes at `env`
                // (excluding the terminating NUL).
                let env = unsafe { core::slice::from_raw_parts(env as *const u8, len) };
                // Non-UTF-8 contents cause the override to be skipped.
                if let Ok(disable) = core::str::from_utf8(env) {
                    for v in disable.split(" ") {
                        let _ = super::Feature::from_str(v).map(|v| value.unset(v as u32));
                    }
                }
            }
            do_initialize(value);
            value
        }
    } else {
        /// Writes `value` into the global cache and returns it unchanged.
        #[inline]
        fn initialize(value: Initializer) -> Initializer {
            do_initialize(value);
            value
        }
    }
}
151
152#[inline]
153fn do_initialize(value: Initializer) {
154 CACHE[0].initialize((value.0) as usize & Cache::MASK);
155 CACHE[1].initialize((value.0 >> Cache::CAPACITY) as usize & Cache::MASK);
156}
157
158// We only have to detect features once, and it's fairly costly, so hint to LLVM
159// that it should assume that cache hits are more common than misses (which is
160// the point of caching). It's possibly unfortunate that this function needs to
161// reach across modules like this to call `os::detect_features`, but it produces
162// the best code out of several attempted variants.
163//
164// The `Initializer` that the cache was initialized with is returned, so that
165// the caller can call `test()` on it without having to load the value from the
166// cache again.
167#[cold]
168fn detect_and_initialize() -> Initializer {
169 initialize(super::os::detect_features())
170}
171
172/// Tests the `bit` of the storage. If the storage has not been initialized,
173/// initializes it with the result of `os::detect_features()`.
174///
175/// On its first invocation, it detects the CPU features and caches them in the
176/// `CACHE` global variable as an `AtomicU64`.
177///
178/// It uses the `Feature` variant to index into this variable as a bitset. If
179/// the bit is set, the feature is enabled, and otherwise it is disabled.
180///
181/// If the feature `std_detect_env_override` is enabled looks for the env
182/// variable `RUST_STD_DETECT_UNSTABLE` and uses its content to disable
183/// Features that would had been otherwise detected.
184#[inline]
185pub(crate) fn test(bit: u32) -> bool {
186 let (relative_bit: u32, idx: i32) = if bit < Cache::CAPACITY {
187 (bit, 0)
188 } else {
189 (bit - Cache::CAPACITY, 1)
190 };
191 CACHE[idx]
192 .test(relative_bit)
193 .unwrap_or_else(|| detect_and_initialize().test(bit))
194}
195