// Copyright 2017 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::POINTER_WIDTH;
use once_cell::sync::Lazy;
use std::cell::Cell;
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::Mutex;
use std::usize;

/// Thread ID manager which allocates thread IDs. It attempts to aggressively
/// reuse thread IDs where possible to avoid cases where a ThreadLocal grows
/// indefinitely when it is used by many short-lived threads.
struct ThreadIdManager {
    /// The next fresh ID to hand out once the free list is empty.
    free_from: usize,
    /// Released IDs available for reuse. `Reverse` turns the max-heap into a
    /// min-heap so the smallest free ID is always reused first.
    free_list: BinaryHeap<Reverse<usize>>,
}
impl ThreadIdManager {
    fn new() -> ThreadIdManager {
        ThreadIdManager {
            free_from: 0,
            free_list: BinaryHeap::new(),
        }
    }
    fn alloc(&mut self) -> usize {
        if let Some(id) = self.free_list.pop() {
            id.0
        } else {
            let id = self.free_from;
            self.free_from = self
                .free_from
                .checked_add(1)
                .expect("Ran out of thread IDs");
            id
        }
    }
    fn free(&mut self, id: usize) {
        self.free_list.push(Reverse(id));
    }
}
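// Global thread-ID allocator, shared by all threads behind a Mutex and lazily
// initialized on first use.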
static THREAD_ID_MANAGER: Lazy<Mutex<ThreadIdManager>> =
    Lazy::new(|| Mutex::new(ThreadIdManager::new()));

/// Data which is unique to the current thread while it is running.
/// A thread ID may be reused after a thread exits.
#[derive(Clone, Copy)]
pub(crate) struct Thread {
    /// The thread ID obtained from the thread ID manager.
    pub(crate) id: usize,
    /// The bucket this thread's local storage will be in.
    pub(crate) bucket: usize,
    /// The size of the bucket this thread's local storage will be in.
    pub(crate) bucket_size: usize,
    /// The index into the bucket this thread's local storage is in.
    pub(crate) index: usize,
}
impl Thread {
    fn new(id: usize) -> Thread {
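        // `bucket` is the bit length of `id`: IDs 0 and 1 each get their own
        // single-entry bucket, and IDs in [2^(k-1), 2^k) share bucket k, which
        // holds 2^(k-1) entries. The index within a bucket is the ID with its
        // top bit cleared (the XOR below).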
        let bucket = usize::from(POINTER_WIDTH) - id.leading_zeros() as usize;
        let bucket_size = 1 << bucket.saturating_sub(1);
        let index = if id != 0 { id ^ bucket_size } else { 0 };

        Thread {
            id,
            bucket,
            bucket_size,
            index,
        }
    }
}

cfg_if::cfg_if! {
    if #[cfg(feature = "nightly")] {
        // This is split into 2 thread-local variables so that we can check whether the
        // thread is initialized without having to register a thread-local destructor.
        //
        // This makes the fast path smaller.
        #[thread_local]
        static mut THREAD: Option<Thread> = None;
        thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }

        // Guard to ensure the thread ID is released on thread exit.
        struct ThreadGuard {
            // We keep a copy of the thread ID in the ThreadGuard: we can't
            // reliably access THREAD in our Drop impl due to the unpredictable
            // order of TLS destructors.
            id: Cell<usize>,
        }

        impl Drop for ThreadGuard {
            fn drop(&mut self) {
                // Release the thread ID. Any further accesses to the thread ID
                // will go through get_slow which will either panic or
                // initialize a new ThreadGuard.
                unsafe {
                    THREAD = None;
                }
                THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
            }
        }

        /// Returns a thread ID for the current thread, allocating one if needed.
        #[inline]
        pub(crate) fn get() -> Thread {
            if let Some(thread) = unsafe { THREAD } {
                thread
            } else {
                get_slow()
            }
        }

        /// Out-of-line slow path for allocating a thread ID.
        #[cold]
        fn get_slow() -> Thread {
            let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
            unsafe {
                THREAD = Some(new);
            }
            THREAD_GUARD.with(|guard| guard.id.set(new.id));
            new
        }
    } else {
        // This is split into 2 thread-local variables so that we can check whether the
        // thread is initialized without having to register a thread-local destructor.
        //
        // This makes the fast path smaller.
        thread_local! { static THREAD: Cell<Option<Thread>> = const { Cell::new(None) }; }
        thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }

        // Guard to ensure the thread ID is released on thread exit.
        struct ThreadGuard {
            // We keep a copy of the thread ID in the ThreadGuard: we can't
            // reliably access THREAD in our Drop impl due to the unpredictable
            // order of TLS destructors.
            id: Cell<usize>,
        }

        impl Drop for ThreadGuard {
            fn drop(&mut self) {
                // Release the thread ID. Any further accesses to the thread ID
                // will go through get_slow which will either panic or
                // initialize a new ThreadGuard.
                let _ = THREAD.try_with(|thread| thread.set(None));
                THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
            }
        }

        /// Returns a thread ID for the current thread, allocating one if needed.
        #[inline]
        pub(crate) fn get() -> Thread {
            THREAD.with(|thread| {
                if let Some(thread) = thread.get() {
                    thread
                } else {
                    get_slow(thread)
                }
            })
        }

        /// Out-of-line slow path for allocating a thread ID.
        #[cold]
        fn get_slow(thread: &Cell<Option<Thread>>) -> Thread {
            let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
            thread.set(Some(new));
            THREAD_GUARD.with(|guard| guard.id.set(new.id));
            new
        }
    }
}

#[test]
fn test_thread() {
    let thread = Thread::new(0);
    assert_eq!(thread.id, 0);
    assert_eq!(thread.bucket, 0);
    assert_eq!(thread.bucket_size, 1);
    assert_eq!(thread.index, 0);

    let thread = Thread::new(1);
    assert_eq!(thread.id, 1);
    assert_eq!(thread.bucket, 1);
    assert_eq!(thread.bucket_size, 1);
    assert_eq!(thread.index, 0);

    let thread = Thread::new(2);
    assert_eq!(thread.id, 2);
    assert_eq!(thread.bucket, 2);
    assert_eq!(thread.bucket_size, 2);
    assert_eq!(thread.index, 0);

    let thread = Thread::new(3);
    assert_eq!(thread.id, 3);
    assert_eq!(thread.bucket, 2);
    assert_eq!(thread.bucket_size, 2);
    assert_eq!(thread.index, 1);

    let thread = Thread::new(19);
    assert_eq!(thread.id, 19);
    assert_eq!(thread.bucket, 5);
    assert_eq!(thread.bucket_size, 16);
    assert_eq!(thread.index, 3);
}
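
// A small additional sanity check of the allocator's reuse behaviour: freed
// IDs should be handed out again, smallest first, before any fresh ID is
// minted.
#[test]
fn test_thread_id_reuse() {
    let mut manager = ThreadIdManager::new();
    assert_eq!(manager.alloc(), 0);
    assert_eq!(manager.alloc(), 1);
    assert_eq!(manager.alloc(), 2);
    manager.free(2);
    manager.free(0);
    // The smallest released ID (0) comes back first, then 2, and fresh IDs
    // resume from 3 once the free list is exhausted.
    assert_eq!(manager.alloc(), 0);
    assert_eq!(manager.alloc(), 2);
    assert_eq!(manager.alloc(), 3);
}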