// Copyright 2017 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::POINTER_WIDTH;
use once_cell::sync::Lazy;
use std::cell::Cell;
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::Mutex;

/// Thread ID manager which allocates thread IDs. It attempts to aggressively
/// reuse thread IDs where possible to avoid cases where a ThreadLocal grows
/// indefinitely when it is used by many short-lived threads.
struct ThreadIdManager {
    /// The next ID that has never been handed out to any thread.
    free_from: usize,
    /// IDs released by exited threads, kept as a min-heap (via `Reverse`) so
    /// that the smallest free ID is always reused first.
    free_list: BinaryHeap<Reverse<usize>>,
}
impl ThreadIdManager {
    fn new() -> Self {
        Self {
            free_from: 0,
            free_list: BinaryHeap::new(),
        }
    }
    fn alloc(&mut self) -> usize {
        if let Some(id) = self.free_list.pop() {
            id.0
        } else {
            // `free_from` can't overflow as each thread takes up at least 2 bytes of memory and
            // thus we can't even have `usize::MAX / 2 + 1` threads.

            let id = self.free_from;
            self.free_from += 1;
            id
        }
    }
    fn free(&mut self, id: usize) {
        self.free_list.push(Reverse(id));
    }
}
static THREAD_ID_MANAGER: Lazy<Mutex<ThreadIdManager>> =
    Lazy::new(|| Mutex::new(ThreadIdManager::new()));

/// Data which is unique to the current thread while it is running.
/// A thread ID may be reused after a thread exits.
#[derive(Clone, Copy)]
pub(crate) struct Thread {
    /// The thread ID obtained from the thread ID manager.
    pub(crate) id: usize,
    /// The bucket this thread's local storage will be in.
    pub(crate) bucket: usize,
    /// The size of the bucket this thread's local storage will be in.
    pub(crate) bucket_size: usize,
    /// The index into the bucket this thread's local storage is in.
    pub(crate) index: usize,
}
impl Thread {
    pub(crate) fn new(id: usize) -> Self {
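        // Buckets double in size: bucket `b` holds the IDs `2^b - 1 ..= 2^(b + 1) - 2`,
        // so the bucket is the position of the highest set bit of `id + 1` and the
        // index is the offset of `id` from the first ID in that bucket.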
        let bucket = usize::from(POINTER_WIDTH) - ((id + 1).leading_zeros() as usize) - 1;
        let bucket_size = 1 << bucket;
        let index = id - (bucket_size - 1);

        Self {
            id,
            bucket,
            bucket_size,
            index,
        }
    }
}

cfg_if::cfg_if! {
    if #[cfg(feature = "nightly")] {
        // This is split into 2 thread-local variables so that we can check whether the
        // thread is initialized without having to register a thread-local destructor.
        //
        // This makes the fast path smaller.
        #[thread_local]
        static mut THREAD: Option<Thread> = None;
        thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }

        // Guard to ensure the thread ID is released on thread exit.
        struct ThreadGuard {
            // We keep a copy of the thread ID in the ThreadGuard: we can't
            // reliably access THREAD in our Drop impl due to the unpredictable
            // order of TLS destructors.
            id: Cell<usize>,
        }

        impl Drop for ThreadGuard {
            fn drop(&mut self) {
                // Release the thread ID. Any further accesses to the thread ID
                // will go through get_slow which will either panic or
                // initialize a new ThreadGuard.
                unsafe {
                    THREAD = None;
                }
                THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
            }
        }

        /// Returns a thread ID for the current thread, allocating one if needed.
        #[inline]
        pub(crate) fn get() -> Thread {
            if let Some(thread) = unsafe { THREAD } {
                thread
            } else {
                get_slow()
            }
        }

        /// Out-of-line slow path for allocating a thread ID.
        #[cold]
        fn get_slow() -> Thread {
            let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
            unsafe {
                THREAD = Some(new);
            }
            THREAD_GUARD.with(|guard| guard.id.set(new.id));
            new
        }
    } else {
        // This is split into 2 thread-local variables so that we can check whether the
        // thread is initialized without having to register a thread-local destructor.
        //
        // This makes the fast path smaller.
        thread_local! { static THREAD: Cell<Option<Thread>> = const { Cell::new(None) }; }
        thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }

        // Guard to ensure the thread ID is released on thread exit.
        struct ThreadGuard {
            // We keep a copy of the thread ID in the ThreadGuard: we can't
            // reliably access THREAD in our Drop impl due to the unpredictable
            // order of TLS destructors.
            id: Cell<usize>,
        }

        impl Drop for ThreadGuard {
            fn drop(&mut self) {
                // Release the thread ID. Any further accesses to the thread ID
                // will go through get_slow which will either panic or
                // initialize a new ThreadGuard.
                let _ = THREAD.try_with(|thread| thread.set(None));
                THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
            }
        }

        /// Returns a thread ID for the current thread, allocating one if needed.
        #[inline]
        pub(crate) fn get() -> Thread {
            THREAD.with(|thread| {
                if let Some(thread) = thread.get() {
                    thread
                } else {
                    get_slow(thread)
                }
            })
        }

        /// Out-of-line slow path for allocating a thread ID.
        #[cold]
        fn get_slow(thread: &Cell<Option<Thread>>) -> Thread {
            let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
            thread.set(Some(new));
            THREAD_GUARD.with(|guard| guard.id.set(new.id));
            new
        }
    }
}
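
// Illustrative sanity check, not part of the upstream crate: `get` should hand
// back the same thread data on repeated calls within a thread, and two threads
// that are alive at the same time should never share an ID.
#[test]
fn test_get() {
    let first = get();
    let second = get();
    assert_eq!(first.id, second.id);
    assert_eq!(first.bucket, second.bucket);
    assert_eq!(first.index, second.index);

    // A concurrently running thread must be assigned a different ID, since our
    // ID is neither freed nor reachable from `free_from` while we are alive.
    let other = std::thread::spawn(get).join().unwrap();
    assert_ne!(first.id, other.id);
}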

#[test]
fn test_thread() {
    let thread = Thread::new(0);
    assert_eq!(thread.id, 0);
    assert_eq!(thread.bucket, 0);
    assert_eq!(thread.bucket_size, 1);
    assert_eq!(thread.index, 0);

    let thread = Thread::new(1);
    assert_eq!(thread.id, 1);
    assert_eq!(thread.bucket, 1);
    assert_eq!(thread.bucket_size, 2);
    assert_eq!(thread.index, 0);

    let thread = Thread::new(2);
    assert_eq!(thread.id, 2);
    assert_eq!(thread.bucket, 1);
    assert_eq!(thread.bucket_size, 2);
    assert_eq!(thread.index, 1);

    let thread = Thread::new(3);
    assert_eq!(thread.id, 3);
    assert_eq!(thread.bucket, 2);
    assert_eq!(thread.bucket_size, 4);
    assert_eq!(thread.index, 0);

    let thread = Thread::new(19);
    assert_eq!(thread.id, 19);
    assert_eq!(thread.bucket, 4);
    assert_eq!(thread.bucket_size, 16);
    assert_eq!(thread.index, 4);
}
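
// Illustrative addition, not part of the upstream crate: the manager hands the
// smallest freed ID out again before minting a fresh one, because `free_list`
// is a `BinaryHeap<Reverse<usize>>` and therefore behaves as a min-heap.
#[test]
fn test_thread_id_reuse() {
    let mut manager = ThreadIdManager::new();
    assert_eq!(manager.alloc(), 0);
    assert_eq!(manager.alloc(), 1);
    assert_eq!(manager.alloc(), 2);

    // Freed IDs go back onto the free list...
    manager.free(2);
    manager.free(0);

    // ...and the smallest one is reused before any new ID is allocated.
    assert_eq!(manager.alloc(), 0);
    assert_eq!(manager.alloc(), 2);
    assert_eq!(manager.alloc(), 3);
}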