// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::util::UncheckedOptionExt;
use core::{
    fmt, mem,
    sync::atomic::{fence, AtomicU8, Ordering},
};
use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};

const DONE_BIT: u8 = 1;
const POISON_BIT: u8 = 2;
const LOCKED_BIT: u8 = 4;
const PARKED_BIT: u8 = 8;

/// Current state of a `Once`.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum OnceState {
    /// A closure has not been executed yet
    New,

    /// A closure was executed but panicked.
    Poisoned,

    /// A thread is currently executing a closure.
    InProgress,

    /// A closure has completed successfully.
    Done,
}

impl OnceState {
    /// Returns whether the associated `Once` has been poisoned.
    ///
    /// Once an initialization routine for a `Once` has panicked it will forever
    /// indicate to future forced initialization routines that it is poisoned.
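    ///
    /// # Examples
    ///
    /// A small sketch of how poisoning can be observed; the panic is caught
    /// here purely for illustration:
    ///
    /// ```
    /// use parking_lot::Once;
    ///
    /// static INIT: Once = Once::new();
    ///
    /// // A panicking closure poisons the `Once`.
    /// let _ = std::panic::catch_unwind(|| {
    ///     INIT.call_once(|| panic!("initialization failed"));
    /// });
    /// assert!(INIT.state().poisoned());
    /// ```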
    #[inline]
    pub fn poisoned(self) -> bool {
        match self {
            OnceState::Poisoned => true,
            _ => false,
        }
    }

    /// Returns whether the associated `Once` has successfully executed a
    /// closure.
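    ///
    /// # Examples
    ///
    /// A minimal sketch of the transition to the done state:
    ///
    /// ```
    /// use parking_lot::Once;
    ///
    /// static INIT: Once = Once::new();
    ///
    /// assert!(!INIT.state().done());
    /// INIT.call_once(|| {});
    /// // A successfully completed closure marks the `Once` as done.
    /// assert!(INIT.state().done());
    /// ```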
    #[inline]
    pub fn done(self) -> bool {
        match self {
            OnceState::Done => true,
            _ => false,
        }
    }
}

/// A synchronization primitive which can be used to run a one-time
/// initialization. Useful for one-time initialization for globals, FFI or
/// related functionality.
///
/// # Differences from the standard library `Once`
///
/// - Only requires 1 byte of space, instead of 1 word.
/// - Not required to be `'static`.
/// - Relaxed memory barriers in the fast path, which can significantly improve
///   performance on some architectures.
/// - Efficient handling of micro-contention using adaptive spinning.
///
/// # Examples
///
/// ```
/// use parking_lot::Once;
///
/// static START: Once = Once::new();
///
/// START.call_once(|| {
///     // run initialization here
/// });
/// ```
pub struct Once(AtomicU8);

impl Once {
    /// Creates a new `Once` value.
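    ///
    /// # Examples
    ///
    /// A trivial usage sketch; since `new` is a `const fn`, a `Once` can be
    /// created directly in a `static`:
    ///
    /// ```
    /// use parking_lot::Once;
    ///
    /// static INIT: Once = Once::new();
    /// assert!(!INIT.state().done());
    /// ```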
    #[inline]
    pub const fn new() -> Once {
        Once(AtomicU8::new(0))
    }

    /// Returns the current state of this `Once`.
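    ///
    /// # Examples
    ///
    /// A short sketch of observing the state from a single thread, assuming
    /// `OnceState` is re-exported at the crate root alongside `Once`:
    ///
    /// ```
    /// use parking_lot::{Once, OnceState};
    ///
    /// static INIT: Once = Once::new();
    ///
    /// assert_eq!(INIT.state(), OnceState::New);
    /// INIT.call_once(|| {});
    /// assert_eq!(INIT.state(), OnceState::Done);
    /// ```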
    #[inline]
    pub fn state(&self) -> OnceState {
        let state = self.0.load(Ordering::Acquire);
        if state & DONE_BIT != 0 {
            OnceState::Done
        } else if state & LOCKED_BIT != 0 {
            OnceState::InProgress
        } else if state & POISON_BIT != 0 {
            OnceState::Poisoned
        } else {
            OnceState::New
        }
    }

    /// Performs an initialization routine once and only once. The given closure
    /// will be executed if this is the first time `call_once` has been called,
    /// and otherwise the routine will *not* be invoked.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns, it is guaranteed that some initialization
    /// has run and completed (it may not be the closure specified). It is also
    /// guaranteed that any memory writes performed by the executed closure can
    /// be reliably observed by other threads at this point (there is a
    /// happens-before relation between the closure and code executing after the
    /// return).
    ///
    /// # Examples
    ///
    /// ```
    /// use parking_lot::Once;
    ///
    /// static mut VAL: usize = 0;
    /// static INIT: Once = Once::new();
    ///
    /// // Accessing a `static mut` is unsafe much of the time, but if we do so
    /// // in a synchronized fashion (e.g. write once or read all) then we're
    /// // good to go!
    /// //
    /// // This function will only call `expensive_computation` once, and will
    /// // otherwise always return the value returned from the first invocation.
    /// fn get_cached_val() -> usize {
    ///     unsafe {
    ///         INIT.call_once(|| {
    ///             VAL = expensive_computation();
    ///         });
    ///         VAL
    ///     }
    /// }
    ///
    /// fn expensive_computation() -> usize {
    ///     // ...
    /// # 2
    /// }
    /// ```
    ///
    /// # Panics
    ///
    /// The closure `f` will only be executed once if this is called
    /// concurrently amongst many threads. If that closure panics, however, then
    /// it will *poison* this `Once` instance, causing all future invocations of
    /// `call_once` to also panic.
    #[inline]
    pub fn call_once<F>(&self, f: F)
    where
        F: FnOnce(),
    {
        if self.0.load(Ordering::Acquire) == DONE_BIT {
            return;
        }

        let mut f = Some(f);
        self.call_once_slow(false, &mut |_| unsafe { f.take().unchecked_unwrap()() });
    }

    /// Performs the same function as `call_once` except ignores poisoning.
    ///
    /// If this `Once` has been poisoned (some initialization panicked) then
    /// this function will continue to attempt to call initialization functions
    /// until one of them doesn't panic.
    ///
    /// The closure `f` is yielded a structure which can be used to query the
    /// state of this `Once` (whether initialization has previously panicked or
    /// not).
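    ///
    /// # Examples
    ///
    /// A minimal sketch of inspecting the state passed to the closure:
    ///
    /// ```
    /// use parking_lot::Once;
    ///
    /// static INIT: Once = Once::new();
    ///
    /// INIT.call_once_force(|state| {
    ///     // Nothing has run before, so the `Once` cannot be poisoned yet.
    ///     assert!(!state.poisoned());
    /// });
    /// ```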
    #[inline]
    pub fn call_once_force<F>(&self, f: F)
    where
        F: FnOnce(OnceState),
    {
        if self.0.load(Ordering::Acquire) == DONE_BIT {
            return;
        }

        let mut f = Some(f);
        self.call_once_slow(true, &mut |state| unsafe {
            f.take().unchecked_unwrap()(state)
        });
    }

    // This is a non-generic function to reduce the monomorphization cost of
    // using `call_once` (this isn't exactly a trivial or small implementation).
    //
    // Additionally, this is tagged with `#[cold]` as it should indeed be cold
    // and it helps let LLVM know that calls to this function should be off the
    // fast path. Essentially, this should help generate more straight line code
    // in LLVM.
    //
    // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
    // currently no way to take an `FnOnce` and call it via virtual dispatch
    // without some allocation overhead.
    #[cold]
    fn call_once_slow(&self, ignore_poison: bool, f: &mut dyn FnMut(OnceState)) {
        let mut spinwait = SpinWait::new();
        let mut state = self.0.load(Ordering::Relaxed);
        loop {
            // If another thread called the closure, we're done
            if state & DONE_BIT != 0 {
                // An acquire fence is needed here since we didn't load the
                // state with Ordering::Acquire.
                fence(Ordering::Acquire);
                return;
            }

            // If the state has been poisoned and we aren't forcing, then panic
            if state & POISON_BIT != 0 && !ignore_poison {
                // Need the fence here as well for the same reason
                fence(Ordering::Acquire);
                panic!("Once instance has previously been poisoned");
            }

            // Grab the lock if it isn't locked, even if there is a queue on it.
            // We also clear the poison bit since we are going to try running
            // the closure again.
            if state & LOCKED_BIT == 0 {
                match self.0.compare_exchange_weak(
                    state,
                    (state | LOCKED_BIT) & !POISON_BIT,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => break,
                    Err(x) => state = x,
                }
                continue;
            }

            // If there is no queue, try spinning a few times
            if state & PARKED_BIT == 0 && spinwait.spin() {
                state = self.0.load(Ordering::Relaxed);
                continue;
            }

            // Set the parked bit
            if state & PARKED_BIT == 0 {
                if let Err(x) = self.0.compare_exchange_weak(
                    state,
                    state | PARKED_BIT,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                ) {
                    state = x;
                    continue;
                }
            }

            // Park our thread until we are woken up by the thread that owns the
            // lock.
            let addr = self as *const _ as usize;
            let validate = || self.0.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
            let before_sleep = || {};
            let timed_out = |_, _| unreachable!();
            unsafe {
                parking_lot_core::park(
                    addr,
                    validate,
                    before_sleep,
                    timed_out,
                    DEFAULT_PARK_TOKEN,
                    None,
                );
            }

            // Loop back and check if the done bit was set
            spinwait.reset();
            state = self.0.load(Ordering::Relaxed);
        }

        struct PanicGuard<'a>(&'a Once);
        impl<'a> Drop for PanicGuard<'a> {
            fn drop(&mut self) {
                // Mark the state as poisoned, unlock it and unpark all threads.
                let once = self.0;
                let state = once.0.swap(POISON_BIT, Ordering::Release);
                if state & PARKED_BIT != 0 {
                    let addr = once as *const _ as usize;
                    unsafe {
                        parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN);
                    }
                }
            }
        }

        // At this point we have the lock, so run the closure. Make sure we
        // properly clean up if the closure panics.
        let guard = PanicGuard(self);
        let once_state = if state & POISON_BIT != 0 {
            OnceState::Poisoned
        } else {
            OnceState::New
        };
        f(once_state);
        mem::forget(guard);

        // Now unlock the state, set the done bit and unpark all threads
        let state = self.0.swap(DONE_BIT, Ordering::Release);
        if state & PARKED_BIT != 0 {
            let addr = self as *const _ as usize;
            unsafe {
                parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN);
            }
        }
    }
}

impl Default for Once {
    #[inline]
    fn default() -> Once {
        Once::new()
    }
}

impl fmt::Debug for Once {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Once")
            .field("state", &self.state())
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use crate::Once;
    use std::panic;
    use std::sync::mpsc::channel;
    use std::thread;

    #[test]
    fn smoke_once() {
        static O: Once = Once::new();
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }

    #[test]
    fn stampede_once() {
        static O: Once = Once::new();
        static mut RUN: bool = false;

        let (tx, rx) = channel();
        for _ in 0..10 {
            let tx = tx.clone();
            thread::spawn(move || {
                for _ in 0..4 {
                    thread::yield_now()
                }
                unsafe {
                    O.call_once(|| {
                        assert!(!RUN);
                        RUN = true;
                    });
                    assert!(RUN);
                }
                tx.send(()).unwrap();
            });
        }

        unsafe {
            O.call_once(|| {
                assert!(!RUN);
                RUN = true;
            });
            assert!(RUN);
        }

        for _ in 0..10 {
            rx.recv().unwrap();
        }
    }

    #[test]
    fn poison_bad() {
        static O: Once = Once::new();

        // poison the once
        let t = panic::catch_unwind(|| {
            O.call_once(|| panic!());
        });
        assert!(t.is_err());

        // poisoning propagates
        let t = panic::catch_unwind(|| {
            O.call_once(|| {});
        });
        assert!(t.is_err());

        // we can subvert poisoning, however
        let mut called = false;
        O.call_once_force(|p| {
            called = true;
            assert!(p.poisoned())
        });
        assert!(called);

        // once any success happens, we stop propagating the poison
        O.call_once(|| {});
    }

    #[test]
    fn wait_for_force_to_finish() {
        static O: Once = Once::new();

        // poison the once
        let t = panic::catch_unwind(|| {
            O.call_once(|| panic!());
        });
        assert!(t.is_err());

        // make sure someone's waiting inside the once via a force
        let (tx1, rx1) = channel();
        let (tx2, rx2) = channel();
        let t1 = thread::spawn(move || {
            O.call_once_force(|p| {
                assert!(p.poisoned());
                tx1.send(()).unwrap();
                rx2.recv().unwrap();
            });
        });

        rx1.recv().unwrap();

        // put another waiter on the once
        let t2 = thread::spawn(|| {
            let mut called = false;
            O.call_once(|| {
                called = true;
            });
            assert!(!called);
        });

        tx2.send(()).unwrap();

        assert!(t1.join().is_ok());
        assert!(t2.join().is_ok());
    }

    #[test]
    fn test_once_debug() {
        static O: Once = Once::new();

        assert_eq!(format!("{:?}", O), "Once { state: New }");
    }
}
459