//! Synchronization primitives for one-time evaluation.

use crate::{
    atomic::{AtomicU8, Ordering},
    RelaxStrategy, Spin,
};
use core::{cell::UnsafeCell, fmt, marker::PhantomData, mem::MaybeUninit};

/// A primitive that provides lazy one-time initialization.
///
/// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
/// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
/// `Once`).
///
/// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static START: spin::Once = spin::Once::new();
///
/// START.call_once(|| {
///     // run initialization here
/// });
/// ```
pub struct Once<T = (), R = Spin> {
    phantom: PhantomData<R>,
    status: AtomicStatus,
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T, R> Default for Once<T, R> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.get() {
            Some(s) => write!(f, "Once {{ data: ")
                .and_then(|()| s.fmt(f))
                .and_then(|()| write!(f, "}}")),
            None => write!(f, "Once {{ <uninitialized> }}"),
        }
    }
}

// Same unsafe impls as `std::sync::RwLock`, because this also allows for
// concurrent reads.
unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
unsafe impl<T: Send, R> Send for Once<T, R> {}

mod status {
    use super::*;

    // SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
    // a value for which there exists a valid Status. This means that users of this API must only
    // be allowed to load and store `Status`es.
    #[repr(transparent)]
    pub struct AtomicStatus(AtomicU8);

    // Four states that a Once can be in, encoded into the lower bits of `status` in
    // the Once structure.
    #[repr(u8)]
    #[derive(Clone, Copy, Debug, PartialEq)]
    pub enum Status {
        Incomplete = 0x00,
        Running = 0x01,
        Complete = 0x02,
        Panicked = 0x03,
    }

    impl Status {
        // Construct a status from an inner u8 integer.
        //
        // # Safety
        //
        // For this to be safe, the inner number must have a valid corresponding enum variant.
        unsafe fn new_unchecked(inner: u8) -> Self {
            core::mem::transmute(inner)
        }
    }

    impl AtomicStatus {
        #[inline(always)]
        pub const fn new(status: Status) -> Self {
            // SAFETY: We got the value directly from status, so transmuting back is fine.
            Self(AtomicU8::new(status as u8))
        }

        #[inline(always)]
        pub fn load(&self, ordering: Ordering) -> Status {
            // SAFETY: We know that the inner integer must have been constructed from a Status in
            // the first place.
            unsafe { Status::new_unchecked(self.0.load(ordering)) }
        }

        #[inline(always)]
        pub fn store(&self, status: Status, ordering: Ordering) {
            // SAFETY: While not directly unsafe, this is safe because the value was retrieved from
            // a status, thus making transmutation safe.
            self.0.store(status as u8, ordering);
        }

        #[inline(always)]
        pub fn compare_exchange(
            &self,
            old: Status,
            new: Status,
            success: Ordering,
            failure: Ordering,
        ) -> Result<Status, Status> {
            match self
                .0
                .compare_exchange(old as u8, new as u8, success, failure)
            {
                // SAFETY: A compare exchange will always return a value that was later stored into
                // the atomic u8, but due to the invariant that it must be a valid Status, we know
                // that both Ok(_) and Err(_) will be safely transmutable.
                Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
                Err(err) => Err(unsafe { Status::new_unchecked(err) }),
            }
        }

        #[inline(always)]
        pub fn get_mut(&mut self) -> &mut Status {
            // SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
            // it to a &mut Status.
            unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
        }
    }
}

use self::status::{AtomicStatus, Status};

impl<T, R: RelaxStrategy> Once<T, R> {
    /// Performs an initialization routine once and only once. The given closure
    /// will be executed if this is the first time `call_once` has been called,
    /// and otherwise the routine will *not* be invoked.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns, it is guaranteed that some initialization
    /// has run and completed (it may not be the closure specified). The
    /// returned reference will point to the result from the closure that was
    /// run.
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
    ///
    /// # Examples
    ///
    /// ```
    /// use spin;
    ///
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// fn get_cached_val() -> usize {
    ///     *INIT.call_once(expensive_computation)
    /// }
    ///
    /// fn expensive_computation() -> usize {
    ///     // ...
    /// # 2
    /// }
    /// ```
    pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
        match self.try_call_once(|| Ok::<T, core::convert::Infallible>(f())) {
            Ok(x) => x,
            Err(void) => match void {},
        }
    }

    /// This method is similar to `call_once`, but allows the given closure to
    /// fail, and leaves the `Once` in an uninitialized state if it does.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns without error, it is guaranteed that some
    /// initialization has run and completed (it may not be the closure
    /// specified). The returned reference will point to the result from the
    /// closure that was run.
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
    ///
    /// # Examples
    ///
    /// ```
    /// use spin;
    ///
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// fn get_cached_val() -> Result<usize, String> {
    ///     INIT.try_call_once(expensive_fallible_computation).map(|x| *x)
    /// }
    ///
    /// fn expensive_fallible_computation() -> Result<usize, String> {
    ///     // ...
    /// # Ok(2)
    /// }
    /// ```
    pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
        if let Some(value) = self.get() {
            Ok(value)
        } else {
            self.try_call_once_slow(f)
        }
    }

    #[cold]
    fn try_call_once_slow<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
        loop {
            let xchg = self.status.compare_exchange(
                Status::Incomplete,
                Status::Running,
                Ordering::Acquire,
                Ordering::Acquire,
            );

            match xchg {
                Ok(_must_be_state_incomplete) => {
                    // Initialization is performed after the match for readability.
                }
                Err(Status::Panicked) => panic!("Once panicked"),
                Err(Status::Running) => match self.poll() {
                    Some(v) => return Ok(v),
                    None => continue,
                },
                Err(Status::Complete) => {
                    return Ok(unsafe {
                        // SAFETY: The status is Complete
                        self.force_get()
                    });
                }
                Err(Status::Incomplete) => {
                    // The compare_exchange failed, so this shouldn't ever be reached.
                    // However, if we decide to switch to compare_exchange_weak it will
                    // be safer to leave this here than hit an unreachable.
                    continue;
                }
            }

            // The compare-exchange succeeded, so we shall initialize it.

            // We use a guard (Finish) to catch panics caused by the builder.
            let finish = Finish {
                status: &self.status,
            };
            let val = match f() {
                Ok(val) => val,
                Err(err) => {
                    // If an error occurs, clean up everything and leave.
                    core::mem::forget(finish);
                    self.status.store(Status::Incomplete, Ordering::Release);
                    return Err(err);
                }
            };
            unsafe {
                // SAFETY:
                // `UnsafeCell`/deref: currently the only accessor, mutably
                // and immutably by cas exclusion.
                // `write`: pointer comes from `MaybeUninit`.
                (*self.data.get()).as_mut_ptr().write(val);
            };
            // If there were to be a panic with unwind enabled, the code would
            // short-circuit and never reach the point where it writes the inner data.
            // The destructor for Finish will run, and poison the Once to ensure that other
            // threads accessing it do not exhibit unwanted behavior, if there were to be
            // any inconsistency in data structures caused by the panicking thread.
            //
            // However, f() is expected in the general case not to panic. In that case, we
            // simply forget the guard, bypassing its destructor. We could theoretically
            // clear a flag instead, but this eliminates the call to the destructor at
            // compile time, and unconditionally poisons during an eventual panic, if
            // unwinding is enabled.
            core::mem::forget(finish);

            // SAFETY: Release is required here, so that all memory accesses done in the
            // closure when initializing become visible to other threads that perform Acquire
            // loads.
            //
            // And, we also know that the changes this thread has done will not magically
            // disappear from our cache, so it does not need to be AcqRel.
            self.status.store(Status::Complete, Ordering::Release);

            // This next line is mainly an optimization.
            return unsafe { Ok(self.force_get()) };
        }
    }

    /// Spins until the [`Once`] contains a value.
    ///
    /// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
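    ///
    /// # Examples
    ///
    /// A minimal sketch; here the initializing call happens on the same thread,
    /// but any thread may perform it:
    ///
    /// ```
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// INIT.call_once(|| 42);
    /// // `wait` spins until a value is available, then returns a reference to it.
    /// assert_eq!(*INIT.wait(), 42);
    /// ```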
    pub fn wait(&self) -> &T {
        loop {
            match self.poll() {
                Some(x) => break x,
                None => R::relax(),
            }
        }
    }

    /// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
    /// initialized. If initialization has not even begun, `None` will be returned.
    ///
    /// Note that in releases prior to `0.7`, this function was named `wait`.
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
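    ///
    /// # Examples
    ///
    /// A minimal sketch of non-blocking readiness checking:
    ///
    /// ```
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// assert!(INIT.poll().is_none()); // initialization has not begun
    /// INIT.call_once(|| 3);
    /// assert_eq!(INIT.poll().map(|x| *x), Some(3));
    /// ```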
    pub fn poll(&self) -> Option<&T> {
        loop {
            // SAFETY: Acquire is safe here, because if the status is COMPLETE, then we want to
            // make sure that all memory accesses done while initializing that value are visible
            // when we return a reference to the inner data after this load.
            match self.status.load(Ordering::Acquire) {
                Status::Incomplete => return None,
                Status::Running => R::relax(), // We spin
                Status::Complete => return Some(unsafe { self.force_get() }),
                Status::Panicked => panic!("Once previously poisoned by a panicked thread"),
            }
        }
    }
}

impl<T, R> Once<T, R> {
    /// Initialization constant of [`Once`].
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = Self {
        phantom: PhantomData,
        status: AtomicStatus::new(Status::Incomplete),
        data: UnsafeCell::new(MaybeUninit::uninit()),
    };

    /// Creates a new [`Once`].
    pub const fn new() -> Self {
        Self::INIT
    }

    /// Creates a new initialized [`Once`].
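    ///
    /// # Examples
    ///
    /// ```
    /// let once = spin::Once::initialized(42);
    /// assert_eq!(once.get(), Some(&42));
    /// ```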
    pub const fn initialized(data: T) -> Self {
        Self {
            phantom: PhantomData,
            status: AtomicStatus::new(Status::Complete),
            data: UnsafeCell::new(MaybeUninit::new(data)),
        }
    }

    /// Retrieve a pointer to the inner data.
    ///
    /// While this method itself is safe, accessing the pointer before the [`Once`] has been
    /// initialized is UB, unless the `Once` has already been written to through a pointer
    /// obtained from this method.
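    ///
    /// # Examples
    ///
    /// A sketch of reading through the pointer once the `Once` is known to be initialized:
    ///
    /// ```
    /// let once = spin::Once::initialized(5);
    /// let ptr = once.as_mut_ptr();
    /// // SAFETY: `once` was constructed in the initialized state.
    /// assert_eq!(unsafe { *ptr }, 5);
    /// ```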
    pub fn as_mut_ptr(&self) -> *mut T {
        // SAFETY:
        // * MaybeUninit<T> always has exactly the same layout as T
        self.data.get().cast::<T>()
    }

    /// Get a reference to the initialized instance. Must only be called once the status is
    /// COMPLETE.
    unsafe fn force_get(&self) -> &T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        &*(*self.data.get()).as_ptr()
    }

    /// Get a mutable reference to the initialized instance. Must only be called once the
    /// status is COMPLETE.
    unsafe fn force_get_mut(&mut self) -> &mut T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        &mut *(*self.data.get()).as_mut_ptr()
    }

    /// Get the value of the initialized instance. Must only be called once the status is
    /// COMPLETE.
    unsafe fn force_into_inner(self) -> T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        (*self.data.get()).as_ptr().read()
    }

    /// Returns a reference to the inner value if the [`Once`] has been initialized.
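    ///
    /// # Examples
    ///
    /// ```
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// assert!(INIT.get().is_none());
    /// INIT.call_once(|| 2);
    /// assert_eq!(INIT.get(), Some(&2));
    /// ```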
    pub fn get(&self) -> Option<&T> {
        // SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
        // nonatomic stores done when initializing, once we have loaded and checked the status.
        match self.status.load(Ordering::Acquire) {
            Status::Complete => Some(unsafe { self.force_get() }),
            _ => None,
        }
    }

    /// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
    /// checking initialization is unacceptable and the `Once` has already been initialized.
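    ///
    /// # Examples
    ///
    /// A sketch where initialization is guaranteed before the unchecked access:
    ///
    /// ```
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// INIT.call_once(|| 4);
    /// // SAFETY: `call_once` has returned, so `INIT` is initialized.
    /// assert_eq!(unsafe { *INIT.get_unchecked() }, 4);
    /// ```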
    pub unsafe fn get_unchecked(&self) -> &T {
        debug_assert_eq!(
            self.status.load(Ordering::SeqCst),
            Status::Complete,
            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
        );
        self.force_get()
    }

    /// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
    ///
    /// Because this method requires a mutable reference to the [`Once`], no synchronization
    /// overhead is required to access the inner value. In effect, it is zero-cost.
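    ///
    /// # Examples
    ///
    /// ```
    /// let mut once = spin::Once::initialized(1);
    /// if let Some(x) = once.get_mut() {
    ///     *x += 1;
    /// }
    /// assert_eq!(once.get(), Some(&2));
    /// ```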
    pub fn get_mut(&mut self) -> Option<&mut T> {
        match *self.status.get_mut() {
            Status::Complete => Some(unsafe { self.force_get_mut() }),
            _ => None,
        }
    }

    /// Returns a mutable reference to the inner value on the unchecked assumption that the
    /// [`Once`] has been initialized.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
    /// checking initialization is unacceptable and the `Once` has already been initialized.
    pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
        debug_assert_eq!(
            self.status.load(Ordering::SeqCst),
            Status::Complete,
            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
        );
        self.force_get_mut()
    }

    /// Returns the inner value if the [`Once`] has been initialized.
    ///
    /// Because this method requires ownership of the [`Once`], no synchronization overhead
    /// is required to access the inner value. In effect, it is zero-cost.
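    ///
    /// # Examples
    ///
    /// ```
    /// let once = spin::Once::initialized(String::from("hello"));
    /// assert_eq!(once.try_into_inner(), Some(String::from("hello")));
    ///
    /// let empty = spin::Once::<String>::new();
    /// assert_eq!(empty.try_into_inner(), None);
    /// ```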
    pub fn try_into_inner(mut self) -> Option<T> {
        match *self.status.get_mut() {
            Status::Complete => Some(unsafe { self.force_into_inner() }),
            _ => None,
        }
    }

    /// Returns the inner value on the unchecked assumption that the [`Once`] has been initialized.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the `Once` has not already been initialized because
    /// uninitialized memory will be read, immediately triggering undefined behaviour.
    /// This can be useful if the `Once` has already been initialized and you want to bypass
    /// an `Option` check.
    pub unsafe fn into_inner_unchecked(self) -> T {
        debug_assert_eq!(
            self.status.load(Ordering::SeqCst),
            Status::Complete,
            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
        );
        self.force_into_inner()
    }

    /// Checks whether the value has been initialized.
    ///
    /// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
    /// therefore it is safe to access the value directly via
    /// [`get_unchecked`](Self::get_unchecked) if this returns true.
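    ///
    /// # Examples
    ///
    /// ```
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// assert!(!INIT.is_completed());
    /// INIT.call_once(|| 9);
    /// assert!(INIT.is_completed());
    /// ```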
    pub fn is_completed(&self) -> bool {
        // TODO: Add a similar variant for Relaxed?
        self.status.load(Ordering::Acquire) == Status::Complete
    }
}

impl<T, R> From<T> for Once<T, R> {
    fn from(data: T) -> Self {
        Self::initialized(data)
    }
}

impl<T, R> Drop for Once<T, R> {
    fn drop(&mut self) {
        // No need to do any atomic access here, we have &mut!
        if *self.status.get_mut() == Status::Complete {
            unsafe {
                // TODO: Use MaybeUninit::assume_init_drop once stabilised
                core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
            }
        }
    }
}

struct Finish<'a> {
    status: &'a AtomicStatus,
}

impl<'a> Drop for Finish<'a> {
    fn drop(&mut self) {
        // While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
        // This is mainly because panics are not meant to be fast at all, but also because if
        // there were to be a compiler bug which reorders accesses within the same thread,
        // where it should not, we want to be sure that the panic really is handled, and does
        // not cause additional problems. SeqCst will therefore help guarding against such
        // bugs.
        self.status.store(Status::Panicked, Ordering::SeqCst);
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::AtomicU32;
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    use super::*;

    #[test]
    fn smoke_once() {
        static O: Once = Once::new();
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }

    #[test]
    fn smoke_once_value() {
        static O: Once<usize> = Once::new();
        let a = O.call_once(|| 1);
        assert_eq!(*a, 1);
        let b = O.call_once(|| 2);
        assert_eq!(*b, 1);
    }

    #[test]
    fn stampede_once() {
        static O: Once = Once::new();
        static mut RUN: bool = false;

        let (tx, rx) = channel();
        let mut ts = Vec::new();
        for _ in 0..10 {
            let tx = tx.clone();
            ts.push(thread::spawn(move || {
                for _ in 0..4 {
                    thread::yield_now()
                }
                unsafe {
                    O.call_once(|| {
                        assert!(!RUN);
                        RUN = true;
                    });
                    assert!(RUN);
                }
                tx.send(()).unwrap();
            }));
        }

        unsafe {
            O.call_once(|| {
                assert!(!RUN);
                RUN = true;
            });
            assert!(RUN);
        }

        for _ in 0..10 {
            rx.recv().unwrap();
        }

        for t in ts {
            t.join().unwrap();
        }
    }

    #[test]
    fn get() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.get().is_none());
        INIT.call_once(|| 2);
        assert_eq!(INIT.get().map(|r| *r), Some(2));
    }

    #[test]
    fn get_no_wait() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.get().is_none());
        let t = thread::spawn(move || {
            INIT.call_once(|| {
                thread::sleep(std::time::Duration::from_secs(3));
                42
            });
        });
        assert!(INIT.get().is_none());

        t.join().unwrap();
    }

    #[test]
    fn poll() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.poll().is_none());
        INIT.call_once(|| 3);
        assert_eq!(INIT.poll().map(|r| *r), Some(3));
    }

    #[test]
    fn wait() {
        static INIT: Once<usize> = Once::new();

        let t = std::thread::spawn(|| {
            assert_eq!(*INIT.wait(), 3);
            assert!(INIT.is_completed());
        });

        for _ in 0..4 {
            thread::yield_now()
        }

        assert!(INIT.poll().is_none());
        INIT.call_once(|| 3);

        t.join().unwrap();
    }

    #[test]
    fn panic() {
        use std::panic;

        static INIT: Once = Once::new();

        // poison the once
        let t = panic::catch_unwind(|| {
            INIT.call_once(|| panic!());
        });
        assert!(t.is_err());

        // poisoning propagates
        let t = panic::catch_unwind(|| {
            INIT.call_once(|| {});
        });
        assert!(t.is_err());
    }

    #[test]
    fn init_constant() {
        static O: Once = Once::INIT;
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }

    static mut CALLED: bool = false;

    struct DropTest {}

    impl Drop for DropTest {
        fn drop(&mut self) {
            unsafe {
                CALLED = true;
            }
        }
    }

    #[test]
    fn try_call_once_err() {
        let once = Once::<_, Spin>::new();
        let shared = Arc::new((once, AtomicU32::new(0)));

        let (tx, rx) = channel();

        let t0 = {
            let shared = shared.clone();
            thread::spawn(move || {
                let (once, called) = &*shared;

                once.try_call_once(|| {
                    called.fetch_add(1, Ordering::AcqRel);
                    tx.send(()).unwrap();
                    thread::sleep(std::time::Duration::from_millis(50));
                    Err(())
                })
                .ok();
            })
        };

        let t1 = {
            let shared = shared.clone();
            thread::spawn(move || {
                rx.recv().unwrap();
                let (once, called) = &*shared;
                assert_eq!(
                    called.load(Ordering::Acquire),
                    1,
                    "leader thread did not run first"
                );

                once.call_once(|| {
                    called.fetch_add(1, Ordering::AcqRel);
                });
            })
        };

        t0.join().unwrap();
        t1.join().unwrap();

        assert_eq!(shared.1.load(Ordering::Acquire), 2);
    }

    // This is sort of two test cases, but if we write them as separate test methods
    // they can be executed concurrently and then fail some small fraction of the
    // time.
    #[test]
    fn drop_occurs_and_skip_uninit_drop() {
        unsafe {
            CALLED = false;
        }

        {
            let once = Once::<_>::new();
            once.call_once(|| DropTest {});
        }

        assert!(unsafe { CALLED });
        // Now test that we skip drops for the uninitialized case.
        unsafe {
            CALLED = false;
        }

        let once = Once::<DropTest>::new();
        drop(once);

        assert!(unsafe { !CALLED });
    }

    #[test]
    fn call_once_test() {
        for _ in 0..20 {
            use std::sync::atomic::AtomicUsize;
            use std::sync::Arc;
            use std::time::Duration;
            let share = Arc::new(AtomicUsize::new(0));
            let once = Arc::new(Once::<_, Spin>::new());
            let mut hs = Vec::new();
            for _ in 0..8 {
                let h = thread::spawn({
                    let share = share.clone();
                    let once = once.clone();
                    move || {
                        thread::sleep(Duration::from_millis(10));
                        once.call_once(|| {
                            share.fetch_add(1, Ordering::SeqCst);
                        });
                    }
                });
                hs.push(h);
            }
            for h in hs {
                h.join().unwrap();
            }
            assert_eq!(1, share.load(Ordering::SeqCst));
        }
    }
}