1 | //! Epoch-based memory reclamation. |
2 | //! |
3 | //! An interesting problem concurrent collections deal with comes from the remove operation. |
4 | //! Suppose that a thread removes an element from a lock-free map, while another thread is reading |
//! that same element at the same time. The first thread must wait until the second thread stops
//! reading the element; only then is it safe to destroy it.
7 | //! |
8 | //! Programming languages that come with garbage collectors solve this problem trivially. The |
9 | //! garbage collector will destruct the removed element when no thread can hold a reference to it |
10 | //! anymore. |
11 | //! |
12 | //! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an |
13 | //! element gets removed from a concurrent collection, it is inserted into a pile of garbage and |
14 | //! marked with the current epoch. Every time a thread accesses a collection, it checks the current |
15 | //! epoch, attempts to increment it, and destructs some garbage that became so old that no thread |
16 | //! can be referencing it anymore. |
17 | //! |
18 | //! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit |
19 | //! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something |
20 | //! users of concurrent collections don't have to worry much about. |
21 | //! |
22 | //! # Pointers |
23 | //! |
24 | //! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which |
25 | //! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a |
26 | //! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely |
27 | //! read. |
28 | //! |
29 | //! # Pinning |
30 | //! |
31 | //! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant |
32 | //! we declare that any object that gets removed from now on must not be destructed just |
33 | //! yet. Garbage collection of newly removed objects is suspended until the participant gets |
34 | //! unpinned. |
35 | //! |
36 | //! # Garbage |
37 | //! |
38 | //! Objects that get removed from concurrent collections must be stashed away until all currently |
39 | //! pinned participants get unpinned. Such objects can be stored into a thread-local or global |
40 | //! storage, where they are kept until the right time for their destruction comes. |
41 | //! |
//! There is a global shared instance of a garbage queue. You can [`defer`](Guard::defer) the
//! execution of an arbitrary function until the global epoch is advanced far enough. Most notably,
//! concurrent data structures may defer the deallocation of an object.
45 | //! |
46 | //! # APIs |
47 | //! |
//! For the majority of use cases, just use the default garbage collector by invoking [`pin`]. If
//! you want to create your own garbage collector, use the [`Collector`] API.
50 | |
// Doctest configuration: inject no extra crates, deny warnings in examples,
// but allow the lints that short example snippets commonly trip.
#![doc(test(
    no_crate_inject,
    attr(
        deny(warnings, rust_2018_idioms),
        allow(dead_code, unused_assignments, unused_variables)
    )
))]
#![warn(
    missing_docs,
    missing_debug_implementations,
    rust_2018_idioms,
    unreachable_pub
)]
// The crate is `no_std`-compatible whenever the "std" feature is disabled.
#![cfg_attr(not(feature = "std"), no_std)]
65 | |
// Under loom-based concurrency testing, all synchronization must go through
// loom's instrumented primitives rather than the real ones.
#[cfg(crossbeam_loom)]
extern crate loom_crate as loom;

use cfg_if::cfg_if;

// Loom variant of the `primitive` facade: re-export loom's versions of the
// synchronization primitives so the rest of the crate can be written against
// a single API regardless of whether it runs under loom or not.
#[cfg(crossbeam_loom)]
#[allow(unused_imports, dead_code)]
mod primitive {
    pub(crate) mod cell {
        pub(crate) use loom::cell::UnsafeCell;
    }
    pub(crate) mod sync {
        pub(crate) mod atomic {
            use core::sync::atomic::Ordering;
            pub(crate) use loom::sync::atomic::{fence, AtomicUsize};

            // FIXME: loom does not support compiler_fence at the moment.
            // https://github.com/tokio-rs/loom/issues/117
            // we use fence as a stand-in for compiler_fence for the time being.
            // this may miss some races since fence is stronger than compiler_fence,
            // but it's the best we can do for the time being.
            pub(crate) use self::fence as compiler_fence;
        }
        pub(crate) use loom::sync::Arc;
    }
    pub(crate) use loom::thread_local;
}
// Non-loom variant of the `primitive` facade: in normal builds (on targets
// with atomic CAS support), expose the core/alloc/std primitives, wrapped
// where necessary to present the same API shape as loom's.
#[cfg(not(crossbeam_no_atomic_cas))]
#[cfg(not(crossbeam_loom))]
#[allow(unused_imports, dead_code)]
mod primitive {
    #[cfg(feature = "alloc")]
    pub(crate) mod cell {
        /// A transparent wrapper around `core::cell::UnsafeCell` that exposes
        /// loom's closure-based access API (`with` / `with_mut`).
        #[derive(Debug)]
        #[repr(transparent)]
        pub(crate) struct UnsafeCell<T>(::core::cell::UnsafeCell<T>);

        // loom's UnsafeCell has a slightly different API than the standard library UnsafeCell.
        // Since we want the rest of the code to be agnostic to whether it's running under loom or
        // not, we write this small wrapper that provides the loom-supported API for the standard
        // library UnsafeCell. This is also what the loom documentation recommends:
        // https://github.com/tokio-rs/loom#handling-loom-api-differences
        impl<T> UnsafeCell<T> {
            /// Creates a new cell holding `data`.
            #[inline]
            pub(crate) const fn new(data: T) -> UnsafeCell<T> {
                UnsafeCell(::core::cell::UnsafeCell::new(data))
            }

            /// Runs `f` with a shared raw pointer to the cell's contents.
            #[inline]
            pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
                f(self.0.get())
            }

            /// Runs `f` with a mutable raw pointer to the cell's contents.
            ///
            /// NOTE(review): takes `&self` (not `&mut self`) to mirror loom's
            /// API; callers are responsible for upholding aliasing rules.
            #[inline]
            pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
                f(self.0.get())
            }
        }
    }
    #[cfg(feature = "alloc")]
    pub(crate) mod sync {
        pub(crate) mod atomic {
            pub(crate) use core::sync::atomic::compiler_fence;
            pub(crate) use core::sync::atomic::fence;
            pub(crate) use core::sync::atomic::AtomicUsize;
        }
        pub(crate) use alloc::sync::Arc;
    }

    #[cfg(feature = "std")]
    pub(crate) use std::thread_local;
}
138 | |
// The core epoch machinery needs both atomic CAS support and an allocator,
// so the bulk of the crate is gated on the "alloc" feature.
#[cfg(not(crossbeam_no_atomic_cas))]
cfg_if! {
    if #[cfg(feature = "alloc")] {
        extern crate alloc;

        mod atomic;
        mod collector;
        mod deferred;
        mod epoch;
        mod guard;
        mod internal;
        mod sync;

        // Public API surface of the crate's pointer and collector types.
        pub use self::atomic::{
            Pointable, Atomic, CompareExchangeError,
            Owned, Pointer, Shared,
        };
        pub use self::collector::{Collector, LocalHandle};
        pub use self::guard::{unprotected, Guard};

        // Deprecated items, re-exported only for backward compatibility.
        #[allow(deprecated)]
        pub use self::atomic::{CompareAndSetError, CompareAndSetOrdering};
    }
}
163 | |
// The default (global) collector and thread-local pinning require `std`
// (thread-local storage), so they are gated on the "std" feature.
cfg_if! {
    if #[cfg(feature = "std")] {
        mod default;
        pub use self::default::{default_collector, is_pinned, pin};
    }
}
170 | |