//! Epoch-based memory reclamation.
//!
//! An interesting problem concurrent collections deal with comes from the remove operation.
//! Suppose that a thread removes an element from a lock-free map, while another thread is reading
//! that same element at the same time. The first thread must wait until the second thread stops
//! reading the element. Only then is it safe to destruct it.
//!
//! Programming languages that come with garbage collectors solve this problem trivially. The
//! garbage collector will destruct the removed element when no thread can hold a reference to it
//! anymore.
//!
//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an
//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and
//! marked with the current epoch. Every time a thread accesses a collection, it checks the current
//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread
//! can be referencing it anymore.
//!
//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit
//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something
//! users of concurrent collections don't have to worry much about.
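//!
//! A minimal sketch of that flow from the user's side (assuming the `std` feature, so that
//! [`pin`] is available, and that this crate is built as `crossbeam_epoch`):
//!
//! ```
//! use crossbeam_epoch::{self as epoch, Atomic, Shared};
//! use std::sync::atomic::Ordering::SeqCst;
//!
//! let a = Atomic::new(1234);
//!
//! let guard = epoch::pin(); // this participant is now pinned
//! let removed = a.swap(Shared::null(), SeqCst, &guard); // "remove" the element
//! unsafe {
//!     // Defer destruction until no pinned participant can still be reading `removed`.
//!     guard.defer_destroy(removed);
//! }
//! drop(guard); // unpinned; the deferred destructor may run once the epoch advances
//! ```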
//!
//! # Pointers
//!
//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which
//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a
//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely
//! read.
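//!
//! For instance, a load might look like this sketch (`deref` is unsafe because the caller must
//! guarantee the pointer is still valid, which the pin here ensures):
//!
//! ```
//! use crossbeam_epoch::{self as epoch, Atomic};
//! use std::sync::atomic::Ordering::SeqCst;
//!
//! let a = Atomic::new(7);
//! let guard = epoch::pin();
//! let shared = a.load(SeqCst, &guard); // a `Shared` tied to the guard's lifetime
//! assert_eq!(unsafe { shared.deref() }, &7);
//! ```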
//!
//! # Pinning
//!
//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant
//! we declare that any object that gets removed from now on must not be destructed just
//! yet. Garbage collection of newly removed objects is suspended until the participant gets
//! unpinned.
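//!
//! A sketch of the pin/unpin lifecycle (unpinning happens when the returned [`Guard`] is
//! dropped):
//!
//! ```
//! use crossbeam_epoch as epoch;
//!
//! let guard = epoch::pin(); // this participant is now pinned
//! // ... load `Atomic`s and read the pointed-to objects through `guard` ...
//! drop(guard); // unpinned; reclamation of newly removed objects may resume
//! ```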
//!
//! # Garbage
//!
//! Objects that get removed from concurrent collections must be stashed away until all currently
//! pinned participants get unpinned. Such objects can be stored into a thread-local or global
//! storage, where they are kept until the right time for their destruction comes.
//!
//! There is a global shared instance of a garbage queue. You can [`defer`](Guard::defer) the
//! execution of an arbitrary function until the global epoch is advanced far enough. Most
//! notably, concurrent data structures may defer the deallocation of an object.
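//!
//! A sketch of deferring an arbitrary function (the closure must be `Send + 'static`):
//!
//! ```
//! use crossbeam_epoch as epoch;
//!
//! let guard = epoch::pin();
//! let message = "the epoch has advanced far enough";
//! guard.defer(move || println!("{}", message)); // runs once it is safe to do so
//! ```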
//!
//! # APIs
//!
//! For the majority of use cases, just use the default garbage collector by invoking [`pin`]. If
//! you want to create your own garbage collector, use the [`Collector`] API.
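//!
//! A sketch of the [`Collector`] route (each participant registers a [`LocalHandle`] and pins
//! through it):
//!
//! ```
//! use crossbeam_epoch::Collector;
//!
//! let collector = Collector::new();
//! let handle = collector.register(); // one handle per participant
//! let guard = handle.pin(); // pin within this custom collector
//! ```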

#![doc(test(
    no_crate_inject,
    attr(
        deny(warnings, rust_2018_idioms),
        allow(dead_code, unused_assignments, unused_variables)
    )
))]
#![warn(
    missing_docs,
    missing_debug_implementations,
    rust_2018_idioms,
    unreachable_pub
)]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(crossbeam_loom)]
extern crate loom_crate as loom;

#[cfg(crossbeam_loom)]
#[allow(unused_imports, dead_code)]
mod primitive {
    pub(crate) mod cell {
        pub(crate) use loom::cell::UnsafeCell;
    }
    pub(crate) mod sync {
        pub(crate) mod atomic {
            pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};

            // FIXME: loom does not support compiler_fence at the moment.
            // https://github.com/tokio-rs/loom/issues/117
            // we use fence as a stand-in for compiler_fence for the time being.
            // this may miss some races since fence is stronger than compiler_fence,
            // but it's the best we can do for the time being.
            pub(crate) use self::fence as compiler_fence;
        }
        pub(crate) use loom::sync::Arc;
    }
    pub(crate) use loom::thread_local;
}
#[cfg(target_has_atomic = "ptr")]
#[cfg(not(crossbeam_loom))]
#[allow(unused_imports, dead_code)]
mod primitive {
    pub(crate) mod cell {
        #[derive(Debug)]
        #[repr(transparent)]
        pub(crate) struct UnsafeCell<T>(::core::cell::UnsafeCell<T>);

        // loom's UnsafeCell has a slightly different API than the standard library UnsafeCell.
        // Since we want the rest of the code to be agnostic to whether it's running under loom or
        // not, we write this small wrapper that provides the loom-supported API for the standard
        // library UnsafeCell. This is also what the loom documentation recommends:
        // https://github.com/tokio-rs/loom#handling-loom-api-differences
        impl<T> UnsafeCell<T> {
            #[inline]
            pub(crate) const fn new(data: T) -> UnsafeCell<T> {
                UnsafeCell(::core::cell::UnsafeCell::new(data))
            }

            #[inline]
            pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
                f(self.0.get())
            }

            #[inline]
            pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
                f(self.0.get())
            }
        }
    }
    pub(crate) mod sync {
        pub(crate) mod atomic {
            pub(crate) use core::sync::atomic::{
                compiler_fence, fence, AtomicPtr, AtomicUsize, Ordering,
            };
        }
        #[cfg(feature = "alloc")]
        pub(crate) use alloc::sync::Arc;
    }

    #[cfg(feature = "std")]
    pub(crate) use std::thread_local;
}

#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
extern crate alloc;

#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod atomic;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod collector;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod deferred;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod epoch;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod guard;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod internal;
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
mod sync;

#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
#[allow(deprecated)]
pub use crate::atomic::{CompareAndSetError, CompareAndSetOrdering};
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
pub use crate::{
    atomic::{Atomic, CompareExchangeError, Owned, Pointable, Pointer, Shared},
    collector::{Collector, LocalHandle},
    guard::{unprotected, Guard},
};

#[cfg(feature = "std")]
mod default;
#[cfg(feature = "std")]
pub use crate::default::{default_collector, is_pinned, pin};