1 | //! A lock-free concurrent slab. |
2 | //! |
3 | //! Slabs provide pre-allocated storage for many instances of a single data |
4 | //! type. When a large number of values of a single type are required, |
5 | //! this can be more efficient than allocating each item individually. Since the |
6 | //! allocated items are the same size, memory fragmentation is reduced, and |
7 | //! creating and removing new items can be very cheap. |
8 | //! |
9 | //! This crate implements a lock-free concurrent slab, indexed by `usize`s. |
10 | //! |
11 | //! ## Usage |
12 | //! |
13 | //! First, add this to your `Cargo.toml`: |
14 | //! |
15 | //! ```toml |
16 | //! sharded-slab = "0.1.4" |
17 | //! ``` |
18 | //! |
19 | //! This crate provides two types, [`Slab`] and [`Pool`], which provide |
20 | //! slightly different APIs for using a sharded slab. |
21 | //! |
22 | //! [`Slab`] implements a slab for _storing_ small types, sharing them between |
23 | //! threads, and accessing them by index. New entries are allocated by |
24 | //! [inserting] data, moving it in by value. Similarly, entries may be |
25 | //! deallocated by [taking] from the slab, moving the value out. This API is |
26 | //! similar to a `Vec<Option<T>>`, but with lock-free concurrent insertion |
27 | //! and removal. |
28 | //! |
29 | //! In contrast, the [`Pool`] type provides an [object pool] style API for |
30 | //! _reusing storage_. Rather than constructing values and moving them into the |
31 | //! pool, as with [`Slab`], [allocating an entry][create] from the pool takes a |
32 | //! closure that's provided with a mutable reference to initialize the entry in |
33 | //! place. When entries are deallocated, they are [cleared] in place. Types |
34 | //! which own a heap allocation can be cleared by dropping any _data_ they |
35 | //! store, while retaining any previously-allocated capacity. This means that a |
36 | //! [`Pool`] may be used to reuse a set of existing heap allocations, reducing |
37 | //! allocator load. |
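//!
//! For example, a minimal sketch of reusing pooled storage. The closure-based
//! initializer shown here follows the description above; see the [`Pool`]
//! documentation for the exact creation API (this sketch is not compiled as a
//! doctest):
//!
//! ```rust,ignore
//! # use sharded_slab::Pool;
//! let pool: Pool<String> = Pool::new();
//!
//! // Allocate an entry, initializing the pooled `String` in place.
//! let key = pool.create(|string| string.push_str("hello")).unwrap();
//! assert_eq!(pool.get(key).unwrap(), String::from("hello"));
//!
//! // Clearing the entry drops the string's *contents*, but its allocated
//! // capacity is retained for reuse by a later allocation.
//! pool.clear(key);
//! ```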
38 | //! |
39 | //! [inserting]: Slab::insert |
40 | //! [taking]: Slab::take |
41 | //! [create]: Pool::create |
42 | //! [cleared]: Clear |
43 | //! [object pool]: https://en.wikipedia.org/wiki/Object_pool_pattern |
44 | //! |
45 | //! # Examples |
46 | //! |
47 | //! Inserting an item into the slab, returning an index: |
48 | //! ```rust |
49 | //! # use sharded_slab::Slab; |
50 | //! let slab = Slab::new(); |
51 | //! |
52 | //! let key = slab.insert("hello world").unwrap(); |
53 | //! assert_eq!(slab.get(key).unwrap(), "hello world"); |
54 | //! ``` |
55 | //! |
56 | //! To share a slab across threads, it may be wrapped in an `Arc`: |
57 | //! ```rust |
58 | //! # use sharded_slab::Slab; |
59 | //! use std::sync::Arc; |
60 | //! let slab = Arc::new(Slab::new()); |
61 | //! |
62 | //! let slab2 = slab.clone(); |
63 | //! let thread2 = std::thread::spawn(move || { |
64 | //! let key = slab2.insert("hello from thread two").unwrap(); |
65 | //! assert_eq!(slab2.get(key).unwrap(), "hello from thread two"); |
66 | //! key |
67 | //! }); |
68 | //! |
69 | //! let key1 = slab.insert("hello from thread one").unwrap(); |
70 | //! assert_eq!(slab.get(key1).unwrap(), "hello from thread one"); |
71 | //! |
72 | //! // Wait for thread 2 to complete. |
73 | //! let key2 = thread2.join().unwrap(); |
74 | //! |
75 | //! // The item inserted by thread 2 remains in the slab. |
76 | //! assert_eq!(slab.get(key2).unwrap(), "hello from thread two"); |
77 | //! ``` |
78 | //! |
79 | //! If items in the slab must be mutated, a `Mutex` or `RwLock` may be used for |
80 | //! each item, providing granular locking of items rather than of the slab: |
81 | //! |
82 | //! ```rust |
83 | //! # use sharded_slab::Slab; |
84 | //! use std::sync::{Arc, Mutex}; |
85 | //! let slab = Arc::new(Slab::new()); |
86 | //! |
87 | //! let key = slab.insert(Mutex::new(String::from("hello world"))).unwrap(); |
88 | //! |
89 | //! let slab2 = slab.clone(); |
90 | //! let thread2 = std::thread::spawn(move || { |
91 | //! let hello = slab2.get(key).expect("item missing"); |
92 | //! let mut hello = hello.lock().expect("mutex poisoned"); |
93 | //! *hello = String::from("hello everyone!"); |
94 | //! }); |
95 | //! |
96 | //! thread2.join().unwrap(); |
97 | //! |
98 | //! let hello = slab.get(key).expect("item missing"); |
99 | //! let mut hello = hello.lock().expect("mutex poisoned"); |
100 | //! assert_eq!(hello.as_str(), "hello everyone!"); |
101 | //! ``` |
102 | //! |
103 | //! # Configuration |
104 | //! |
105 | //! For performance reasons, several values used by the slab are calculated as |
106 | //! constants. In order to allow users to tune the slab's parameters, we provide |
107 | //! a [`Config`] trait which defines these parameters as associated `consts`. |
108 | //! The `Slab` type is generic over a `C: Config` parameter. |
109 | //! |
110 | //! [`Config`]: trait.Config.html |
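//!
//! For example, a minimal sketch of a custom configuration, assuming any
//! parameters that are not overridden fall back to the defaults provided by
//! the [`Config`] trait:
//!
//! ```rust
//! # use sharded_slab::Slab;
//! use sharded_slab::Config;
//!
//! struct CustomConfig;
//!
//! impl Config for CustomConfig {
//!     // Reserve the two highest-order bits of each index for user data.
//!     const RESERVED_BITS: usize = 2;
//! }
//!
//! let slab = Slab::new_with_config::<CustomConfig>();
//! let key = slab.insert("hello world").unwrap();
//! assert_eq!(slab.get(key).unwrap(), "hello world");
//! ```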
111 | //! |
112 | //! # Comparison with Similar Crates |
113 | //! |
114 | //! - [`slab`]: Carl Lerche's `slab` crate provides a slab implementation with a |
115 | //! similar API, implemented by storing all data in a single vector. |
116 | //! |
117 | //! Unlike `sharded_slab`, inserting and removing elements from the slab |
118 | //! requires mutable access. This means that if the slab is accessed |
119 | //! concurrently by multiple threads, it is necessary for it to be protected |
120 | //! by a `Mutex` or `RwLock`. Items may not be inserted or removed (or |
121 | //! accessed, if a `Mutex` is used) concurrently, even when they are |
122 | //! unrelated. In many cases, the lock can become a significant bottleneck. On |
123 | //! the other hand, this crate allows separate indices in the slab to be |
124 | //! accessed, inserted, and removed concurrently without requiring a global |
125 | //! lock. Therefore, when the slab is shared across multiple threads, this |
126 | //! crate offers significantly better performance than `slab`. |
127 | //! |
128 | //! However, the lock-free slab introduces some additional constant-factor |
129 | //! overhead. This means that in use-cases where a slab is _not_ shared by |
130 | //! multiple threads and locking is not required, this crate will likely offer |
131 | //! slightly worse performance. |
132 | //! |
133 | //! In summary: `sharded-slab` offers significantly improved performance in |
134 | //! concurrent use-cases, while `slab` should be preferred in single-threaded |
135 | //! use-cases. |
136 | //! |
137 | //! [`slab`]: https://crates.io/crates/slab |
138 | //! |
139 | //! # Safety and Correctness |
140 | //! |
141 | //! Most implementations of lock-free data structures in Rust require some |
142 | //! amount of unsafe code, and this crate is not an exception. In order to catch |
143 | //! potential bugs in this unsafe code, we make use of [`loom`], a |
144 | //! permutation-testing tool for concurrent Rust programs. All `unsafe` blocks in |
145 | //! this crate occur in accesses to `loom` `UnsafeCell`s. This means that when |
146 | //! those accesses occur in this crate's tests, `loom` will assert that they are |
147 | //! valid under the C11 memory model across multiple permutations of concurrent |
148 | //! executions of those tests. |
149 | //! |
150 | //! In order to guard against the [ABA problem][aba], this crate makes use of |
151 | //! _generational indices_. Each slot in the slab tracks a generation counter |
152 | //! which is incremented every time a value is inserted into that slot, and the |
153 | //! indices returned by [`Slab::insert`] include the generation of the slot when |
154 | //! the value was inserted, packed into the high-order bits of the index. This |
155 | //! ensures that if a value is inserted, removed, and a new value is inserted |
156 | //! into the same slot in the slab, the key returned by the first call to |
157 | //! `insert` will not map to the new value. |
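//!
//! For example, a sketch of how a stale key behaves once its slot may have
//! been reused:
//!
//! ```rust
//! # use sharded_slab::Slab;
//! let slab = Slab::new();
//!
//! // Insert a value, then remove it, freeing its slot...
//! let key1 = slab.insert("hello").unwrap();
//! slab.take(key1).unwrap();
//!
//! // ...then insert a new value, which may reuse the freed slot.
//! let key2 = slab.insert("world").unwrap();
//!
//! // The stale key does not map to the new value, because the slot's
//! // generation has changed (or the slot is simply no longer occupied).
//! assert!(slab.get(key1).is_none());
//! assert_eq!(slab.get(key2).unwrap(), "world");
//! ```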
158 | //! |
159 | //! Since a fixed number of bits are set aside for the generation counter, the |
160 | //! counter will wrap around after being incremented enough times. To avoid |
161 | //! situations where a returned index lives long enough to see the generation |
162 | //! counter wrap back around to the same value, it is good to be fairly generous |
163 | //! when configuring how the index bits are allocated. |
164 | //! |
165 | //! [`loom`]: https://crates.io/crates/loom |
166 | //! [aba]: https://en.wikipedia.org/wiki/ABA_problem |
167 | //! [`Slab::insert`]: struct.Slab.html#method.insert |
168 | //! |
169 | //! # Performance |
170 | //! |
171 | //! These graphs were produced by [benchmarks] of the sharded slab implementation, |
172 | //! using the [`criterion`] crate. |
173 | //! |
174 | //! The first shows the results of a benchmark where an increasing number of |
175 | //! items are inserted into and then removed from a slab concurrently by five |
176 | //! threads. It compares the performance of the sharded slab implementation |
177 | //! with a `RwLock<slab::Slab>`: |
178 | //! |
179 | //! <img width="1124" alt="Screen Shot 2019-10-01 at 5 09 49 PM" src="https://user-images.githubusercontent.com/2796466/66078398-cd6c9f80-e516-11e9-9923-0ed6292e8498.png"> |
180 | //! |
181 | //! The second graph shows the results of a benchmark where an increasing |
182 | //! number of items are inserted and then removed by a _single_ thread. It |
183 | //! compares the performance of the sharded slab implementation with an |
184 | //! `RwLock<slab::Slab>` and a `mut slab::Slab`. |
185 | //! |
186 | //! <img width="925" alt="Screen Shot 2019-10-01 at 5 13 45 PM" src="https://user-images.githubusercontent.com/2796466/66078469-f0974f00-e516-11e9-95b5-f65f0aa7e494.png"> |
187 | //! |
188 | //! These benchmarks demonstrate that, while the sharded approach introduces |
189 | //! a small constant-factor overhead, it offers significantly better |
190 | //! performance across concurrent accesses. |
191 | //! |
192 | //! [benchmarks]: https://github.com/hawkw/sharded-slab/blob/master/benches/bench.rs |
193 | //! [`criterion`]: https://crates.io/crates/criterion |
194 | //! |
195 | //! # Implementation Notes |
196 | //! |
197 | //! See [this page](crate::implementation) for details on this crate's design |
198 | //! and implementation. |
199 | //! |
200 | #![doc(html_root_url = "https://docs.rs/sharded-slab/0.1.4")] |
201 | #![warn(missing_debug_implementations, missing_docs)] |
202 | #![cfg_attr(docsrs, warn(rustdoc::broken_intra_doc_links))] |
203 | #[macro_use] |
204 | mod macros; |
205 | |
206 | pub mod implementation; |
207 | pub mod pool; |
208 | |
209 | pub(crate) mod cfg; |
210 | pub(crate) mod sync; |
211 | |
212 | mod clear; |
213 | mod iter; |
214 | mod page; |
215 | mod shard; |
216 | mod tid; |
217 | |
218 | pub use self::{ |
219 | cfg::{Config, DefaultConfig}, |
220 | clear::Clear, |
221 | iter::UniqueIter, |
222 | }; |
223 | #[doc(inline)] |
224 | pub use pool::Pool; |
225 | |
226 | pub(crate) use tid::Tid; |
227 | |
228 | use cfg::CfgPrivate; |
229 | use shard::Shard; |
230 | use std::{fmt, marker::PhantomData, ptr, sync::Arc}; |
231 | |
232 | /// A sharded slab. |
233 | /// |
234 | /// See the [crate-level documentation](crate) for details on using this type. |
235 | pub struct Slab<T, C: cfg::Config = DefaultConfig> { |
236 | shards: shard::Array<Option<T>, C>, |
237 | _cfg: PhantomData<C>, |
238 | } |
239 | |
240 | /// A handle that allows access to an occupied entry in a [`Slab`]. |
241 | /// |
242 | /// While the guard exists, it indicates to the slab that the item the guard |
243 | /// references is currently being accessed. If the item is removed from the slab |
244 | /// while a guard exists, the removal will be deferred until all guards are |
245 | /// dropped. |
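///
/// For example, a sketch of the deferred removal described above:
///
/// ```
/// let slab = sharded_slab::Slab::new();
/// let key = slab.insert("hello world").unwrap();
///
/// // Hold a guard to the entry...
/// let entry = slab.get(key).unwrap();
///
/// // ...and mark the entry to be removed. The value is still readable
/// // through the existing guard.
/// assert!(slab.remove(key));
/// assert_eq!(entry, "hello world");
///
/// // Once the guard is dropped, the entry is actually cleared.
/// drop(entry);
/// assert!(!slab.contains(key));
/// ```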
246 | pub struct Entry<'a, T, C: cfg::Config = DefaultConfig> { |
247 | inner: page::slot::Guard<Option<T>, C>, |
248 | value: ptr::NonNull<T>, |
249 | shard: &'a Shard<Option<T>, C>, |
250 | key: usize, |
251 | } |
252 | |
253 | /// A handle to a vacant entry in a [`Slab`]. |
254 | /// |
255 | /// `VacantEntry` allows constructing values with the key that they will be |
256 | /// assigned to. |
257 | /// |
258 | /// # Examples |
259 | /// |
260 | /// ``` |
261 | /// # use sharded_slab::Slab; |
262 | /// let mut slab = Slab::new(); |
263 | /// |
264 | /// let hello = { |
265 | /// let entry = slab.vacant_entry().unwrap(); |
266 | /// let key = entry.key(); |
267 | /// |
268 | /// entry.insert((key, "hello")); |
269 | /// key |
270 | /// }; |
271 | /// |
272 | /// assert_eq!(hello, slab.get(hello).unwrap().0); |
273 | /// assert_eq!("hello" , slab.get(hello).unwrap().1); |
274 | /// ``` |
275 | #[derive(Debug)] |
276 | pub struct VacantEntry<'a, T, C: cfg::Config = DefaultConfig> { |
277 | inner: page::slot::InitGuard<Option<T>, C>, |
278 | key: usize, |
279 | _lt: PhantomData<&'a ()>, |
280 | } |
281 | |
282 | /// An owned reference to an occupied entry in a [`Slab`]. |
283 | /// |
284 | /// While the guard exists, it indicates to the slab that the item the guard |
285 | /// references is currently being accessed. If the item is removed from the slab |
286 | /// while the guard exists, the removal will be deferred until all guards are |
287 | /// dropped. |
288 | /// |
289 | /// Unlike [`Entry`], which borrows the slab, an `OwnedEntry` clones the [`Arc`] |
290 | /// around the slab. Therefore, it keeps the slab from being dropped until all |
291 | /// such guards have been dropped. This means that an `OwnedEntry` may be held for |
292 | /// an arbitrary lifetime. |
293 | /// |
294 | /// # Examples |
295 | /// |
296 | /// ``` |
297 | /// # use sharded_slab::Slab; |
298 | /// use std::sync::Arc; |
299 | /// |
300 | /// let slab: Arc<Slab<&'static str>> = Arc::new(Slab::new()); |
301 | /// let key = slab.insert("hello world").unwrap(); |
302 | /// |
303 | /// // Look up the created key, returning an `OwnedEntry`. |
304 | /// let value = slab.clone().get_owned(key).unwrap(); |
305 | /// |
306 | /// // Now, the original `Arc` clone of the slab may be dropped, but the |
307 | /// // returned `OwnedEntry` can still access the value. |
308 | /// assert_eq!(value, "hello world"); |
309 | /// ``` |
310 | /// |
311 | /// Unlike [`Entry`], an `OwnedEntry` may be stored in a struct which must live |
312 | /// for the `'static` lifetime: |
313 | /// |
314 | /// ``` |
315 | /// # use sharded_slab::Slab; |
316 | /// use sharded_slab::OwnedEntry; |
317 | /// use std::sync::Arc; |
318 | /// |
319 | /// pub struct MyStruct { |
320 | /// entry: OwnedEntry<&'static str>, |
321 | /// // ... other fields ... |
322 | /// } |
323 | /// |
324 | /// // Suppose this is some arbitrary function which requires a value that |
325 | /// // lives for the 'static lifetime... |
326 | /// fn function_requiring_static<T: 'static>(t: &T) { |
327 | /// // ... do something extremely important and interesting ... |
328 | /// } |
329 | /// |
330 | /// let slab: Arc<Slab<&'static str>> = Arc::new(Slab::new()); |
331 | /// let key = slab.insert("hello world").unwrap(); |
332 | /// |
333 | /// // Look up the created key, returning an `OwnedEntry`. |
334 | /// let entry = slab.clone().get_owned(key).unwrap(); |
335 | /// let my_struct = MyStruct { |
336 | /// entry, |
337 | /// // ... |
338 | /// }; |
339 | /// |
340 | /// // We can use `my_struct` anywhere where it is required to have the |
341 | /// // `'static` lifetime: |
342 | /// function_requiring_static(&my_struct); |
343 | /// ``` |
344 | /// |
345 | /// `OwnedEntry`s may be sent between threads: |
346 | /// |
347 | /// ``` |
348 | /// # use sharded_slab::Slab; |
349 | /// use std::{thread, sync::Arc}; |
350 | /// |
351 | /// let slab: Arc<Slab<&'static str>> = Arc::new(Slab::new()); |
352 | /// let key = slab.insert("hello world").unwrap(); |
353 | /// |
354 | /// // Look up the created key, returning an `OwnedEntry`. |
355 | /// let value = slab.clone().get_owned(key).unwrap(); |
356 | /// |
357 | /// thread::spawn(move || { |
358 | /// assert_eq!(value, "hello world"); |
359 | /// // ... |
360 | /// }).join().unwrap(); |
361 | /// ``` |
362 | /// |
363 | /// [`get`]: Slab::get |
364 | /// [`Arc`]: std::sync::Arc |
365 | pub struct OwnedEntry<T, C = DefaultConfig> |
366 | where |
367 | C: cfg::Config, |
368 | { |
369 | inner: page::slot::Guard<Option<T>, C>, |
370 | value: ptr::NonNull<T>, |
371 | slab: Arc<Slab<T, C>>, |
372 | key: usize, |
373 | } |
374 | |
375 | impl<T> Slab<T> { |
376 | /// Returns a new slab with the default configuration parameters. |
377 | pub fn new() -> Self { |
378 | Self::new_with_config() |
379 | } |
380 | |
381 | /// Returns a new slab with the provided configuration parameters. |
382 | pub fn new_with_config<C: cfg::Config>() -> Slab<T, C> { |
383 | C::validate(); |
384 | Slab { |
385 | shards: shard::Array::new(), |
386 | _cfg: PhantomData, |
387 | } |
388 | } |
389 | } |
390 | |
391 | impl<T, C: cfg::Config> Slab<T, C> { |
392 | /// The number of bits in each index which are used by the slab. |
393 | /// |
394 | /// If other data is packed into the `usize` indices returned by |
395 | /// [`Slab::insert`], user code is free to use any bits higher than the |
396 | /// `USED_BITS`-th bit. |
397 | /// |
398 | /// This is determined by the [`Config`] type that configures the slab's |
399 | /// parameters. By default, all bits are used; this can be changed by |
400 | /// overriding the [`Config::RESERVED_BITS`][res] constant. |
401 | /// |
402 | /// [res]: crate::Config#RESERVED_BITS |
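///
/// # Examples
///
/// A sketch of inspecting the used bits; with the default configuration, no
/// bits are reserved, so every bit of the index is used:
///
/// ```
/// # use sharded_slab::Slab;
/// assert_eq!(Slab::<&str>::USED_BITS, std::mem::size_of::<usize>() * 8);
/// ```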
403 | pub const USED_BITS: usize = C::USED_BITS; |
404 | |
405 | /// Inserts a value into the slab, returning the integer index at which that |
406 | /// value was inserted. This index can then be used to access the entry. |
407 | /// |
408 | /// If this function returns `None`, then the shard for the current thread |
409 | /// is full and no items can be added until some are removed, or the maximum |
410 | /// number of shards has been reached. |
411 | /// |
412 | /// # Examples |
413 | /// ```rust |
414 | /// # use sharded_slab::Slab; |
415 | /// let slab = Slab::new(); |
416 | /// |
417 | /// let key = slab.insert("hello world").unwrap(); |
418 | /// assert_eq!(slab.get(key).unwrap(), "hello world"); |
419 | /// ``` |
420 | pub fn insert(&self, value: T) -> Option<usize> { |
421 | let (tid, shard) = self.shards.current(); |
422 | test_println!("insert {:?}" , tid); |
423 | let mut value = Some(value); |
424 | shard |
425 | .init_with(|idx, slot| { |
426 | let gen = slot.insert(&mut value)?; |
427 | Some(gen.pack(idx)) |
428 | }) |
429 | .map(|idx| tid.pack(idx)) |
430 | } |
431 | |
432 | /// Return a handle to a vacant entry allowing for further manipulation. |
433 | /// |
434 | /// This function is useful when creating values that must contain their |
435 | /// slab index. The returned [`VacantEntry`] reserves a slot in the slab and |
436 | /// is able to return the index of the entry. |
437 | /// |
438 | /// # Examples |
439 | /// |
440 | /// ``` |
441 | /// # use sharded_slab::Slab; |
442 | /// let mut slab = Slab::new(); |
443 | /// |
444 | /// let hello = { |
445 | /// let entry = slab.vacant_entry().unwrap(); |
446 | /// let key = entry.key(); |
447 | /// |
448 | /// entry.insert((key, "hello")); |
449 | /// key |
450 | /// }; |
451 | /// |
452 | /// assert_eq!(hello, slab.get(hello).unwrap().0); |
453 | /// assert_eq!("hello" , slab.get(hello).unwrap().1); |
454 | /// ``` |
455 | pub fn vacant_entry(&self) -> Option<VacantEntry<'_, T, C>> { |
456 | let (tid, shard) = self.shards.current(); |
457 | test_println!("vacant_entry {:?}" , tid); |
458 | shard.init_with(|idx, slot| { |
459 | let inner = slot.init()?; |
460 | let key = inner.generation().pack(tid.pack(idx)); |
461 | Some(VacantEntry { |
462 | inner, |
463 | key, |
464 | _lt: PhantomData, |
465 | }) |
466 | }) |
467 | } |
468 | |
469 | /// Remove the value at the given index in the slab, returning `true` if a |
470 | /// value was removed. |
471 | /// |
472 | /// Unlike [`take`], this method does _not_ block the current thread until |
473 | /// the value can be removed. Instead, if another thread is currently |
474 | /// accessing that value, this marks it to be removed by that thread when it |
475 | /// finishes accessing the value. |
476 | /// |
477 | /// # Examples |
478 | /// |
479 | /// ```rust |
480 | /// let slab = sharded_slab::Slab::new(); |
481 | /// let key = slab.insert("hello world").unwrap(); |
482 | /// |
483 | /// // Remove the item from the slab. |
484 | /// assert!(slab.remove(key)); |
485 | /// |
486 | /// // Now, the slot is empty. |
487 | /// assert!(!slab.contains(key)); |
488 | /// ``` |
489 | /// |
490 | /// ```rust |
491 | /// use std::sync::Arc; |
492 | /// |
493 | /// let slab = Arc::new(sharded_slab::Slab::new()); |
494 | /// let key = slab.insert("hello world").unwrap(); |
495 | /// |
496 | /// let slab2 = slab.clone(); |
497 | /// let thread2 = std::thread::spawn(move || { |
498 | /// // Depending on when this thread begins executing, the item may |
499 | /// // or may not have already been removed... |
500 | /// if let Some(item) = slab2.get(key) { |
501 | /// assert_eq!(item, "hello world"); |
502 | /// } |
503 | /// }); |
504 | /// |
505 | /// // The item will be removed by thread2 when it finishes accessing it. |
506 | /// assert!(slab.remove(key)); |
507 | /// |
508 | /// thread2.join().unwrap(); |
509 | /// assert!(!slab.contains(key)); |
510 | /// ``` |
511 | /// [`take`]: Slab::take |
512 | pub fn remove(&self, idx: usize) -> bool { |
513 | // The `Drop` impl for `Entry` calls `remove_local` or `remove_remote` based |
514 | // on where the guard was dropped from. If the dropped guard was the last one, this will |
515 | // call `Slot::remove_value` which actually clears storage. |
516 | let tid = C::unpack_tid(idx); |
517 | |
518 | test_println!("rm_deferred {:?}" , tid); |
519 | let shard = self.shards.get(tid.as_usize()); |
520 | if tid.is_current() { |
521 | shard.map(|shard| shard.remove_local(idx)).unwrap_or(false) |
522 | } else { |
523 | shard.map(|shard| shard.remove_remote(idx)).unwrap_or(false) |
524 | } |
525 | } |
526 | |
527 | /// Removes the value associated with the given key from the slab, returning |
528 | /// it. |
529 | /// |
530 | /// If the slab does not contain a value for that key, `None` is returned |
531 | /// instead. |
532 | /// |
533 | /// If the value associated with the given key is currently being |
534 | /// accessed by another thread, this method will block the current thread |
535 | /// until the item is no longer accessed. If this is not desired, use |
536 | /// [`remove`] instead. |
537 | /// |
538 | /// **Note**: This method blocks the calling thread by spinning until the |
539 | /// currently outstanding references are released. Spinning for long periods |
540 | /// of time can result in high CPU time and power consumption. Therefore, |
541 | /// `take` should only be called when other references to the slot are |
542 | /// expected to be dropped soon (e.g., when all accesses are relatively |
543 | /// short). |
544 | /// |
545 | /// # Examples |
546 | /// |
547 | /// ```rust |
548 | /// let slab = sharded_slab::Slab::new(); |
549 | /// let key = slab.insert("hello world").unwrap(); |
550 | /// |
551 | /// // Remove the item from the slab, returning it. |
552 | /// assert_eq!(slab.take(key), Some("hello world")); |
553 | /// |
554 | /// // Now, the slot is empty. |
555 | /// assert!(!slab.contains(key)); |
556 | /// ``` |
557 | /// |
558 | /// ```rust |
559 | /// use std::sync::Arc; |
560 | /// |
561 | /// let slab = Arc::new(sharded_slab::Slab::new()); |
562 | /// let key = slab.insert("hello world").unwrap(); |
563 | /// |
564 | /// let slab2 = slab.clone(); |
565 | /// let thread2 = std::thread::spawn(move || { |
566 | /// // Depending on when this thread begins executing, the item may |
567 | /// // or may not have already been removed... |
568 | /// if let Some(item) = slab2.get(key) { |
569 | /// assert_eq!(item, "hello world"); |
570 | /// } |
571 | /// }); |
572 | /// |
573 | /// // The item will only be removed when the other thread finishes |
574 | /// // accessing it. |
575 | /// assert_eq!(slab.take(key), Some("hello world")); |
576 | /// |
577 | /// thread2.join().unwrap(); |
578 | /// assert!(!slab.contains(key)); |
579 | /// ``` |
580 | /// [`remove`]: Slab::remove |
581 | pub fn take(&self, idx: usize) -> Option<T> { |
582 | let tid = C::unpack_tid(idx); |
583 | |
584 | test_println!("rm {:?}" , tid); |
585 | let shard = self.shards.get(tid.as_usize())?; |
586 | if tid.is_current() { |
587 | shard.take_local(idx) |
588 | } else { |
589 | shard.take_remote(idx) |
590 | } |
591 | } |
592 | |
593 | /// Return a reference to the value associated with the given key. |
594 | /// |
595 | /// If the slab does not contain a value for the given key, or if the |
596 | /// maximum number of concurrent references to the slot has been reached, |
597 | /// `None` is returned instead. |
598 | /// |
599 | /// # Examples |
600 | /// |
601 | /// ```rust |
602 | /// let slab = sharded_slab::Slab::new(); |
603 | /// let key = slab.insert("hello world").unwrap(); |
604 | /// |
605 | /// assert_eq!(slab.get(key).unwrap(), "hello world"); |
606 | /// assert!(slab.get(12345).is_none()); |
607 | /// ``` |
608 | pub fn get(&self, key: usize) -> Option<Entry<'_, T, C>> { |
609 | let tid = C::unpack_tid(key); |
610 | |
611 | test_println!("get {:?}; current={:?}" , tid, Tid::<C>::current()); |
612 | let shard = self.shards.get(tid.as_usize())?; |
613 | shard.with_slot(key, |slot| { |
614 | let inner = slot.get(C::unpack_gen(key))?; |
615 | let value = ptr::NonNull::from(slot.value().as_ref().unwrap()); |
616 | Some(Entry { |
617 | inner, |
618 | value, |
619 | shard, |
620 | key, |
621 | }) |
622 | }) |
623 | } |
624 | |
625 | /// Return an owned reference to the value at the given index. |
626 | /// |
627 | /// If the slab does not contain a value for the given key, `None` is |
628 | /// returned instead. |
629 | /// |
630 | /// Unlike [`get`], which borrows the slab, this method _clones_ the [`Arc`] |
631 | /// around the slab. This means that the returned [`OwnedEntry`] can be held |
632 | /// for an arbitrary lifetime. However, this method requires that the slab |
633 | /// itself be wrapped in an `Arc`. |
634 | /// |
635 | /// # Examples |
636 | /// |
637 | /// ``` |
638 | /// # use sharded_slab::Slab; |
639 | /// use std::sync::Arc; |
640 | /// |
641 | /// let slab: Arc<Slab<&'static str>> = Arc::new(Slab::new()); |
642 | /// let key = slab.insert("hello world").unwrap(); |
643 | /// |
644 | /// // Look up the created key, returning an `OwnedEntry`. |
645 | /// let value = slab.clone().get_owned(key).unwrap(); |
646 | /// |
647 | /// // Now, the original `Arc` clone of the slab may be dropped, but the |
648 | /// // returned `OwnedEntry` can still access the value. |
649 | /// assert_eq!(value, "hello world"); |
650 | /// ``` |
651 | /// |
652 | /// Unlike [`Entry`], an `OwnedEntry` may be stored in a struct which must live |
653 | /// for the `'static` lifetime: |
654 | /// |
655 | /// ``` |
656 | /// # use sharded_slab::Slab; |
657 | /// use sharded_slab::OwnedEntry; |
658 | /// use std::sync::Arc; |
659 | /// |
660 | /// pub struct MyStruct { |
661 | /// entry: OwnedEntry<&'static str>, |
662 | /// // ... other fields ... |
663 | /// } |
664 | /// |
665 | /// // Suppose this is some arbitrary function which requires a value that |
666 | /// // lives for the 'static lifetime... |
667 | /// fn function_requiring_static<T: 'static>(t: &T) { |
668 | /// // ... do something extremely important and interesting ... |
669 | /// } |
670 | /// |
671 | /// let slab: Arc<Slab<&'static str>> = Arc::new(Slab::new()); |
672 | /// let key = slab.insert("hello world").unwrap(); |
673 | /// |
674 | /// // Look up the created key, returning an `OwnedEntry`. |
675 | /// let entry = slab.clone().get_owned(key).unwrap(); |
676 | /// let my_struct = MyStruct { |
677 | /// entry, |
678 | /// // ... |
679 | /// }; |
680 | /// |
681 | /// // We can use `my_struct` anywhere where it is required to have the |
682 | /// // `'static` lifetime: |
683 | /// function_requiring_static(&my_struct); |
684 | /// ``` |
685 | /// |
686 | /// [`OwnedEntry`]s may be sent between threads: |
687 | /// |
688 | /// ``` |
689 | /// # use sharded_slab::Slab; |
690 | /// use std::{thread, sync::Arc}; |
691 | /// |
692 | /// let slab: Arc<Slab<&'static str>> = Arc::new(Slab::new()); |
693 | /// let key = slab.insert("hello world").unwrap(); |
694 | /// |
695 | /// // Look up the created key, returning an `OwnedEntry`. |
696 | /// let value = slab.clone().get_owned(key).unwrap(); |
697 | /// |
698 | /// thread::spawn(move || { |
699 | /// assert_eq!(value, "hello world"); |
700 | /// // ... |
701 | /// }).join().unwrap(); |
702 | /// ``` |
703 | /// |
704 | /// [`get`]: Slab::get |
705 | /// [`Arc`]: std::sync::Arc |
706 | pub fn get_owned(self: Arc<Self>, key: usize) -> Option<OwnedEntry<T, C>> { |
707 | let tid = C::unpack_tid(key); |
708 | |
709 | test_println!("get_owned {:?}; current={:?}" , tid, Tid::<C>::current()); |
710 | let shard = self.shards.get(tid.as_usize())?; |
711 | shard.with_slot(key, |slot| { |
712 | let inner = slot.get(C::unpack_gen(key))?; |
713 | let value = ptr::NonNull::from(slot.value().as_ref().unwrap()); |
714 | Some(OwnedEntry { |
715 | inner, |
716 | value, |
717 | slab: self.clone(), |
718 | key, |
719 | }) |
720 | }) |
721 | } |
722 | |
723 | /// Returns `true` if the slab contains a value for the given key. |
724 | /// |
725 | /// # Examples |
726 | /// |
727 | /// ``` |
728 | /// let slab = sharded_slab::Slab::new(); |
729 | /// |
730 | /// let key = slab.insert("hello world").unwrap(); |
731 | /// assert!(slab.contains(key)); |
732 | /// |
733 | /// slab.take(key).unwrap(); |
734 | /// assert!(!slab.contains(key)); |
735 | /// ``` |
736 | pub fn contains(&self, key: usize) -> bool { |
737 | self.get(key).is_some() |
738 | } |
739 | |
740 | /// Returns an iterator over all the items in the slab. |
741 | /// |
742 | /// Because this iterator exclusively borrows the slab (i.e. it holds an |
743 | /// `&mut Slab<T>`), elements will not be added or removed while the |
744 | /// iteration is in progress. |
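///
/// # Examples
///
/// A minimal sketch of iterating over a slab's entries:
///
/// ```
/// let mut slab = sharded_slab::Slab::new();
/// slab.insert("hello").unwrap();
/// slab.insert("world").unwrap();
///
/// // Each entry inserted above is yielded exactly once.
/// assert_eq!(slab.unique_iter().count(), 2);
/// ```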
745 | pub fn unique_iter(&mut self) -> iter::UniqueIter<'_, T, C> { |
746 | let mut shards = self.shards.iter_mut(); |
747 | |
748 | let (pages, slots) = match shards.next() { |
749 | Some(shard) => { |
750 | let mut pages = shard.iter(); |
751 | let slots = pages.next().and_then(page::Shared::iter); |
752 | (pages, slots) |
753 | } |
754 | None => ([].iter(), None), |
755 | }; |
756 | |
757 | iter::UniqueIter { |
758 | shards, |
759 | pages, |
760 | slots, |
761 | } |
762 | } |
763 | } |
764 | |
765 | impl<T> Default for Slab<T> { |
766 | fn default() -> Self { |
767 | Self::new() |
768 | } |
769 | } |
770 | |
771 | impl<T: fmt::Debug, C: cfg::Config> fmt::Debug for Slab<T, C> { |
772 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
773 | f.debug_struct("Slab" ) |
774 | .field("shards" , &self.shards) |
775 | .field("config" , &C::debug()) |
776 | .finish() |
777 | } |
778 | } |
779 | |
780 | unsafe impl<T: Send, C: cfg::Config> Send for Slab<T, C> {} |
781 | unsafe impl<T: Sync, C: cfg::Config> Sync for Slab<T, C> {} |
782 | |
783 | // === impl Entry === |
784 | |
785 | impl<'a, T, C: cfg::Config> Entry<'a, T, C> { |
786 | /// Returns the key used to access the guard. |
787 | pub fn key(&self) -> usize { |
788 | self.key |
789 | } |
790 | |
791 | #[inline(always)] |
792 | fn value(&self) -> &T { |
793 | unsafe { |
794 | // Safety: this is always going to be valid, as it's projected from |
795 | // the safe reference to `self.value` --- this is just to avoid |
796 | // having to `expect` an option in the hot path when dereferencing. |
797 | self.value.as_ref() |
798 | } |
799 | } |
800 | } |
801 | |
802 | impl<'a, T, C: cfg::Config> std::ops::Deref for Entry<'a, T, C> { |
803 | type Target = T; |
804 | |
805 | fn deref(&self) -> &Self::Target { |
806 | self.value() |
807 | } |
808 | } |
809 | |
810 | impl<'a, T, C: cfg::Config> Drop for Entry<'a, T, C> { |
811 | fn drop(&mut self) { |
812 | let should_remove = unsafe { |
813 | // Safety: calling `slot::Guard::release` is unsafe, since the |
814 | // `Guard` value contains a pointer to the slot that may outlive the |
815 | // slab containing that slot. Here, the `Entry` guard owns a |
816 | // borrowed reference to the shard containing that slot, which |
817 | // ensures that the slot will not be dropped while this `Guard` |
818 | // exists. |
819 | self.inner.release() |
820 | }; |
821 | if should_remove { |
822 | self.shard.clear_after_release(self.key) |
823 | } |
824 | } |
825 | } |
826 | |
827 | impl<'a, T, C> fmt::Debug for Entry<'a, T, C> |
828 | where |
829 | T: fmt::Debug, |
830 | C: cfg::Config, |
831 | { |
832 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
833 | fmt::Debug::fmt(self.value(), f) |
834 | } |
835 | } |
836 | |
837 | impl<'a, T, C> PartialEq<T> for Entry<'a, T, C> |
838 | where |
839 | T: PartialEq<T>, |
840 | C: cfg::Config, |
841 | { |
842 | fn eq(&self, other: &T) -> bool { |
843 | self.value().eq(other) |
844 | } |
845 | } |
846 | |
847 | // === impl VacantEntry === |
848 | |
849 | impl<'a, T, C: cfg::Config> VacantEntry<'a, T, C> { |
850 | /// Insert a value in the entry. |
851 | /// |
852 | /// To get the integer index at which this value will be inserted, use |
853 | /// [`key`] prior to calling `insert`. |
854 | /// |
855 | /// # Examples |
856 | /// |
857 | /// ``` |
858 | /// # use sharded_slab::Slab; |
859 | /// let mut slab = Slab::new(); |
860 | /// |
861 | /// let hello = { |
862 | /// let entry = slab.vacant_entry().unwrap(); |
863 | /// let key = entry.key(); |
864 | /// |
865 | /// entry.insert((key, "hello")); |
866 | /// key |
867 | /// }; |
868 | /// |
869 | /// assert_eq!(hello, slab.get(hello).unwrap().0); |
870 | /// assert_eq!("hello" , slab.get(hello).unwrap().1); |
871 | /// ``` |
872 | /// |
873 | /// [`key`]: VacantEntry::key |
874 | pub fn insert(mut self, val: T) { |
875 | let value = unsafe { |
876 | // Safety: this `VacantEntry` only lives as long as the `Slab` it was |
877 | // borrowed from, so it cannot outlive the entry's slot. |
878 | self.inner.value_mut() |
879 | }; |
880 | debug_assert!( |
881 | value.is_none(), |
882 | "tried to insert to a slot that already had a value!" |
883 | ); |
884 | *value = Some(val); |
885 | let _released = unsafe { |
886 | // Safety: again, this `VacantEntry` only lives as long as the |
887 | // `Slab` it was borrowed from, so it cannot outlive the entry's |
888 | // slot. |
889 | self.inner.release() |
890 | }; |
891 | debug_assert!( |
892 | !_released, |
893 | "removing a value before it was inserted should be a no-op" |
894 | ) |
895 | } |
896 | |
897 | /// Return the integer index at which this entry will be inserted. |
898 | /// |
899 | /// A value stored in this entry will be associated with this key. |
900 | /// |
901 | /// # Examples |
902 | /// |
903 | /// ``` |
904 | /// # use sharded_slab::*; |
905 | /// let mut slab = Slab::new(); |
906 | /// |
907 | /// let hello = { |
908 | /// let entry = slab.vacant_entry().unwrap(); |
909 | /// let key = entry.key(); |
910 | /// |
911 | /// entry.insert((key, "hello")); |
912 | /// key |
913 | /// }; |
914 | /// |
915 | /// assert_eq!(hello, slab.get(hello).unwrap().0); |
916 | /// assert_eq!("hello" , slab.get(hello).unwrap().1); |
917 | /// ``` |
918 | pub fn key(&self) -> usize { |
919 | self.key |
920 | } |
921 | } |
922 | // === impl OwnedEntry === |
923 | |
924 | impl<T, C> OwnedEntry<T, C> |
925 | where |
926 | C: cfg::Config, |
927 | { |
928 | /// Returns the key used to access this guard |
929 | pub fn key(&self) -> usize { |
930 | self.key |
931 | } |
932 | |
933 | #[inline(always)] |
934 | fn value(&self) -> &T { |
935 | unsafe { |
936 | // Safety: this is always going to be valid, as it's projected from |
937 | // the safe reference to `self.value` --- this is just to avoid |
938 | // having to `expect` an option in the hot path when dereferencing. |
939 | self.value.as_ref() |
940 | } |
941 | } |
942 | } |
943 | |
944 | impl<T, C> std::ops::Deref for OwnedEntry<T, C> |
945 | where |
946 | C: cfg::Config, |
947 | { |
948 | type Target = T; |
949 | |
950 | fn deref(&self) -> &Self::Target { |
951 | self.value() |
952 | } |
953 | } |
954 | |
955 | impl<T, C> Drop for OwnedEntry<T, C> |
956 | where |
957 | C: cfg::Config, |
958 | { |
959 | fn drop(&mut self) { |
960 | test_println!("drop OwnedEntry: try clearing data" ); |
961 | let should_clear = unsafe { |
962 | // Safety: calling `slot::Guard::release` is unsafe, since the |
963 | // `Guard` value contains a pointer to the slot that may outlive the |
964 | // slab containing that slot. Here, the `OwnedEntry` owns an `Arc` |
965 | // clone of the slab, which keeps it alive as long as the `OwnedEntry` |
966 | // exists. |
967 | self.inner.release() |
968 | }; |
969 | if should_clear { |
970 | let shard_idx = Tid::<C>::from_packed(self.key); |
971 | test_println!("-> shard={:?}" , shard_idx); |
972 | if let Some(shard) = self.slab.shards.get(shard_idx.as_usize()) { |
973 | shard.clear_after_release(self.key) |
974 | } else { |
975 | test_println!("-> shard={:?} does not exist! THIS IS A BUG" , shard_idx); |
976 | debug_assert!(std::thread::panicking(), "[internal error] tried to drop an `OwnedEntry` to a slot on a shard that never existed!" ); |
977 | } |
978 | } |
979 | } |
980 | } |
981 | |
982 | impl<T, C> fmt::Debug for OwnedEntry<T, C> |
983 | where |
984 | T: fmt::Debug, |
985 | C: cfg::Config, |
986 | { |
987 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
988 | fmt::Debug::fmt(self.value(), f) |
989 | } |
990 | } |
991 | |
992 | impl<T, C> PartialEq<T> for OwnedEntry<T, C> |
993 | where |
994 | T: PartialEq<T>, |
995 | C: cfg::Config, |
996 | { |
997 | fn eq(&self, other: &T) -> bool { |
998 | *self.value() == *other |
999 | } |
1000 | } |
1001 | |
1002 | unsafe impl<T, C> Sync for OwnedEntry<T, C> |
1003 | where |
1004 | T: Sync, |
1005 | C: cfg::Config, |
1006 | { |
1007 | } |
1008 | |
1009 | unsafe impl<T, C> Send for OwnedEntry<T, C> |
1010 | where |
1011 | T: Sync, |
1012 | C: cfg::Config, |
1013 | { |
1014 | } |
1015 | |
1016 | // === pack === |
1017 | |
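// A worked example of the packing scheme (the numbers are illustrative, not
// the real constants): for a field with `LEN = 4` packed above 12 lower bits
// (`SHIFT = 12`), `BITS = 0b1111` and `MASK = 0b1111 << 12`. Packing the
// value 3 into an existing index `0x00A` yields
// `(0x00A & !MASK) | (3 << 12) == 0x300A`.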
1018 | pub(crate) trait Pack<C: cfg::Config>: Sized { |
1019 | // ====== provided by each implementation ================================= |
1020 | |
1021 | /// The number of bits occupied by this type when packed into a usize. |
1022 | /// |
1023 | /// This must be provided to determine the number of bits into which to pack |
1024 | /// the type. |
1025 | const LEN: usize; |
1026 | /// The type packed on the less significant side of this type. |
1027 | /// |
1028 | /// If this type is packed into the least significant bit of a usize, this |
1029 | /// should be `()`, which occupies no bytes. |
1030 | /// |
1031 | /// This is used to calculate the shift amount for packing this value. |
1032 | type Prev: Pack<C>; |
1033 | |
1034 | // ====== calculated automatically ======================================== |
1035 | |
1036 | /// A number consisting of `Self::LEN` 1 bits, starting at the least |
1037 | /// significant bit. |
1038 | /// |
1039 | /// This is the highest value this type can represent. This number is shifted |
1040 | /// left by `Self::SHIFT` bits to calculate this type's `MASK`. |
1041 | /// |
1042 | /// This is computed automatically based on `Self::LEN`. |
1043 | const BITS: usize = { |
1044 | let shift = 1 << (Self::LEN - 1); |
1045 | shift | (shift - 1) |
1046 | }; |
1047 | /// The number of bits to shift a number to pack it into a usize with other |
1048 | /// values. |
1049 | /// |
1050 | /// This is calculated automatically based on the `LEN` and `SHIFT` constants |
1051 | /// of the previous value. |
1052 | const SHIFT: usize = Self::Prev::SHIFT + Self::Prev::LEN; |
1053 | |
1054 | /// The mask to extract only this type from a packed `usize`. |
1055 | /// |
1056 | /// This is calculated by shifting `Self::BITS` left by `Self::SHIFT`. |
1057 | const MASK: usize = Self::BITS << Self::SHIFT; |
1058 | |
1059 | fn as_usize(&self) -> usize; |
1060 | fn from_usize(val: usize) -> Self; |
1061 | |
1062 | #[inline(always)] |
1063 | fn pack(&self, to: usize) -> usize { |
1064 | let value = self.as_usize(); |
1065 | debug_assert!(value <= Self::BITS); |
1066 | |
1067 | (to & !Self::MASK) | (value << Self::SHIFT) |
1068 | } |
1069 | |
1070 | #[inline(always)] |
1071 | fn from_packed(from: usize) -> Self { |
1072 | let value = (from & Self::MASK) >> Self::SHIFT; |
1073 | debug_assert!(value <= Self::BITS); |
1074 | Self::from_usize(value) |
1075 | } |
1076 | } |
1077 | |
1078 | impl<C: cfg::Config> Pack<C> for () { |
1079 | const BITS: usize = 0; |
1080 | const LEN: usize = 0; |
1081 | const SHIFT: usize = 0; |
1082 | const MASK: usize = 0; |
1083 | |
1084 | type Prev = (); |
1085 | |
1086 | fn as_usize(&self) -> usize { |
1087 | unreachable!() |
1088 | } |
1089 | fn from_usize(_val: usize) -> Self { |
1090 | unreachable!() |
1091 | } |
1092 | |
1093 | fn pack(&self, _to: usize) -> usize { |
1094 | unreachable!() |
1095 | } |
1096 | |
1097 | fn from_packed(_from: usize) -> Self { |
1098 | unreachable!() |
1099 | } |
1100 | } |
1101 | |
1102 | #[cfg(test)] |
1103 | pub(crate) use self::tests::util as test_util; |
1104 | |
1105 | #[cfg(test)] |
1106 | mod tests; |
1107 | |