use super::FreeList;
use crate::sync::{
    atomic::{AtomicUsize, Ordering},
    hint, UnsafeCell,
};
use crate::{cfg, clear::Clear, Pack, Tid};
use std::{fmt, marker::PhantomData, mem, ptr, thread};

pub(crate) struct Slot<T, C> {
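    /// The slot's lifecycle word: the lifecycle state, reference count, and
    /// generation, packed into a single atomic `usize` (see the layout sketch
    /// below `State`).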
    lifecycle: AtomicUsize,
    /// The offset of the next item on the free list.
    next: UnsafeCell<usize>,
    /// The data stored in the slot.
    item: UnsafeCell<T>,
    _cfg: PhantomData<fn(C)>,
}

#[derive(Debug)]
pub(crate) struct Guard<T, C: cfg::Config = cfg::DefaultConfig> {
    slot: ptr::NonNull<Slot<T, C>>,
}

#[derive(Debug)]
pub(crate) struct InitGuard<T, C: cfg::Config = cfg::DefaultConfig> {
    slot: ptr::NonNull<Slot<T, C>>,
    curr_lifecycle: usize,
    released: bool,
}

#[repr(transparent)]
pub(crate) struct Generation<C = cfg::DefaultConfig> {
    value: usize,
    _cfg: PhantomData<fn(C)>,
}

#[repr(transparent)]
pub(crate) struct RefCount<C = cfg::DefaultConfig> {
    value: usize,
    _cfg: PhantomData<fn(C)>,
}

pub(crate) struct Lifecycle<C> {
    state: State,
    _cfg: PhantomData<fn(C)>,
}
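
/// The generation portion of a slot's lifecycle word.
///
/// This is a newtype so that the generation can be packed at its offset
/// within the *lifecycle* word (above the state and ref count bits), rather
/// than at `Generation`'s own offset within an index word.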
struct LifecycleGen<C>(Generation<C>);

#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[repr(usize)]
enum State {
    Present = 0b00,
    Marked = 0b01,
    Removing = 0b11,
}
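
// A sketch of how the lifecycle word is packed, as implied by the `Pack`
// impls below (`Lifecycle` occupies the lowest bits, then `RefCount`, then
// `LifecycleGen`, with any user-reserved bits above):
//
//   MSB                                                 LSB
//   +----------+------------+-----------+----------------+
//   | reserved | generation | ref count | state (2 bits) |
//   +----------+------------+-----------+----------------+
//
// The state bits move from `Present` to `Marked` (in `mark_release`), then
// to `Removing` once the last reference is dropped, and back to `Present`
// when an `InitGuard` is released after reinitializing the slot.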

impl<C: cfg::Config> Pack<C> for Generation<C> {
    /// Use all the remaining bits in the word for the generation counter, minus
    /// any bits reserved by the user.
    const LEN: usize = (cfg::WIDTH - C::RESERVED_BITS) - Self::SHIFT;

    type Prev = Tid<C>;

    #[inline(always)]
    fn from_usize(u: usize) -> Self {
        debug_assert!(u <= Self::BITS);
        Self::new(u)
    }

    #[inline(always)]
    fn as_usize(&self) -> usize {
        self.value
    }
}

impl<C: cfg::Config> Generation<C> {
    fn new(value: usize) -> Self {
        Self {
            value,
            _cfg: PhantomData,
        }
    }
}

// Slot methods which should work across all trait bounds
impl<T, C> Slot<T, C>
where
    C: cfg::Config,
{
    #[inline(always)]
    pub(super) fn next(&self) -> usize {
        self.next.with(|next| unsafe { *next })
    }

    #[inline(always)]
    pub(crate) fn value(&self) -> &T {
        self.item.with(|item| unsafe { &*item })
    }

    #[inline(always)]
    pub(super) fn set_next(&self, next: usize) {
        self.next.with_mut(|n| unsafe {
            (*n) = next;
        })
    }

    #[inline(always)]
    pub(crate) fn get(&self, gen: Generation<C>) -> Option<Guard<T, C>> {
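        // Returns `None` if the slot's generation no longer matches `gen`, if
        // the slot is being marked or removed, or if the ref count is
        // saturated; otherwise, the ref count is incremented and a `Guard` is
        // returned.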
        let mut lifecycle = self.lifecycle.load(Ordering::Acquire);
        loop {
            // Unpack the current state.
            let state = Lifecycle::<C>::from_packed(lifecycle);
            let current_gen = LifecycleGen::<C>::from_packed(lifecycle).0;
            let refs = RefCount::<C>::from_packed(lifecycle);

            test_println!(
                "-> get {:?}; current_gen={:?}; lifecycle={:#x}; state={:?}; refs={:?};",
                gen,
                current_gen,
                lifecycle,
                state,
                refs,
            );

            // Is it okay to access this slot? The accessed generation must be
            // current, and the slot must not be in the process of being
            // removed. If we can no longer access the slot at the given
            // generation, return `None`.
            if gen != current_gen || state != Lifecycle::PRESENT {
                test_println!("-> get: no longer exists!");
                return None;
            }

            // Try to increment the slot's ref count by one.
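            // (`incr` returns `None` when the count is saturated at
            // `RefCount::MAX`; the `?` then bails out of `get` entirely.)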
            let new_refs = refs.incr()?;
            match self.lifecycle.compare_exchange(
                lifecycle,
                new_refs.pack(lifecycle),
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    test_println!("-> {:?}", new_refs);
                    return Some(Guard {
                        slot: ptr::NonNull::from(self),
                    });
                }
                Err(actual) => {
                    // Another thread modified the slot's state before us! We
                    // need to retry with the new state.
                    //
                    // Since the new state may mean that the accessed generation
                    // is no longer valid, we'll check again on the next
                    // iteration of the loop.
                    test_println!("-> get: retrying; lifecycle={:#x};", actual);
                    lifecycle = actual;
                }
            };
        }
    }
    /// Marks this slot to be released, returning `Some(true)` if the slot can
    /// be mutated *now*, `Some(false)` if it cannot, and `None` if the slot's
    /// generation has already expired.
    ///
    /// If there _are_ outstanding references to this slot, this method only
    /// marks it for removal; the next thread calling either `clear_storage`
    /// or `remove_value` will try to perform the actual modification instead.
    fn mark_release(&self, gen: Generation<C>) -> Option<bool> {
        let mut lifecycle = self.lifecycle.load(Ordering::Acquire);
        let mut curr_gen;

        // Try to advance the slot's state to "MARKED", which indicates that it
        // should be removed when it is no longer concurrently accessed.
        loop {
            curr_gen = LifecycleGen::from_packed(lifecycle).0;
            test_println!(
                "-> mark_release; gen={:?}; current_gen={:?};",
                gen,
                curr_gen
            );

            // Is the slot still at the generation we are trying to remove?
            if gen != curr_gen {
                return None;
            }

            let state = Lifecycle::<C>::from_packed(lifecycle).state;
            test_println!("-> mark_release; state={:?};", state);
            match state {
                State::Removing => {
                    test_println!("--> mark_release; cannot release (already removed!)");
                    return None;
                }
                State::Marked => {
                    test_println!("--> mark_release; already marked;");
                    break;
                }
                State::Present => {}
            };

            // Set the new state to `MARKED`.
            let new_lifecycle = Lifecycle::<C>::MARKED.pack(lifecycle);
            test_println!(
                "-> mark_release; old_lifecycle={:#x}; new_lifecycle={:#x};",
                lifecycle,
                new_lifecycle
            );

            match self.lifecycle.compare_exchange(
                lifecycle,
                new_lifecycle,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => break,
                Err(actual) => {
                    test_println!("-> mark_release; retrying");
                    lifecycle = actual;
                }
            }
        }

        // Unpack the current reference count to see if we can remove the slot now.
        let refs = RefCount::<C>::from_packed(lifecycle);
        test_println!("-> mark_release: marked; refs={:?};", refs);

        // Are there currently outstanding references to the slot? If so, it
        // will have to be removed when those references are dropped.
        Some(refs.value == 0)
    }
    /// Mutates this slot.
    ///
    /// This method spins until no references to this slot remain, then calls
    /// the mutator with exclusive access to the stored value (or with `None`
    /// if the slot's generation no longer matches).
    fn release_with<F, M, R>(&self, gen: Generation<C>, offset: usize, free: &F, mutator: M) -> R
    where
        F: FreeList<C>,
        M: FnOnce(Option<&mut T>) -> R,
    {
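        // The overall strategy: CAS the lifecycle word forward to the *next*
        // generation (so that concurrent `get`s at the old generation fail),
        // then run the mutator once the ref count observed by that CAS is
        // zero, spinning while references are still outstanding.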
        let mut lifecycle = self.lifecycle.load(Ordering::Acquire);
        let mut advanced = false;
        // Exponential spin backoff while waiting for the slot to be released.
        let mut spin_exp = 0;
        let next_gen = gen.advance();
        loop {
            let current_gen = LifecycleGen::from_packed(lifecycle).0;
            test_println!(
                "-> release_with; lifecycle={:#x}; expected_gen={:?}; current_gen={:?}; next_gen={:?};",
                lifecycle,
                gen,
                current_gen,
                next_gen
            );

            // First, make sure we are actually able to remove the value.
            // If we're going to remove the value, the generation has to match
            // the value that `remove_value` was called with...unless we've
            // already stored the new generation.
            if (!advanced) && gen != current_gen {
                test_println!("-> already removed!");
                return mutator(None);
            }

            match self.lifecycle.compare_exchange(
                lifecycle,
                LifecycleGen(next_gen).pack(lifecycle),
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(actual) => {
                    // If we're in this state, we have successfully advanced to
                    // the next generation.
                    advanced = true;

                    // Make sure that there are no outstanding references.
                    let refs = RefCount::<C>::from_packed(actual);
                    test_println!("-> advanced gen; lifecycle={:#x}; refs={:?};", actual, refs);
                    if refs.value == 0 {
                        test_println!("-> ok to remove!");
                        // Safety: we've modified the generation of this slot and any other thread
                        // calling this method will exit out at the generation check above in the
                        // next iteration of the loop.
                        let value = self
                            .item
                            .with_mut(|item| mutator(Some(unsafe { &mut *item })));
                        free.push(offset, self);
                        return value;
                    }

                    // Otherwise, a reference must be dropped before we can
                    // remove the value. Spin here until there are no refs remaining...
                    test_println!("-> refs={:?}; spin...", refs);

                    // Back off, spinning and possibly yielding.
                    exponential_backoff(&mut spin_exp);
                }
                Err(actual) => {
                    test_println!("-> retrying; lifecycle={:#x};", actual);
                    lifecycle = actual;
                    // The state changed; reset the spin backoff.
                    spin_exp = 0;
                }
            }
        }
    }

    /// Initialize a slot.
    ///
    /// This method initializes and sets up the state for a slot. When used in
    /// a `Pool`, we only need to ensure that the `Slot` is in the right
    /// state; when used in a `Slab`, we also want to insert a value into it,
    /// as the memory is not yet initialized.
    pub(crate) fn init(&self) -> Option<InitGuard<T, C>> {
        // Load the current lifecycle state.
        let lifecycle = self.lifecycle.load(Ordering::Acquire);
        let gen = LifecycleGen::<C>::from_packed(lifecycle).0;
        let refs = RefCount::<C>::from_packed(lifecycle);

        test_println!(
            "-> initialize_state; state={:?}; gen={:?}; refs={:?};",
            Lifecycle::<C>::from_packed(lifecycle),
            gen,
            refs,
        );

        if refs.value != 0 {
            test_println!("-> initialize while referenced! cancelling");
            return None;
        }

        Some(InitGuard {
            slot: ptr::NonNull::from(self),
            curr_lifecycle: lifecycle,
            released: false,
        })
    }
}

// Slot impl which _needs_ an `Option` for `self.item`; this is for `Slab` to use.
impl<T, C> Slot<Option<T>, C>
where
    C: cfg::Config,
{
    fn is_empty(&self) -> bool {
        self.item.with(|item| unsafe { (*item).is_none() })
    }

    /// Insert a value into a slot.
    ///
    /// We first initialize the state and then insert the passed-in value into
    /// the slot.
    #[inline]
    pub(crate) fn insert(&self, value: &mut Option<T>) -> Option<Generation<C>> {
        debug_assert!(self.is_empty(), "inserted into full slot");
        debug_assert!(value.is_some(), "inserted twice");

        let mut guard = self.init()?;
        let gen = guard.generation();
        unsafe {
            // Safety: Accessing the value of an `InitGuard` is unsafe because
            // it has a pointer to a slot which may dangle. Here, we know the
            // pointed slot is alive because we have a reference to it in scope,
            // and the `InitGuard` will be dropped when this function returns.
            mem::swap(guard.value_mut(), value);
            guard.release();
        };
        test_println!("-> inserted at {:?}", gen);

        Some(gen)
    }

    /// Tries to remove the value in the slot, returning `true` if the value was
    /// removed.
    ///
    /// This method tries to remove the value in the slot. If there are existing references, then
    /// the slot is marked for removal and the next thread calling either this method or
    /// `remove_value` will do the work instead.
    #[inline]
    pub(super) fn try_remove_value<F: FreeList<C>>(
        &self,
        gen: Generation<C>,
        offset: usize,
        free: &F,
    ) -> bool {
        let should_remove = match self.mark_release(gen) {
            // If `mark_release` returns `Some`, a value exists at this
            // generation. The bool inside this option indicates whether or not
            // _we're_ allowed to remove the value.
            Some(should_remove) => should_remove,
            // Otherwise, the generation we tried to remove has already expired,
            // and we did not mark anything for removal.
            None => {
                test_println!(
                    "-> try_remove_value; nothing exists at generation={:?}",
                    gen
                );
                return false;
            }
        };

        test_println!("-> try_remove_value; marked!");

        if should_remove {
            // We're allowed to remove the slot now!
            test_println!("-> try_remove_value; can remove now");
            self.remove_value(gen, offset, free);
        }

        true
    }

    #[inline]
    pub(super) fn remove_value<F: FreeList<C>>(
        &self,
        gen: Generation<C>,
        offset: usize,
        free: &F,
    ) -> Option<T> {
        self.release_with(gen, offset, free, |item| item.and_then(Option::take))
    }
}

// These impls are specific to `Pool`
impl<T, C> Slot<T, C>
where
    T: Default + Clear,
    C: cfg::Config,
{
    pub(in crate::page) fn new(next: usize) -> Self {
        Self {
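            // A new slot starts out in the "removing" state; `init` must run
            // (and its `InitGuard` must be released) before the slot becomes
            // `PRESENT` and its value can be accessed.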
            lifecycle: AtomicUsize::new(Lifecycle::<C>::REMOVING.as_usize()),
            item: UnsafeCell::new(T::default()),
            next: UnsafeCell::new(next),
            _cfg: PhantomData,
        }
    }

    /// Try to clear this slot's storage.
    ///
    /// If there are references to this slot, then we mark this slot for clearing and let the last
    /// thread do the work for us.
    #[inline]
    pub(super) fn try_clear_storage<F: FreeList<C>>(
        &self,
        gen: Generation<C>,
        offset: usize,
        free: &F,
    ) -> bool {
        let should_clear = match self.mark_release(gen) {
            // If `mark_release` returns `Some`, a value exists at this
            // generation. The bool inside this option indicates whether or not
            // _we're_ allowed to clear the value.
            Some(should_clear) => should_clear,
            // Otherwise, the generation we tried to remove has already expired,
            // and we did not mark anything for removal.
            None => {
                test_println!(
                    "-> try_clear_storage; nothing exists at generation={:?}",
                    gen
                );
                return false;
            }
        };

        test_println!("-> try_clear_storage; marked!");

        if should_clear {
            // We're allowed to clear the slot now!
            test_println!("-> try_clear_storage; can clear now");
            return self.clear_storage(gen, offset, free);
        }

        true
    }

    /// Clear this slot's storage.
    ///
    /// This method spins until all references have been dropped, and then clears the storage.
    pub(super) fn clear_storage<F: FreeList<C>>(
        &self,
        gen: Generation<C>,
        offset: usize,
        free: &F,
    ) -> bool {
        // `release_with` will _always_ wait until it can release the slot, or just return if the
        // slot has already been released.
        self.release_with(gen, offset, free, |item| {
            let cleared = item.map(|inner| Clear::clear(inner)).is_some();
            test_println!("-> cleared: {}", cleared);
            cleared
        })
    }
}

impl<T, C: cfg::Config> Slot<T, C> {
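    /// Drops one reference to this slot, returning `true` if this was the
    /// last reference to a slot that was marked for removal (in which case
    /// the caller becomes responsible for releasing the slot's storage).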
    fn release(&self) -> bool {
        let mut lifecycle = self.lifecycle.load(Ordering::Acquire);
        loop {
            let refs = RefCount::<C>::from_packed(lifecycle);
            let state = Lifecycle::<C>::from_packed(lifecycle).state;
            let gen = LifecycleGen::<C>::from_packed(lifecycle).0;

            // Are we the last guard, and is the slot marked for removal?
            let dropping = refs.value == 1 && state == State::Marked;
            let new_lifecycle = if dropping {
                // If so, we want to advance the state to "removing".
                // Also, reset the ref count to 0.
                LifecycleGen(gen).pack(State::Removing as usize)
            } else {
                // Otherwise, just subtract 1 from the ref count.
                refs.decr().pack(lifecycle)
            };

            test_println!(
                "-> drop guard: state={:?}; gen={:?}; refs={:?}; lifecycle={:#x}; new_lifecycle={:#x}; dropping={:?}",
                state,
                gen,
                refs,
                lifecycle,
                new_lifecycle,
                dropping
            );
            match self.lifecycle.compare_exchange(
                lifecycle,
                new_lifecycle,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    test_println!("-> drop guard: done; dropping={:?}", dropping);
                    return dropping;
                }
                Err(actual) => {
                    test_println!("-> drop guard; retry, actual={:#x}", actual);
                    lifecycle = actual;
                }
            }
        }
    }
}

impl<T, C: cfg::Config> fmt::Debug for Slot<T, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let lifecycle = self.lifecycle.load(Ordering::Relaxed);
        f.debug_struct("Slot")
            .field("lifecycle", &format_args!("{:#x}", lifecycle))
            .field("state", &Lifecycle::<C>::from_packed(lifecycle).state)
            .field("gen", &LifecycleGen::<C>::from_packed(lifecycle).0)
            .field("refs", &RefCount::<C>::from_packed(lifecycle))
            .field("next", &self.next())
            .finish()
    }
}

// === impl Generation ===

impl<C> fmt::Debug for Generation<C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Generation").field(&self.value).finish()
    }
}

impl<C: cfg::Config> Generation<C> {
    fn advance(self) -> Self {
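        // `Self::BITS` appears to be the largest value the generation's bit
        // field can hold (compare the `debug_assert!(u <= Self::BITS)` in
        // `from_usize`), so the counter wraps to zero rather than overflowing
        // into adjacent packed fields.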
        Self::from_usize((self.value + 1) % Self::BITS)
    }
}

impl<C: cfg::Config> PartialEq for Generation<C> {
    fn eq(&self, other: &Self) -> bool {
        self.value == other.value
    }
}

impl<C: cfg::Config> Eq for Generation<C> {}

impl<C: cfg::Config> PartialOrd for Generation<C> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.value.partial_cmp(&other.value)
    }
}

impl<C: cfg::Config> Ord for Generation<C> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.value.cmp(&other.value)
    }
}

impl<C: cfg::Config> Clone for Generation<C> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<C: cfg::Config> Copy for Generation<C> {}

// === impl Guard ===

impl<T, C: cfg::Config> Guard<T, C> {
    /// Releases the guard, returning `true` if the slot should be cleared.
    ///
    /// ## Safety
    ///
    /// This dereferences a raw pointer to the slot. The caller is responsible
    /// for ensuring that the `Guard` does not outlive the slab that contains
    /// the pointed slot. Failure to do so means this pointer may dangle.
    #[inline]
    pub(crate) unsafe fn release(&self) -> bool {
        self.slot().release()
    }

    /// Returns a borrowed reference to the slot.
    ///
    /// ## Safety
    ///
    /// This dereferences a raw pointer to the slot. The caller is responsible
    /// for ensuring that the `Guard` does not outlive the slab that contains
    /// the pointed slot. Failure to do so means this pointer may dangle.
    #[inline]
    pub(crate) unsafe fn slot(&self) -> &Slot<T, C> {
        self.slot.as_ref()
    }

    /// Returns a borrowed reference to the slot's value.
    ///
    /// ## Safety
    ///
    /// This dereferences a raw pointer to the slot. The caller is responsible
    /// for ensuring that the `Guard` does not outlive the slab that contains
    /// the pointed slot. Failure to do so means this pointer may dangle.
    #[inline(always)]
    pub(crate) unsafe fn value(&self) -> &T {
        self.slot().item.with(|item| &*item)
    }
}

// === impl Lifecycle ===

impl<C: cfg::Config> Lifecycle<C> {
    const MARKED: Self = Self {
        state: State::Marked,
        _cfg: PhantomData,
    };
    const REMOVING: Self = Self {
        state: State::Removing,
        _cfg: PhantomData,
    };
    const PRESENT: Self = Self {
        state: State::Present,
        _cfg: PhantomData,
    };
}

impl<C: cfg::Config> Pack<C> for Lifecycle<C> {
    const LEN: usize = 2;
    type Prev = ();

    fn from_usize(u: usize) -> Self {
        Self {
            state: match u & Self::MASK {
                0b00 => State::Present,
                0b01 => State::Marked,
                0b11 => State::Removing,
                bad => unreachable!("weird lifecycle {:#b}", bad),
            },
            _cfg: PhantomData,
        }
    }

    fn as_usize(&self) -> usize {
        self.state as usize
    }
}

impl<C> PartialEq for Lifecycle<C> {
    fn eq(&self, other: &Self) -> bool {
        self.state == other.state
    }
}

impl<C> Eq for Lifecycle<C> {}

impl<C> fmt::Debug for Lifecycle<C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Lifecycle").field(&self.state).finish()
    }
}

// === impl RefCount ===

impl<C: cfg::Config> Pack<C> for RefCount<C> {
    const LEN: usize = cfg::WIDTH - (Lifecycle::<C>::LEN + Generation::<C>::LEN);
    type Prev = Lifecycle<C>;

    fn from_usize(value: usize) -> Self {
        debug_assert!(value <= Self::BITS);
        Self {
            value,
            _cfg: PhantomData,
        }
    }

    fn as_usize(&self) -> usize {
        self.value
    }
}

impl<C: cfg::Config> RefCount<C> {
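    /// The maximum reference count: one less than the largest value the ref
    /// count's bit field can hold.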
    pub(crate) const MAX: usize = Self::BITS - 1;

    #[inline]
    fn incr(self) -> Option<Self> {
        if self.value >= Self::MAX {
            test_println!("-> get: {}; MAX={}", self.value, RefCount::<C>::MAX);
            return None;
        }

        Some(Self::from_usize(self.value + 1))
    }

    #[inline]
    fn decr(self) -> Self {
        Self::from_usize(self.value - 1)
    }
}

impl<C> fmt::Debug for RefCount<C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("RefCount").field(&self.value).finish()
    }
}

impl<C: cfg::Config> PartialEq for RefCount<C> {
    fn eq(&self, other: &Self) -> bool {
        self.value == other.value
    }
}

impl<C: cfg::Config> Eq for RefCount<C> {}

impl<C: cfg::Config> PartialOrd for RefCount<C> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.value.partial_cmp(&other.value)
    }
}

impl<C: cfg::Config> Ord for RefCount<C> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.value.cmp(&other.value)
    }
}

impl<C: cfg::Config> Clone for RefCount<C> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<C: cfg::Config> Copy for RefCount<C> {}

// === impl LifecycleGen ===

impl<C: cfg::Config> Pack<C> for LifecycleGen<C> {
    const LEN: usize = Generation::<C>::LEN;
    type Prev = RefCount<C>;

    fn from_usize(value: usize) -> Self {
        Self(Generation::from_usize(value))
    }

    fn as_usize(&self) -> usize {
        self.0.as_usize()
    }
}

impl<T, C: cfg::Config> InitGuard<T, C> {
    pub(crate) fn generation(&self) -> Generation<C> {
        LifecycleGen::<C>::from_packed(self.curr_lifecycle).0
    }

    /// Returns a borrowed reference to the slot's value.
    ///
    /// ## Safety
    ///
    /// This dereferences a raw pointer to the slot. The caller is responsible
    /// for ensuring that the `InitGuard` does not outlive the slab that
    /// contains the pointed slot. Failure to do so means this pointer may
    /// dangle.
    pub(crate) unsafe fn value(&self) -> &T {
        self.slot.as_ref().item.with(|val| &*val)
    }

    /// Returns a mutably borrowed reference to the slot's value.
    ///
    /// ## Safety
    ///
    /// This dereferences a raw pointer to the slot. The caller is responsible
    /// for ensuring that the `InitGuard` does not outlive the slab that
    /// contains the pointed slot. Failure to do so means this pointer may
    /// dangle.
    ///
    /// It's safe to reference the slot mutably, though, because creating an
    /// `InitGuard` ensures there are no outstanding immutable references.
    pub(crate) unsafe fn value_mut(&mut self) -> &mut T {
        self.slot.as_ref().item.with_mut(|val| &mut *val)
    }

    /// Releases the guard, returning `true` if the slot should be cleared.
    ///
    /// ## Safety
    ///
    /// This dereferences a raw pointer to the slot. The caller is responsible
    /// for ensuring that the `InitGuard` does not outlive the slab that
    /// contains the pointed slot. Failure to do so means this pointer may
    /// dangle.
    pub(crate) unsafe fn release(&mut self) -> bool {
        self.release2(0)
    }

    /// Downgrades the guard to an immutable guard.
    ///
    /// ## Safety
    ///
    /// This dereferences a raw pointer to the slot. The caller is responsible
    /// for ensuring that the `InitGuard` does not outlive the slab that
    /// contains the pointed slot. Failure to do so means this pointer may
    /// dangle.
    pub(crate) unsafe fn downgrade(&mut self) -> Guard<T, C> {
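        // Advance the slot to `PRESENT` with a ref count of one: this guard's
        // exclusive access becomes a single shared reference, which the
        // returned `Guard` is then responsible for releasing.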
        let _ = self.release2(RefCount::<C>::from_usize(1).pack(0));
        Guard { slot: self.slot }
    }

    unsafe fn release2(&mut self, new_refs: usize) -> bool {
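        // First, try to CAS the lifecycle word from the state observed when
        // this guard was created directly to `PRESENT` with `new_refs`
        // references. If that CAS fails, another thread changed the lifecycle
        // while we held exclusive access, and we become responsible for
        // releasing the slot ourselves.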
        test_println!(
            "InitGuard::release; curr_lifecycle={:?}; downgrading={}",
            Lifecycle::<C>::from_packed(self.curr_lifecycle),
            new_refs != 0,
        );
        if self.released {
            test_println!("-> already released!");
            return false;
        }
        self.released = true;
        let mut curr_lifecycle = self.curr_lifecycle;
        let slot = self.slot.as_ref();
        let new_lifecycle = LifecycleGen::<C>::from_packed(self.curr_lifecycle)
            .pack(Lifecycle::<C>::PRESENT.pack(new_refs));

        match slot.lifecycle.compare_exchange(
            curr_lifecycle,
            new_lifecycle,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            Ok(_) => {
                test_println!("--> advanced to PRESENT; done");
                return false;
            }
            Err(actual) => {
                test_println!(
                    "--> lifecycle changed; actual={:?}",
                    Lifecycle::<C>::from_packed(actual)
                );
                curr_lifecycle = actual;
            }
        }

        // The CAS failed, so the state is no longer the one we observed when
        // this guard was created; we are now responsible for releasing the
        // slot.
        loop {
            let refs = RefCount::<C>::from_packed(curr_lifecycle);
            let state = Lifecycle::<C>::from_packed(curr_lifecycle).state;

            test_println!(
                "-> InitGuard::release; lifecycle={:#x}; state={:?}; refs={:?};",
                curr_lifecycle,
                state,
                refs,
            );

            debug_assert!(
                state == State::Marked || thread::panicking(),
                "state was not MARKED; someone else has removed the slot while we have exclusive access!\nactual={:?}",
                state
            );
            debug_assert!(
                refs.value == 0 || thread::panicking(),
                "ref count was not 0; someone else has referenced the slot while we have exclusive access!\nactual={:?}",
                refs
            );

            let new_lifecycle = LifecycleGen(self.generation()).pack(State::Removing as usize);

            match slot.lifecycle.compare_exchange(
                curr_lifecycle,
                new_lifecycle,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    test_println!("-> InitGuard::release: done!");
                    return true;
                }
                Err(actual) => {
                    debug_assert!(thread::panicking(), "we should not have to retry this CAS!");
                    test_println!("-> InitGuard::release; retry, actual={:#x}", actual);
                    curr_lifecycle = actual;
                }
            }
        }
    }
}

// === helpers ===

#[inline(always)]
fn exponential_backoff(exp: &mut usize) {
    /// Maximum exponent we can back off to.
    const MAX_EXPONENT: usize = 8;
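    // (At the maximum exponent, each call issues 2^8 = 256 spin-loop hints
    // before also yielding to the scheduler.)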

    // Issue 2^exp pause instructions.
    for _ in 0..(1 << *exp) {
        hint::spin_loop();
    }

    if *exp >= MAX_EXPONENT {
        // If we have reached the max backoff, also yield to the scheduler
        // explicitly.
        crate::sync::yield_now();
    } else {
        // Otherwise, increment the exponent.
        *exp += 1;
    }
}