#![cfg_attr(not(feature = "rt"), allow(dead_code))]

use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::{AtomicBool, AtomicUsize};
use crate::loom::sync::{Arc, Mutex};
use crate::util::bit;
use std::fmt;
use std::mem;
use std::ops;
use std::ptr;
use std::sync::atomic::Ordering::Relaxed;

/// Amortized allocation for homogeneous data types.
///
/// The slab pre-allocates chunks of memory to store values. It uses a similar
/// growing strategy as `Vec`. When new capacity is needed, the slab grows by
/// 2x.
///
/// # Pages
///
/// Unlike `Vec`, growing does not require moving existing elements. Instead of
/// being a contiguous chunk of memory for all elements, `Slab` is an array of
/// arrays. The top-level array is an array of pages. Each page is 2x bigger
/// than the previous one. When the slab grows, a new page is allocated.
///
/// Pages are lazily initialized.
///
/// # Allocating
///
/// When allocating an object, previously used slots are reused first. If no
/// previously used slot is available, a new slot is initialized in an existing
/// page. If all pages are full, then a new page is allocated.
///
/// When an allocated object is released, it is pushed into its page's free
/// list. Allocating scans all pages for a free slot.
///
/// # Indexing
///
/// The slab is able to index values using an address. Even when the indexed
/// object has been released, it is still safe to index. This is a key ability
/// for using the slab with the I/O driver. Addresses are registered with the
/// OS's selector and I/O resources can be released without synchronizing with
/// the OS.
///
/// # Compaction
///
/// `Slab::compact` will release pages that have been allocated but are no
/// longer used. This is done by scanning the pages and finding pages with no
/// allocated objects. These pages are then freed.
///
/// # Synchronization
///
/// The `Slab` structure is able to provide (mostly) unsynchronized reads to
/// values stored in the slab. Insertions and removals are synchronized. Reading
/// objects via `Ref` is fully unsynchronized. Indexing objects uses amortized
/// synchronization.
///
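/// # Usage sketch
///
/// An illustrative sketch of the intended usage, not a doctest; `MyEntry`
/// stands in for any type implementing `Entry`.
///
/// ```ignore
/// let mut slab = Slab::<MyEntry>::new();
/// let allocator = slab.allocator();
///
/// // Allocate a slot. The returned `Ref` keeps the slot's page alive.
/// let (addr, entry) = allocator.allocate().unwrap();
///
/// // The address can be used to index the slab, even after the `Ref` is
/// // dropped (the slot may be reused for another value, though).
/// assert!(slab.get(addr).is_some());
///
/// drop(entry);
/// slab.compact();
/// ```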
pub(crate) struct Slab<T> {
    /// Array of pages. Each page is synchronized.
    pages: [Arc<Page<T>>; NUM_PAGES],

    /// Caches the array pointer & number of initialized slots.
    cached: [CachedPage<T>; NUM_PAGES],
}

/// Allocates values in the associated slab.
pub(crate) struct Allocator<T> {
    /// Pages in the slab. The first page has a capacity of 32 elements
    /// (`PAGE_INITIAL_SIZE`). Each following page has double the capacity of
    /// the previous page.
    ///
    /// Each returned `Ref` holds a reference count to this `Arc`.
    pages: [Arc<Page<T>>; NUM_PAGES],
}

/// References a slot in the slab. Indexing a slot using an `Address` is memory
/// safe even if the slot has been released or the page has been deallocated.
/// However, it is not guaranteed that the slot has not been reused and now
/// represents a different value.
///
/// The I/O driver uses a counter to track the slot's generation. When accessing
/// the slot, the generations are compared. If they match, the value matches the
/// address.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) struct Address(usize);

/// An entry in the slab.
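///
/// A minimal sketch of an implementation that bumps a generation counter in
/// `reset`, loosely modeled on the `Foo` type in the tests below; the type and
/// field names here are illustrative, not part of this module.
///
/// ```ignore
/// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
///
/// #[derive(Default)]
/// struct MyEntry {
///     generation: AtomicUsize,
/// }
///
/// impl Entry for MyEntry {
///     fn reset(&self) {
///         // Record that the slot has been reused for a new value.
///         self.generation.fetch_add(1, SeqCst);
///     }
/// }
/// ```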
pub(crate) trait Entry: Default {
    /// Resets the entry's value and tracks the generation.
    fn reset(&self);
}

/// A reference to a value stored in the slab.
pub(crate) struct Ref<T> {
    value: *const Value<T>,
}

/// Maximum number of pages a slab can contain.
const NUM_PAGES: usize = 19;

/// Minimum number of slots a page can contain.
const PAGE_INITIAL_SIZE: usize = 32;
const PAGE_INDEX_SHIFT: u32 = PAGE_INITIAL_SIZE.trailing_zeros() + 1;
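// With 19 pages, the first holding 32 slots and each subsequent page doubling
// in size, the slab addresses 32 * (2^19 - 1) = 16,777,184 slots in total,
// keeping every address below `1 << 24` (checked by the debug assertion in
// `Slab::new`). The `+ 1` in `PAGE_INDEX_SHIFT` makes
// `(addr + PAGE_INITIAL_SIZE) >> PAGE_INDEX_SHIFT` evaluate to zero exactly
// for addresses in the first page; see `Address::page`.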

/// A page in the slab.
struct Page<T> {
    /// Slots.
    slots: Mutex<Slots<T>>,

    // Number of slots currently being used. This is not guaranteed to be up to
    // date and should only be used as a hint.
    used: AtomicUsize,

    // Set to `true` when the page has been allocated.
    allocated: AtomicBool,

    // The number of slots the page can hold.
    len: usize,

    // Length of all previous pages combined.
    prev_len: usize,
}

struct CachedPage<T> {
    /// Pointer to the page's slots.
    slots: *const Slot<T>,

    /// Number of initialized slots.
    init: usize,
}

/// Page state.
struct Slots<T> {
    /// Slots.
    slots: Vec<Slot<T>>,

    head: usize,

    /// Number of slots currently in use.
    used: usize,
}

unsafe impl<T: Sync> Sync for Page<T> {}
unsafe impl<T: Sync> Send for Page<T> {}
unsafe impl<T: Sync> Sync for CachedPage<T> {}
unsafe impl<T: Sync> Send for CachedPage<T> {}
unsafe impl<T: Sync> Sync for Ref<T> {}
unsafe impl<T: Sync> Send for Ref<T> {}

/// A slot in the slab. Contains slot-specific metadata.
///
/// `#[repr(C)]` guarantees that the struct starts with `value`. We use pointer
/// math to map a value pointer to an index in the page.
#[repr(C)]
struct Slot<T> {
    /// Pointed to by `Ref`.
    value: UnsafeCell<Value<T>>,

    /// Next entry in the free list.
    next: u32,

    /// Makes miri happy by making mutable references not take exclusive access.
    ///
    /// Could probably also be fixed by replacing `slots` with a raw-pointer
    /// based equivalent.
    _pin: std::marker::PhantomPinned,
}

/// Value paired with a reference to the page.
struct Value<T> {
    /// Value stored in the slot.
    value: T,

    /// Pointer to the page containing the slot.
    ///
    /// A raw pointer is used as this creates a ref cycle.
    page: *const Page<T>,
}

impl<T> Slab<T> {
    /// Creates a new, empty slab.
    pub(crate) fn new() -> Slab<T> {
        // Initializing arrays is a bit annoying. Instead of manually writing
        // out an array and every single entry, `Default::default()` is used to
        // initialize the array, then the array is iterated and each value is
        // initialized.
        let mut slab = Slab {
            pages: Default::default(),
            cached: Default::default(),
        };

        let mut len = PAGE_INITIAL_SIZE;
        let mut prev_len: usize = 0;

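        // Each page doubles the capacity of the previous one: page 0 holds 32
        // slots starting at address 0, page 1 holds 64 slots starting at
        // address 32, page 2 holds 128 slots starting at address 96, and so on
        // through page 18.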
        for page in &mut slab.pages {
            let page = Arc::get_mut(page).unwrap();
            page.len = len;
            page.prev_len = prev_len;
            len *= 2;
            prev_len += page.len;

            // Ensure we don't exceed the max address space.
            debug_assert!(
                page.len - 1 + page.prev_len < (1 << 24),
                "max = {:b}",
                page.len - 1 + page.prev_len
            );
        }

        slab
    }

    /// Returns a new `Allocator`.
    ///
    /// The `Allocator` supports concurrent allocation of objects.
    pub(crate) fn allocator(&self) -> Allocator<T> {
        Allocator {
            pages: self.pages.clone(),
        }
    }

    /// Returns a reference to the value stored at the given address.
    ///
    /// `&mut self` is used as the call may update internal cached state.
    pub(crate) fn get(&mut self, addr: Address) -> Option<&T> {
        let page_idx = addr.page();
        let slot_idx = self.pages[page_idx].slot(addr);

        // If the address references a slot that was last seen as uninitialized,
        // the `CachedPage` is updated. This requires acquiring the page lock
        // and updating the slot pointer and initialized offset.
        if self.cached[page_idx].init <= slot_idx {
            self.cached[page_idx].refresh(&self.pages[page_idx]);
        }

        // If the address **still** references an uninitialized slot, then the
        // address is invalid and `None` is returned.
        if self.cached[page_idx].init <= slot_idx {
            return None;
        }

        // Get a reference to the value. The lifetime of the returned reference
        // is bound to `&self`. The only way to invalidate the underlying memory
        // is to call `compact()`. The lifetimes prevent calling `compact()`
        // while references to values are outstanding.
        //
        // The referenced data is never mutated. Only `&self` references are
        // used and the data is `Sync`.
        Some(self.cached[page_idx].get(slot_idx))
    }

    /// Calls the given function with a reference to each slot in the slab. The
    /// slot may not be in use.
    ///
    /// This is used by the I/O driver during the shutdown process to notify
    /// each pending task.
    pub(crate) fn for_each(&mut self, mut f: impl FnMut(&T)) {
        for page_idx in 0..self.pages.len() {
            // It is required to avoid holding the lock when calling the
            // provided function. The function may attempt to acquire the lock
            // itself. If we hold the lock here while calling `f`, a deadlock
            // situation is possible.
            //
            // Instead of iterating the slots directly in `page`, which would
            // require holding the lock, the cache is updated and the slots are
            // iterated from the cache.
            self.cached[page_idx].refresh(&self.pages[page_idx]);

            for slot_idx in 0..self.cached[page_idx].init {
                f(self.cached[page_idx].get(slot_idx));
            }
        }
    }

    // Release memory back to the allocator.
    //
    // If pages are empty, the underlying memory is released back to the
    // allocator.
    pub(crate) fn compact(&mut self) {
        // Iterate each page except the very first one. The very first page is
        // never freed.
        for (idx, page) in self.pages.iter().enumerate().skip(1) {
            if page.used.load(Relaxed) != 0 || !page.allocated.load(Relaxed) {
                // If the page has slots in use or the memory has not been
                // allocated then it cannot be compacted.
                continue;
            }

            let mut slots = match page.slots.try_lock() {
                Some(slots) => slots,
                // If the lock cannot be acquired due to being held by another
                // thread, don't try to compact the page.
                _ => continue,
            };

            if slots.used > 0 || slots.slots.capacity() == 0 {
                // The page is in use or it has not yet been allocated. Either
                // way, there is no more work to do.
                continue;
            }

            page.allocated.store(false, Relaxed);

            // Remove the slots vector from the page. This is done so that the
            // freeing process is done outside of the lock's critical section.
            let vec = mem::take(&mut slots.slots);
            slots.head = 0;

            // Drop the lock so we can drop the vector outside the lock below.
            drop(slots);

            debug_assert!(
                self.cached[idx].slots.is_null() || self.cached[idx].slots == vec.as_ptr(),
                "cached = {:?}; actual = {:?}",
                self.cached[idx].slots,
                vec.as_ptr(),
            );

            // Clear cache
            self.cached[idx].slots = ptr::null();
            self.cached[idx].init = 0;

            drop(vec);
        }
    }
}

impl<T> fmt::Debug for Slab<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        debug(fmt, "Slab", &self.pages[..])
    }
}

impl<T: Entry> Allocator<T> {
    /// Allocates a new entry and returns a handle to it.
    ///
    /// Scans pages from smallest to biggest, stopping when a slot is found.
    /// Pages are allocated if necessary.
    ///
    /// Returns `None` if the slab is full.
    pub(crate) fn allocate(&self) -> Option<(Address, Ref<T>)> {
        // Find the first available slot.
        for page in &self.pages[..] {
            if let Some((addr, val)) = Page::allocate(page) {
                return Some((addr, val));
            }
        }

        None
    }
}

impl<T> fmt::Debug for Allocator<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        debug(fmt, "slab::Allocator", &self.pages[..])
    }
}

impl<T> ops::Deref for Ref<T> {
    type Target = T;

    fn deref(&self) -> &T {
        // Safety: `&mut` is never handed out to the underlying value. The page
        // is not freed until all `Ref` values are dropped.
        unsafe { &(*self.value).value }
    }
}

impl<T> Drop for Ref<T> {
    fn drop(&mut self) {
        // Safety: `&mut` is never handed out to the underlying value. The page
        // is not freed until all `Ref` values are dropped.
        let _ = unsafe { (*self.value).release() };
    }
}

impl<T: fmt::Debug> fmt::Debug for Ref<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(fmt)
    }
}

impl<T: Entry> Page<T> {
    // Allocates an object, returns the ref and address.
    //
    // `self: &Arc<Page<T>>` is avoided here as this would not work with the
    // loom `Arc`.
    fn allocate(me: &Arc<Page<T>>) -> Option<(Address, Ref<T>)> {
        // Before acquiring the lock, use the `used` hint.
        if me.used.load(Relaxed) == me.len {
            return None;
        }

        // Allocating objects requires synchronization
        let mut locked = me.slots.lock();

        if locked.head < locked.slots.len() {
            // Re-use an already initialized slot.
            //
            // Help out the borrow checker
            let locked = &mut *locked;

            // Get the index of the slot at the head of the free stack. This is
            // the slot that will be reused.
            let idx = locked.head;
            let slot = &locked.slots[idx];

            // Update the free stack head to point to the next slot.
            locked.head = slot.next as usize;

            // Increment the number of used slots
            locked.used += 1;
            me.used.store(locked.used, Relaxed);

            // Reset the slot
            slot.value.with(|ptr| unsafe { (*ptr).value.reset() });

            // Return a reference to the slot
            Some((me.addr(idx), locked.gen_ref(idx, me)))
        } else if me.len == locked.slots.len() {
            // The page is full
            None
        } else {
            // No initialized slots are available, but the page has more
            // capacity. Initialize a new slot.
            let idx = locked.slots.len();

            if idx == 0 {
                // The page has not yet been allocated. Allocate the storage for
                // all page slots.
                locked.slots.reserve_exact(me.len);
            }

            // Initialize a new slot
            locked.slots.push(Slot {
                value: UnsafeCell::new(Value {
                    value: Default::default(),
                    page: Arc::as_ptr(me),
                }),
                next: 0,
                _pin: std::marker::PhantomPinned,
            });

            // Increment the head to indicate the free stack is empty
            locked.head += 1;

            // Increment the number of used slots
            locked.used += 1;
            me.used.store(locked.used, Relaxed);
            me.allocated.store(true, Relaxed);

            debug_assert_eq!(locked.slots.len(), locked.head);

            Some((me.addr(idx), locked.gen_ref(idx, me)))
        }
    }
}

impl<T> Page<T> {
    /// Returns the slot index within the current page referenced by the given
    /// address.
    fn slot(&self, addr: Address) -> usize {
        addr.0 - self.prev_len
    }

    /// Returns the address for the given slot.
    fn addr(&self, slot: usize) -> Address {
        Address(slot + self.prev_len)
    }
}

impl<T> Default for Page<T> {
    fn default() -> Page<T> {
        Page {
            used: AtomicUsize::new(0),
            allocated: AtomicBool::new(false),
            slots: Mutex::new(Slots {
                slots: Vec::new(),
                head: 0,
                used: 0,
            }),
            len: 0,
            prev_len: 0,
        }
    }
}

impl<T> Page<T> {
    /// Releases a slot into the page's free list.
    fn release(&self, value: *const Value<T>) {
        let mut locked = self.slots.lock();

        let idx = locked.index_for(value);
        locked.slots[idx].next = locked.head as u32;
        locked.head = idx;
        locked.used -= 1;

        self.used.store(locked.used, Relaxed);
    }
}

impl<T> CachedPage<T> {
    /// Refreshes the cache.
    fn refresh(&mut self, page: &Page<T>) {
        let slots = page.slots.lock();

        if !slots.slots.is_empty() {
            self.slots = slots.slots.as_ptr();
            self.init = slots.slots.len();
        }
    }

    /// Gets a value by index.
    fn get(&self, idx: usize) -> &T {
        assert!(idx < self.init);

        // Safety: Pages are allocated concurrently, but are only ever
        // **deallocated** by `Slab`. `Slab` will always have a more
        // conservative view on the state of the slot array. Once `CachedPage`
        // sees a slot pointer and initialized offset, it will remain valid
        // until `compact()` is called. The `compact()` function also updates
        // `CachedPage`.
        unsafe {
            let slot = self.slots.add(idx);
            let value = slot as *const Value<T>;

            &(*value).value
        }
    }
}

impl<T> Default for CachedPage<T> {
    fn default() -> CachedPage<T> {
        CachedPage {
            slots: ptr::null(),
            init: 0,
        }
    }
}

impl<T> Slots<T> {
    /// Maps a slot pointer to an offset within the current page.
    ///
    /// The pointer math removes the `usize` index from the `Ref` struct,
    /// shrinking the struct to a single pointer size. The body of the function
    /// is safe; the resulting `usize` is bounds-checked before being used.
    ///
    /// # Panics
    ///
    /// Panics if the provided slot pointer is not contained by the page.
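    ///
    /// # Example
    ///
    /// For illustration only (the concrete numbers depend on `T` and the
    /// allocator): if the slot array starts at address `0x1000` and
    /// `size_of::<Slot<T>>()` is `0x30`, then a slot pointer of `0x1060` maps
    /// to index `(0x1060 - 0x1000) / 0x30 = 2`.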
    fn index_for(&self, slot: *const Value<T>) -> usize {
        use std::mem;

        assert_ne!(self.slots.capacity(), 0, "page is unallocated");

        let base = self.slots.as_ptr() as usize;
        let slot = slot as usize;
        let width = mem::size_of::<Slot<T>>();

        assert!(slot >= base, "unexpected pointer");

        let idx = (slot - base) / width;
        assert!(idx < self.slots.len());

        idx
    }

    /// Generates a `Ref` for the slot at the given index. This involves
    /// bumping the page's ref count.
    fn gen_ref(&self, idx: usize, page: &Arc<Page<T>>) -> Ref<T> {
        assert!(idx < self.slots.len());
        mem::forget(page.clone());

        let vec_ptr = self.slots.as_ptr();
        let slot: *const Slot<T> = unsafe { vec_ptr.add(idx) };
        let value: *const Value<T> = slot as *const Value<T>;

        Ref { value }
    }
}

impl<T> Value<T> {
    /// Releases the slot, returning the `Arc<Page<T>>` logically owned by the
    /// ref.
    fn release(&self) -> Arc<Page<T>> {
        // Safety: called by `Ref`, which owns an `Arc<Page<T>>` instance.
        let page = unsafe { Arc::from_raw(self.page) };
        page.release(self as *const _);
        page
    }
}

impl Address {
    fn page(self) -> usize {
        // Since every page is twice as large as the previous page, and all page
        // sizes are powers of two, we can determine the page index that
        // contains a given address by shifting the address down by the smallest
        // page size and looking at how many twos places are necessary to
        // represent that number, telling us what power of two page size it fits
        // inside of. We can determine the number of twos places by counting the
        // number of leading zeros (unused twos places) in the number's binary
        // representation, and subtracting that count from the total number of
        // bits in a word.
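        //
        // For example, on a 64-bit target, address 100 gives
        // `(100 + 32) >> 6 == 2`, which has 62 leading zeros, so the page
        // index is `64 - 62 == 2` (page 2 covers addresses 96..224).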
        let slot_shifted = (self.0 + PAGE_INITIAL_SIZE) >> PAGE_INDEX_SHIFT;
        (bit::pointer_width() - slot_shifted.leading_zeros()) as usize
    }

    pub(crate) const fn as_usize(self) -> usize {
        self.0
    }

    pub(crate) fn from_usize(src: usize) -> Address {
        Address(src)
    }
}

fn debug<T>(fmt: &mut fmt::Formatter<'_>, name: &str, pages: &[Arc<Page<T>>]) -> fmt::Result {
    let mut capacity = 0;
    let mut len = 0;

    for page in pages {
        if page.allocated.load(Relaxed) {
            capacity += page.len;
            len += page.used.load(Relaxed);
        }
    }

    fmt.debug_struct(name)
        .field("len", &len)
        .field("capacity", &capacity)
        .finish()
}

#[cfg(all(test, not(loom)))]
mod test {
    use super::*;
    use std::sync::atomic::AtomicUsize;
    use std::sync::atomic::Ordering::SeqCst;

    struct Foo {
        cnt: AtomicUsize,
        id: AtomicUsize,
    }

    impl Default for Foo {
        fn default() -> Foo {
            Foo {
                cnt: AtomicUsize::new(0),
                id: AtomicUsize::new(0),
            }
        }
    }

    impl Entry for Foo {
        fn reset(&self) {
            self.cnt.fetch_add(1, SeqCst);
        }
    }

    #[test]
    fn insert_remove() {
        let mut slab = Slab::<Foo>::new();
        let alloc = slab.allocator();

        let (addr1, foo1) = alloc.allocate().unwrap();
        foo1.id.store(1, SeqCst);
        assert_eq!(0, foo1.cnt.load(SeqCst));

        let (addr2, foo2) = alloc.allocate().unwrap();
        foo2.id.store(2, SeqCst);
        assert_eq!(0, foo2.cnt.load(SeqCst));

        assert_eq!(1, slab.get(addr1).unwrap().id.load(SeqCst));
        assert_eq!(2, slab.get(addr2).unwrap().id.load(SeqCst));

        drop(foo1);

        assert_eq!(1, slab.get(addr1).unwrap().id.load(SeqCst));

        let (addr3, foo3) = alloc.allocate().unwrap();
        assert_eq!(addr3, addr1);
        assert_eq!(1, foo3.cnt.load(SeqCst));
        foo3.id.store(3, SeqCst);
        assert_eq!(3, slab.get(addr3).unwrap().id.load(SeqCst));

        drop(foo2);
        drop(foo3);

        slab.compact();

        // The first page is never released
        assert!(slab.get(addr1).is_some());
        assert!(slab.get(addr2).is_some());
        assert!(slab.get(addr3).is_some());
    }

    #[test]
    fn insert_many() {
        const MANY: usize = normal_or_miri(10_000, 50);

        let mut slab = Slab::<Foo>::new();
        let alloc = slab.allocator();
        let mut entries = vec![];

        for i in 0..MANY {
            let (addr, val) = alloc.allocate().unwrap();
            val.id.store(i, SeqCst);
            entries.push((addr, val));
        }

        for (i, (addr, v)) in entries.iter().enumerate() {
            assert_eq!(i, v.id.load(SeqCst));
            assert_eq!(i, slab.get(*addr).unwrap().id.load(SeqCst));
        }

        entries.clear();

        for i in 0..MANY {
            let (addr, val) = alloc.allocate().unwrap();
            val.id.store(MANY - i, SeqCst);
            entries.push((addr, val));
        }

        for (i, (addr, v)) in entries.iter().enumerate() {
            assert_eq!(MANY - i, v.id.load(SeqCst));
            assert_eq!(MANY - i, slab.get(*addr).unwrap().id.load(SeqCst));
        }
    }

    #[test]
    fn insert_drop_reverse() {
        let mut slab = Slab::<Foo>::new();
        let alloc = slab.allocator();
        let mut entries = vec![];

        for i in 0..normal_or_miri(10_000, 100) {
            let (addr, val) = alloc.allocate().unwrap();
            val.id.store(i, SeqCst);
            entries.push((addr, val));
        }

        for _ in 0..10 {
            // Drop 1000 in reverse
            for _ in 0..normal_or_miri(1_000, 10) {
                entries.pop();
            }

            // Check remaining
            for (i, (addr, v)) in entries.iter().enumerate() {
                assert_eq!(i, v.id.load(SeqCst));
                assert_eq!(i, slab.get(*addr).unwrap().id.load(SeqCst));
            }
        }
    }

    #[test]
    fn no_compaction_if_page_still_in_use() {
        let mut slab = Slab::<Foo>::new();
        let alloc = slab.allocator();
        let mut entries1 = vec![];
        let mut entries2 = vec![];

        for i in 0..normal_or_miri(10_000, 100) {
            let (addr, val) = alloc.allocate().unwrap();
            val.id.store(i, SeqCst);

            if i % 2 == 0 {
                entries1.push((addr, val, i));
            } else {
                entries2.push(val);
            }
        }

        drop(entries2);

        for (addr, _, i) in &entries1 {
            assert_eq!(*i, slab.get(*addr).unwrap().id.load(SeqCst));
        }
    }

    const fn normal_or_miri(normal: usize, miri: usize) -> usize {
        if cfg!(miri) {
            miri
        } else {
            normal
        }
    }

    #[test]
    fn compact_all() {
        let mut slab = Slab::<Foo>::new();
        let alloc = slab.allocator();
        let mut entries = vec![];

        for _ in 0..2 {
            entries.clear();

            for i in 0..normal_or_miri(10_000, 100) {
                let (addr, val) = alloc.allocate().unwrap();
                val.id.store(i, SeqCst);

                entries.push((addr, val));
            }

            let mut addrs = vec![];

            for (addr, _) in entries.drain(..) {
                addrs.push(addr);
            }

            slab.compact();

            // The first page is never freed
            for addr in &addrs[PAGE_INITIAL_SIZE..] {
                assert!(slab.get(*addr).is_none());
            }
        }
    }

    #[test]
    fn issue_3014() {
        let mut slab = Slab::<Foo>::new();
        let alloc = slab.allocator();
        let mut entries = vec![];

        for _ in 0..normal_or_miri(5, 2) {
            entries.clear();

            // Allocate a few pages + 1
            for i in 0..(32 + 64 + 128 + 1) {
                let (addr, val) = alloc.allocate().unwrap();
                val.id.store(i, SeqCst);

                entries.push((addr, val, i));
            }

            for (addr, val, i) in &entries {
                assert_eq!(*i, val.id.load(SeqCst));
                assert_eq!(*i, slab.get(*addr).unwrap().id.load(SeqCst));
            }

            // Release the last entry
            entries.pop();

            // Compact
            slab.compact();

            // Check all the addresses
            for (addr, val, i) in &entries {
                assert_eq!(*i, val.id.load(SeqCst));
                assert_eq!(*i, slab.get(*addr).unwrap().id.load(SeqCst));
            }
        }
    }
}