1#![doc = include_str!("../README.md")]
2#![deny(missing_debug_implementations)]
3#![deny(missing_docs)]
4#![cfg_attr(not(feature = "std"), no_std)]
5#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
6
7#[doc(hidden)]
8pub extern crate alloc as core_alloc;
9
10#[cfg(feature = "boxed")]
11pub mod boxed;
12#[cfg(feature = "collections")]
13pub mod collections;
14
15mod alloc;
16
17use core::cell::Cell;
18use core::fmt::Display;
19use core::iter;
20use core::marker::PhantomData;
21use core::mem;
22use core::ptr::{self, NonNull};
23use core::slice;
24use core::str;
25use core_alloc::alloc::{alloc, dealloc, Layout};
26
27#[cfg(feature = "allocator_api")]
28use core_alloc::alloc::{AllocError, Allocator};
29
30#[cfg(all(feature = "allocator-api2", not(feature = "allocator_api")))]
31use allocator_api2::alloc::{AllocError, Allocator};
32
33pub use alloc::AllocErr;
34
35/// An error returned from [`Bump::try_alloc_try_with`].
36#[derive(Clone, PartialEq, Eq, Debug)]
37pub enum AllocOrInitError<E> {
38 /// Indicates that the initial allocation failed.
39 Alloc(AllocErr),
40 /// Indicates that the initializer failed with the contained error after
41 /// allocation.
42 ///
43 /// It is possible but not guaranteed that the allocated memory has been
44 /// released back to the allocator at this point.
45 Init(E),
46}
47impl<E> From<AllocErr> for AllocOrInitError<E> {
48 fn from(e: AllocErr) -> Self {
49 Self::Alloc(e)
50 }
51}
52impl<E: Display> Display for AllocOrInitError<E> {
53 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
54 match self {
55 AllocOrInitError::Alloc(err: &AllocErr) => err.fmt(f),
56 AllocOrInitError::Init(err: &E) => write!(f, "initialization failed: {}", err),
57 }
58 }
59}
60
61/// An arena to bump allocate into.
62///
63/// ## No `Drop`s
64///
65/// Objects that are bump-allocated will never have their [`Drop`] implementation
66/// called &mdash; unless you do it manually yourself. This makes it relatively
67/// easy to leak memory or other resources.
68///
69/// If you have a type which internally manages
70///
71/// * an allocation from the global heap (e.g. [`Vec<T>`]),
72/// * open file descriptors (e.g. [`std::fs::File`]), or
73/// * any other resource that must be cleaned up (e.g. an `mmap`)
74///
75/// and relies on its `Drop` implementation to clean up the internal resource,
76/// then if you allocate that type with a `Bump`, you need to find a new way to
77/// clean up after it yourself.
78///
79/// Potential solutions are:
80///
81/// * Using [`bumpalo::boxed::Box::new_in`] instead of [`Bump::alloc`], that
82/// will drop wrapped values similarly to [`std::boxed::Box`]. Note that this
83/// requires enabling the `"boxed"` Cargo feature for this crate. **This is
84/// often the easiest solution.**
85///
86/// * Calling [`drop_in_place`][drop_in_place] or using
87/// [`std::mem::ManuallyDrop`][manuallydrop] to manually drop these types.
88///
89/// * Using [`bumpalo::collections::Vec`] instead of [`std::vec::Vec`].
90///
91/// * Avoiding allocating these problematic types within a `Bump`.
92///
93/// Note that not calling `Drop` is memory safe! Destructors are never
94/// guaranteed to run in Rust, you can't rely on them for enforcing memory
95/// safety.
96///
97/// [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
98/// [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
99/// [`std::fs::File`]: https://doc.rust-lang.org/std/fs/struct.File.html
100/// [drop_in_place]: https://doc.rust-lang.org/std/ptr/fn.drop_in_place.html
101/// [manuallydrop]: https://doc.rust-lang.org/std/mem/struct.ManuallyDrop.html
102/// [`bumpalo::collections::Vec`]: collections/vec/struct.Vec.html
103/// [`std::vec::Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
104/// [`bumpalo::boxed::Box::new_in`]: boxed/struct.Box.html#method.new_in
105/// [`std::boxed::Box`]: https://doc.rust-lang.org/std/boxed/struct.Box.html
106///
107/// ## Example
108///
109/// ```
110/// use bumpalo::Bump;
111///
112/// // Create a new bump arena.
113/// let bump = Bump::new();
114///
115/// // Allocate values into the arena.
116/// let forty_two = bump.alloc(42);
117/// assert_eq!(*forty_two, 42);
118///
119/// // Mutable references are returned from allocation.
120/// let mut s = bump.alloc("bumpalo");
121/// *s = "the bump allocator; and also is a buffalo";
122/// ```
123///
124/// ## Allocation Methods Come in Many Flavors
125///
126/// There are various allocation methods on `Bump`, the simplest being
127/// [`alloc`][Bump::alloc]. The others exist to satisfy some combination of
128/// fallible allocation and initialization. The allocation methods are
129/// summarized in the following table:
130///
131/// <table>
132/// <thead>
133/// <tr>
134/// <th></th>
135/// <th>Infallible Allocation</th>
136/// <th>Fallible Allocation</th>
137/// </tr>
138/// </thead>
139/// <tr>
140/// <th>By Value</th>
141/// <td><a href="#method.alloc"><code>alloc</code></a></td>
142/// <td><a href="#method.try_alloc"><code>try_alloc</code></a></td>
143/// </tr>
144/// <tr>
145/// <th>Infallible Initializer Function</th>
146/// <td><a href="#method.alloc_with"><code>alloc_with</code></a></td>
147/// <td><a href="#method.try_alloc_with"><code>try_alloc_with</code></a></td>
148/// </tr>
149/// <tr>
150/// <th>Fallible Initializer Function</th>
151/// <td><a href="#method.alloc_try_with"><code>alloc_try_with</code></a></td>
152/// <td><a href="#method.try_alloc_try_with"><code>try_alloc_try_with</code></a></td>
153/// </tr>
154/// <tbody>
155/// </tbody>
156/// </table>
157///
158/// ### Fallible Allocation: The `try_alloc_` Method Prefix
159///
160/// These allocation methods let you recover from out-of-memory (OOM)
161/// scenarioes, rather than raising a panic on OOM.
162///
163/// ```
164/// use bumpalo::Bump;
165///
166/// let bump = Bump::new();
167///
168/// match bump.try_alloc(MyStruct {
169/// // ...
170/// }) {
171/// Ok(my_struct) => {
172/// // Allocation succeeded.
173/// }
174/// Err(e) => {
175/// // Out of memory.
176/// }
177/// }
178///
179/// struct MyStruct {
180/// // ...
181/// }
182/// ```
183///
184/// ### Initializer Functions: The `_with` Method Suffix
185///
186/// Calling one of the generic `…alloc(x)` methods is essentially equivalent to
187/// the matching [`…alloc_with(|| x)`](?search=alloc_with). However if you use
188/// `…alloc_with`, then the closure will not be invoked until after allocating
189/// space for storing `x` on the heap.
190///
191/// This can be useful in certain edge-cases related to compiler optimizations.
192/// When evaluating for example `bump.alloc(x)`, semantically `x` is first put
193/// on the stack and then moved onto the heap. In some cases, the compiler is
194/// able to optimize this into constructing `x` directly on the heap, however
195/// in many cases it does not.
196///
197/// The `…alloc_with` functions try to help the compiler be smarter. In most
198/// cases doing for example `bump.try_alloc_with(|| x)` on release mode will be
199/// enough to help the compiler realize that this optimization is valid and
200/// to construct `x` directly onto the heap.
201///
202/// #### Warning
203///
204/// These functions critically depend on compiler optimizations to achieve their
205/// desired effect. This means that it is not an effective tool when compiling
206/// without optimizations on.
207///
208/// Even when optimizations are on, these functions do not **guarantee** that
209/// the value is constructed on the heap. To the best of our knowledge no such
210/// guarantee can be made in stable Rust as of 1.54.
211///
212/// ### Fallible Initialization: The `_try_with` Method Suffix
213///
214/// The generic [`…alloc_try_with(|| x)`](?search=_try_with) methods behave
215/// like the purely `_with` suffixed methods explained above. However, they
216/// allow for fallible initialization by accepting a closure that returns a
217/// [`Result`] and will attempt to undo the initial allocation if this closure
218/// returns [`Err`].
219///
220/// #### Warning
221///
222/// If the inner closure returns [`Ok`], space for the entire [`Result`] remains
223/// allocated inside `self`. This can be a problem especially if the [`Err`]
224/// variant is larger, but even otherwise there may be overhead for the
225/// [`Result`]'s discriminant.
226///
227/// <p><details><summary>Undoing the allocation in the <code>Err</code> case
228/// always fails if <code>f</code> successfully made any additional allocations
229/// in <code>self</code>.</summary>
230///
231/// For example, the following will always leak also space for the [`Result`]
232/// into this `Bump`, even though the inner reference isn't kept and the [`Err`]
233/// payload is returned semantically by value:
234///
235/// ```rust
236/// let bump = bumpalo::Bump::new();
237///
238/// let r: Result<&mut [u8; 1000], ()> = bump.alloc_try_with(|| {
239/// let _ = bump.alloc(0_u8);
240/// Err(())
241/// });
242///
243/// assert!(r.is_err());
244/// ```
245///
246///</details></p>
247///
248/// Since [`Err`] payloads are first placed on the heap and then moved to the
249/// stack, `bump.…alloc_try_with(|| x)?` is likely to execute more slowly than
250/// the matching `bump.…alloc(x?)` in case of initialization failure. If this
251/// happens frequently, using the plain un-suffixed method may perform better.
252///
253/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
254/// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
255/// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
256///
257/// ### `Bump` Allocation Limits
258///
259/// `bumpalo` supports setting a limit on the maximum bytes of memory that can
260/// be allocated for use in a particular `Bump` arena. This limit can be set and removed with
261/// [`set_allocation_limit`][Bump::set_allocation_limit].
262/// The allocation limit is only enforced when allocating new backing chunks for
263/// a `Bump`. Updating the allocation limit will not affect existing allocations
264/// or any future allocations within the `Bump`'s current chunk.
265///
266/// #### Example
267///
268/// ```
269/// let bump = bumpalo::Bump::new();
270///
271/// assert_eq!(bump.allocation_limit(), None);
272/// bump.set_allocation_limit(Some(0));
273///
274/// assert!(bump.try_alloc(5).is_err());
275///
276/// bump.set_allocation_limit(Some(6));
277///
278/// assert_eq!(bump.allocation_limit(), Some(6));
279///
280/// bump.set_allocation_limit(None);
281///
282/// assert_eq!(bump.allocation_limit(), None);
283/// ```
284///
285/// #### Warning
286///
287/// Because of backwards compatibility, allocations that fail
288/// due to allocation limits will not present differently than
289/// errors due to resource exhaustion.
290
291#[derive(Debug)]
292pub struct Bump {
293 // The current chunk we are bump allocating within.
294 current_chunk_footer: Cell<NonNull<ChunkFooter>>,
295 allocation_limit: Cell<Option<usize>>,
296}
297
298#[repr(C)]
299#[derive(Debug)]
300struct ChunkFooter {
301 // Pointer to the start of this chunk allocation. This footer is always at
302 // the end of the chunk.
303 data: NonNull<u8>,
304
305 // The layout of this chunk's allocation.
306 layout: Layout,
307
308 // Link to the previous chunk.
309 //
310 // Note that the last node in the `prev` linked list is the canonical empty
311 // chunk, whose `prev` link points to itself.
312 prev: Cell<NonNull<ChunkFooter>>,
313
314 // Bump allocation finger that is always in the range `self.data..=self`.
315 ptr: Cell<NonNull<u8>>,
316
317 // The bytes allocated in all chunks so far, the canonical empty chunk has
318 // a size of 0 and for all other chunks, `allocated_bytes` will be
319 // the allocated_bytes of the current chunk plus the allocated bytes
320 // of the `prev` chunk.
321 allocated_bytes: usize,
322}
323
324/// A wrapper type for the canonical, statically allocated empty chunk.
325///
326/// For the canonical empty chunk to be `static`, its type must be `Sync`, which
327/// is the purpose of this wrapper type. This is safe because the empty chunk is
328/// immutable and never actually modified.
329#[repr(transparent)]
330struct EmptyChunkFooter(ChunkFooter);
331
332unsafe impl Sync for EmptyChunkFooter {}
333
334static EMPTY_CHUNK: EmptyChunkFooter = EmptyChunkFooter(ChunkFooter {
335 // This chunk is empty (except the foot itself).
336 layout: Layout::new::<ChunkFooter>(),
337
338 // The start of the (empty) allocatable region for this chunk is itself.
339 data: unsafe { NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8) },
340
341 // The end of the (empty) allocatable region for this chunk is also itself.
342 ptr: Cell::new(unsafe {
343 NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut u8)
344 }),
345
346 // Invariant: the last chunk footer in all `ChunkFooter::prev` linked lists
347 // is the empty chunk footer, whose `prev` points to itself.
348 prev: Cell::new(unsafe {
349 NonNull::new_unchecked(&EMPTY_CHUNK as *const EmptyChunkFooter as *mut ChunkFooter)
350 }),
351
352 // Empty chunks count as 0 allocated bytes in an arena.
353 allocated_bytes: 0,
354});
355
356impl EmptyChunkFooter {
357 fn get(&'static self) -> NonNull<ChunkFooter> {
358 NonNull::from(&self.0)
359 }
360}
361
362impl ChunkFooter {
363 // Returns the start and length of the currently allocated region of this
364 // chunk.
365 fn as_raw_parts(&self) -> (*const u8, usize) {
366 let data: *const u8 = self.data.as_ptr() as *const u8;
367 let ptr: *const u8 = self.ptr.get().as_ptr() as *const u8;
368 debug_assert!(data <= ptr);
369 debug_assert!(ptr <= self as *const ChunkFooter as *const u8);
370 let len: usize = unsafe { (self as *const ChunkFooter as *const u8).offset_from(origin:ptr) as usize };
371 (ptr, len)
372 }
373
374 /// Is this chunk the last empty chunk?
375 fn is_empty(&self) -> bool {
376 ptr::eq(self, b:EMPTY_CHUNK.get().as_ptr())
377 }
378}
379
380impl Default for Bump {
381 fn default() -> Bump {
382 Bump::new()
383 }
384}
385
386impl Drop for Bump {
387 fn drop(&mut self) {
388 unsafe {
389 dealloc_chunk_list(self.current_chunk_footer.get());
390 }
391 }
392}
393
394#[inline]
395unsafe fn dealloc_chunk_list(mut footer: NonNull<ChunkFooter>) {
396 while !footer.as_ref().is_empty() {
397 let f: NonNull = footer;
398 footer = f.as_ref().prev.get();
399 dealloc(f.as_ref().data.as_ptr(), f.as_ref().layout);
400 }
401}
402
403// `Bump`s are safe to send between threads because nothing aliases its owned
404// chunks until you start allocating from it. But by the time you allocate from
405// it, the returned references to allocations borrow the `Bump` and therefore
406// prevent sending the `Bump` across threads until the borrows end.
407unsafe impl Send for Bump {}
408
409#[inline]
410pub(crate) fn round_up_to(n: usize, divisor: usize) -> Option<usize> {
411 debug_assert!(divisor > 0);
412 debug_assert!(divisor.is_power_of_two());
413 Some(n.checked_add(divisor - 1)? & !(divisor - 1))
414}
415
416#[inline]
417pub(crate) fn round_down_to(n: usize, divisor: usize) -> usize {
418 debug_assert!(divisor > 0);
419 debug_assert!(divisor.is_power_of_two());
420 n & !(divisor - 1)
421}
422
423// After this point, we try to hit page boundaries instead of powers of 2
424const PAGE_STRATEGY_CUTOFF: usize = 0x1000;
425
426// We only support alignments of up to 16 bytes for iter_allocated_chunks.
427const SUPPORTED_ITER_ALIGNMENT: usize = 16;
428const CHUNK_ALIGN: usize = SUPPORTED_ITER_ALIGNMENT;
429const FOOTER_SIZE: usize = mem::size_of::<ChunkFooter>();
430
431// Assert that ChunkFooter is at most the supported alignment. This will give a compile time error if it is not the case
432const _FOOTER_ALIGN_ASSERTION: bool = mem::align_of::<ChunkFooter>() <= CHUNK_ALIGN;
433const _: [(); _FOOTER_ALIGN_ASSERTION as usize] = [()];
434
435// Maximum typical overhead per allocation imposed by allocators.
436const MALLOC_OVERHEAD: usize = 16;
437
438// This is the overhead from malloc, footer and alignment. For instance, if
439// we want to request a chunk of memory that has at least X bytes usable for
440// allocations (where X is aligned to CHUNK_ALIGN), then we expect that the
441// after adding a footer, malloc overhead and alignment, the chunk of memory
442// the allocator actually sets aside for us is X+OVERHEAD rounded up to the
443// nearest suitable size boundary.
444const OVERHEAD: usize = (MALLOC_OVERHEAD + FOOTER_SIZE + (CHUNK_ALIGN - 1)) & !(CHUNK_ALIGN - 1);
445
446// Choose a relatively small default initial chunk size, since we double chunk
447// sizes as we grow bump arenas to amortize costs of hitting the global
448// allocator.
449const FIRST_ALLOCATION_GOAL: usize = 1 << 9;
450
451// The actual size of the first allocation is going to be a bit smaller
452// than the goal. We need to make room for the footer, and we also need
453// take the alignment into account.
454const DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER: usize = FIRST_ALLOCATION_GOAL - OVERHEAD;
455
456/// The memory size and alignment details for a potential new chunk
457/// allocation.
458#[derive(Debug, Clone, Copy)]
459struct NewChunkMemoryDetails {
460 new_size_without_footer: usize,
461 align: usize,
462 size: usize,
463}
464
465/// Wrapper around `Layout::from_size_align` that adds debug assertions.
466#[inline]
467fn layout_from_size_align(size: usize, align: usize) -> Result<Layout, AllocErr> {
468 Layout::from_size_align(size, align).map_err(|_| AllocErr)
469}
470
471#[inline(never)]
472fn allocation_size_overflow<T>() -> T {
473 panic!("requested allocation size overflowed")
474}
475
476impl Bump {
477 /// Construct a new arena to bump allocate into.
478 ///
479 /// ## Example
480 ///
481 /// ```
482 /// let bump = bumpalo::Bump::new();
483 /// # let _ = bump;
484 /// ```
485 pub fn new() -> Bump {
486 Self::with_capacity(0)
487 }
488
489 /// Attempt to construct a new arena to bump allocate into.
490 ///
491 /// ## Example
492 ///
493 /// ```
494 /// let bump = bumpalo::Bump::try_new();
495 /// # let _ = bump.unwrap();
496 /// ```
497 pub fn try_new() -> Result<Bump, AllocErr> {
498 Bump::try_with_capacity(0)
499 }
500
501 /// Construct a new arena with the specified byte capacity to bump allocate into.
502 ///
503 /// ## Example
504 ///
505 /// ```
506 /// let bump = bumpalo::Bump::with_capacity(100);
507 /// # let _ = bump;
508 /// ```
509 pub fn with_capacity(capacity: usize) -> Bump {
510 Bump::try_with_capacity(capacity).unwrap_or_else(|_| oom())
511 }
512
513 /// Attempt to construct a new arena with the specified byte capacity to bump allocate into.
514 ///
515 /// ## Example
516 ///
517 /// ```
518 /// let bump = bumpalo::Bump::try_with_capacity(100);
519 /// # let _ = bump.unwrap();
520 /// ```
521 pub fn try_with_capacity(capacity: usize) -> Result<Self, AllocErr> {
522 if capacity == 0 {
523 return Ok(Bump {
524 current_chunk_footer: Cell::new(EMPTY_CHUNK.get()),
525 allocation_limit: Cell::new(None),
526 });
527 }
528
529 let layout = layout_from_size_align(capacity, 1)?;
530
531 let chunk_footer = unsafe {
532 Self::new_chunk(
533 Bump::new_chunk_memory_details(None, layout).ok_or(AllocErr)?,
534 layout,
535 EMPTY_CHUNK.get(),
536 )
537 .ok_or(AllocErr)?
538 };
539
540 Ok(Bump {
541 current_chunk_footer: Cell::new(chunk_footer),
542 allocation_limit: Cell::new(None),
543 })
544 }
545
546 /// The allocation limit for this arena in bytes.
547 ///
548 /// ## Example
549 ///
550 /// ```
551 /// let bump = bumpalo::Bump::with_capacity(0);
552 ///
553 /// assert_eq!(bump.allocation_limit(), None);
554 ///
555 /// bump.set_allocation_limit(Some(6));
556 ///
557 /// assert_eq!(bump.allocation_limit(), Some(6));
558 ///
559 /// bump.set_allocation_limit(None);
560 ///
561 /// assert_eq!(bump.allocation_limit(), None);
562 /// ```
563 pub fn allocation_limit(&self) -> Option<usize> {
564 self.allocation_limit.get()
565 }
566
567 /// Set the allocation limit in bytes for this arena.
568 ///
569 /// The allocation limit is only enforced when allocating new backing chunks for
570 /// a `Bump`. Updating the allocation limit will not affect existing allocations
571 /// or any future allocations within the `Bump`'s current chunk.
572 ///
573 /// ## Example
574 ///
575 /// ```
576 /// let bump = bumpalo::Bump::with_capacity(0);
577 ///
578 /// bump.set_allocation_limit(Some(0));
579 ///
580 /// assert!(bump.try_alloc(5).is_err());
581 /// ```
582 pub fn set_allocation_limit(&self, limit: Option<usize>) {
583 self.allocation_limit.set(limit)
584 }
585
586 /// How much headroom an arena has before it hits its allocation
587 /// limit.
588 fn allocation_limit_remaining(&self) -> Option<usize> {
589 self.allocation_limit.get().and_then(|allocation_limit| {
590 let allocated_bytes = self.allocated_bytes();
591 if allocated_bytes > allocation_limit {
592 None
593 } else {
594 Some(usize::abs_diff(allocation_limit, allocated_bytes))
595 }
596 })
597 }
598
599 /// Whether a request to allocate a new chunk with a given size for a given
600 /// requested layout will fit under the allocation limit set on a `Bump`.
601 fn chunk_fits_under_limit(
602 allocation_limit_remaining: Option<usize>,
603 new_chunk_memory_details: NewChunkMemoryDetails,
604 ) -> bool {
605 allocation_limit_remaining
606 .map(|allocation_limit_left| {
607 allocation_limit_left >= new_chunk_memory_details.new_size_without_footer
608 })
609 .unwrap_or(true)
610 }
611
612 /// Determine the memory details including final size, alignment and
613 /// final size without footer for a new chunk that would be allocated
614 /// to fulfill an allocation request.
615 fn new_chunk_memory_details(
616 new_size_without_footer: Option<usize>,
617 requested_layout: Layout,
618 ) -> Option<NewChunkMemoryDetails> {
619 let mut new_size_without_footer =
620 new_size_without_footer.unwrap_or(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
621
622 // We want to have CHUNK_ALIGN or better alignment
623 let mut align = CHUNK_ALIGN;
624
625 // If we already know we need to fulfill some request,
626 // make sure we allocate at least enough to satisfy it
627 align = align.max(requested_layout.align());
628 let requested_size =
629 round_up_to(requested_layout.size(), align).unwrap_or_else(allocation_size_overflow);
630 new_size_without_footer = new_size_without_footer.max(requested_size);
631
632 // We want our allocations to play nice with the memory allocator,
633 // and waste as little memory as possible.
634 // For small allocations, this means that the entire allocation
635 // including the chunk footer and mallocs internal overhead is
636 // as close to a power of two as we can go without going over.
637 // For larger allocations, we only need to get close to a page
638 // boundary without going over.
639 if new_size_without_footer < PAGE_STRATEGY_CUTOFF {
640 new_size_without_footer =
641 (new_size_without_footer + OVERHEAD).next_power_of_two() - OVERHEAD;
642 } else {
643 new_size_without_footer =
644 round_up_to(new_size_without_footer + OVERHEAD, 0x1000)? - OVERHEAD;
645 }
646
647 debug_assert_eq!(align % CHUNK_ALIGN, 0);
648 debug_assert_eq!(new_size_without_footer % CHUNK_ALIGN, 0);
649 let size = new_size_without_footer
650 .checked_add(FOOTER_SIZE)
651 .unwrap_or_else(allocation_size_overflow);
652
653 Some(NewChunkMemoryDetails {
654 new_size_without_footer,
655 size,
656 align,
657 })
658 }
659
660 /// Allocate a new chunk and return its initialized footer.
661 ///
662 /// If given, `layouts` is a tuple of the current chunk size and the
663 /// layout of the allocation request that triggered us to fall back to
664 /// allocating a new chunk of memory.
665 unsafe fn new_chunk(
666 new_chunk_memory_details: NewChunkMemoryDetails,
667 requested_layout: Layout,
668 prev: NonNull<ChunkFooter>,
669 ) -> Option<NonNull<ChunkFooter>> {
670 let NewChunkMemoryDetails {
671 new_size_without_footer,
672 align,
673 size,
674 } = new_chunk_memory_details;
675
676 let layout = layout_from_size_align(size, align).ok()?;
677
678 debug_assert!(size >= requested_layout.size());
679
680 let data = alloc(layout);
681 let data = NonNull::new(data)?;
682
683 // The `ChunkFooter` is at the end of the chunk.
684 let footer_ptr = data.as_ptr().add(new_size_without_footer);
685 debug_assert_eq!((data.as_ptr() as usize) % align, 0);
686 debug_assert_eq!(footer_ptr as usize % CHUNK_ALIGN, 0);
687 let footer_ptr = footer_ptr as *mut ChunkFooter;
688
689 // The bump pointer is initialized to the end of the range we will
690 // bump out of.
691 let ptr = Cell::new(NonNull::new_unchecked(footer_ptr as *mut u8));
692
693 // The `allocated_bytes` of a new chunk counts the total size
694 // of the chunks, not how much of the chunks are used.
695 let allocated_bytes = prev.as_ref().allocated_bytes + new_size_without_footer;
696
697 ptr::write(
698 footer_ptr,
699 ChunkFooter {
700 data,
701 layout,
702 prev: Cell::new(prev),
703 ptr,
704 allocated_bytes,
705 },
706 );
707
708 Some(NonNull::new_unchecked(footer_ptr))
709 }
710
711 /// Reset this bump allocator.
712 ///
713 /// Performs mass deallocation on everything allocated in this arena by
714 /// resetting the pointer into the underlying chunk of memory to the start
715 /// of the chunk. Does not run any `Drop` implementations on deallocated
716 /// objects; see [the top-level documentation](struct.Bump.html) for details.
717 ///
718 /// If this arena has allocated multiple chunks to bump allocate into, then
719 /// the excess chunks are returned to the global allocator.
720 ///
721 /// ## Example
722 ///
723 /// ```
724 /// let mut bump = bumpalo::Bump::new();
725 ///
726 /// // Allocate a bunch of things.
727 /// {
728 /// for i in 0..100 {
729 /// bump.alloc(i);
730 /// }
731 /// }
732 ///
733 /// // Reset the arena.
734 /// bump.reset();
735 ///
736 /// // Allocate some new things in the space previously occupied by the
737 /// // original things.
738 /// for j in 200..400 {
739 /// bump.alloc(j);
740 /// }
741 ///```
742 pub fn reset(&mut self) {
743 // Takes `&mut self` so `self` must be unique and there can't be any
744 // borrows active that would get invalidated by resetting.
745 unsafe {
746 if self.current_chunk_footer.get().as_ref().is_empty() {
747 return;
748 }
749
750 let mut cur_chunk = self.current_chunk_footer.get();
751
752 // Deallocate all chunks except the current one
753 let prev_chunk = cur_chunk.as_ref().prev.replace(EMPTY_CHUNK.get());
754 dealloc_chunk_list(prev_chunk);
755
756 // Reset the bump finger to the end of the chunk.
757 cur_chunk.as_ref().ptr.set(cur_chunk.cast());
758
759 // Reset the allocated size of the chunk.
760 cur_chunk.as_mut().allocated_bytes = cur_chunk.as_ref().layout.size();
761
762 debug_assert!(
763 self.current_chunk_footer
764 .get()
765 .as_ref()
766 .prev
767 .get()
768 .as_ref()
769 .is_empty(),
770 "We should only have a single chunk"
771 );
772 debug_assert_eq!(
773 self.current_chunk_footer.get().as_ref().ptr.get(),
774 self.current_chunk_footer.get().cast(),
775 "Our chunk's bump finger should be reset to the start of its allocation"
776 );
777 }
778 }
779
780 /// Allocate an object in this `Bump` and return an exclusive reference to
781 /// it.
782 ///
783 /// ## Panics
784 ///
785 /// Panics if reserving space for `T` fails.
786 ///
787 /// ## Example
788 ///
789 /// ```
790 /// let bump = bumpalo::Bump::new();
791 /// let x = bump.alloc("hello");
792 /// assert_eq!(*x, "hello");
793 /// ```
794 #[inline(always)]
795 #[allow(clippy::mut_from_ref)]
796 pub fn alloc<T>(&self, val: T) -> &mut T {
797 self.alloc_with(|| val)
798 }
799
800 /// Try to allocate an object in this `Bump` and return an exclusive
801 /// reference to it.
802 ///
803 /// ## Errors
804 ///
805 /// Errors if reserving space for `T` fails.
806 ///
807 /// ## Example
808 ///
809 /// ```
810 /// let bump = bumpalo::Bump::new();
811 /// let x = bump.try_alloc("hello");
812 /// assert_eq!(x, Ok(&mut "hello"));
813 /// ```
814 #[inline(always)]
815 #[allow(clippy::mut_from_ref)]
816 pub fn try_alloc<T>(&self, val: T) -> Result<&mut T, AllocErr> {
817 self.try_alloc_with(|| val)
818 }
819
820 /// Pre-allocate space for an object in this `Bump`, initializes it using
821 /// the closure, then returns an exclusive reference to it.
822 ///
823 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
824 /// discussion on the differences between the `_with` suffixed methods and
825 /// those methods without it, their performance characteristics, and when
826 /// you might or might not choose a `_with` suffixed method.
827 ///
828 /// ## Panics
829 ///
830 /// Panics if reserving space for `T` fails.
831 ///
832 /// ## Example
833 ///
834 /// ```
835 /// let bump = bumpalo::Bump::new();
836 /// let x = bump.alloc_with(|| "hello");
837 /// assert_eq!(*x, "hello");
838 /// ```
839 #[inline(always)]
840 #[allow(clippy::mut_from_ref)]
841 pub fn alloc_with<F, T>(&self, f: F) -> &mut T
842 where
843 F: FnOnce() -> T,
844 {
845 #[inline(always)]
846 unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
847 where
848 F: FnOnce() -> T,
849 {
850 // This function is translated as:
851 // - allocate space for a T on the stack
852 // - call f() with the return value being put onto this stack space
853 // - memcpy from the stack to the heap
854 //
855 // Ideally we want LLVM to always realize that doing a stack
856 // allocation is unnecessary and optimize the code so it writes
857 // directly into the heap instead. It seems we get it to realize
858 // this most consistently if we put this critical line into it's
859 // own function instead of inlining it into the surrounding code.
860 ptr::write(ptr, f())
861 }
862
863 let layout = Layout::new::<T>();
864
865 unsafe {
866 let p = self.alloc_layout(layout);
867 let p = p.as_ptr() as *mut T;
868 inner_writer(p, f);
869 &mut *p
870 }
871 }
872
873 /// Tries to pre-allocate space for an object in this `Bump`, initializes
874 /// it using the closure, then returns an exclusive reference to it.
875 ///
876 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
877 /// discussion on the differences between the `_with` suffixed methods and
878 /// those methods without it, their performance characteristics, and when
879 /// you might or might not choose a `_with` suffixed method.
880 ///
881 /// ## Errors
882 ///
883 /// Errors if reserving space for `T` fails.
884 ///
885 /// ## Example
886 ///
887 /// ```
888 /// let bump = bumpalo::Bump::new();
889 /// let x = bump.try_alloc_with(|| "hello");
890 /// assert_eq!(x, Ok(&mut "hello"));
891 /// ```
892 #[inline(always)]
893 #[allow(clippy::mut_from_ref)]
894 pub fn try_alloc_with<F, T>(&self, f: F) -> Result<&mut T, AllocErr>
895 where
896 F: FnOnce() -> T,
897 {
898 #[inline(always)]
899 unsafe fn inner_writer<T, F>(ptr: *mut T, f: F)
900 where
901 F: FnOnce() -> T,
902 {
903 // This function is translated as:
904 // - allocate space for a T on the stack
905 // - call f() with the return value being put onto this stack space
906 // - memcpy from the stack to the heap
907 //
908 // Ideally we want LLVM to always realize that doing a stack
909 // allocation is unnecessary and optimize the code so it writes
910 // directly into the heap instead. It seems we get it to realize
911 // this most consistently if we put this critical line into it's
912 // own function instead of inlining it into the surrounding code.
913 ptr::write(ptr, f())
914 }
915
916 //SAFETY: Self-contained:
917 // `p` is allocated for `T` and then a `T` is written.
918 let layout = Layout::new::<T>();
919 let p = self.try_alloc_layout(layout)?;
920 let p = p.as_ptr() as *mut T;
921
922 unsafe {
923 inner_writer(p, f);
924 Ok(&mut *p)
925 }
926 }
927
928 /// Pre-allocates space for a [`Result`] in this `Bump`, initializes it using
929 /// the closure, then returns an exclusive reference to its `T` if [`Ok`].
930 ///
931 /// Iff the allocation fails, the closure is not run.
932 ///
933 /// Iff [`Err`], an allocator rewind is *attempted* and the `E` instance is
934 /// moved out of the allocator to be consumed or dropped as normal.
935 ///
936 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
937 /// discussion on the differences between the `_with` suffixed methods and
938 /// those methods without it, their performance characteristics, and when
939 /// you might or might not choose a `_with` suffixed method.
940 ///
941 /// For caveats specific to fallible initialization, see
942 /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix).
943 ///
944 /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
945 /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
946 /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
947 ///
948 /// ## Errors
949 ///
950 /// Iff the allocation succeeds but `f` fails, that error is forwarded by value.
951 ///
952 /// ## Panics
953 ///
954 /// Panics if reserving space for `Result<T, E>` fails.
955 ///
956 /// ## Example
957 ///
958 /// ```
959 /// let bump = bumpalo::Bump::new();
960 /// let x = bump.alloc_try_with(|| Ok("hello"))?;
961 /// assert_eq!(*x, "hello");
962 /// # Result::<_, ()>::Ok(())
963 /// ```
964 #[inline(always)]
965 #[allow(clippy::mut_from_ref)]
966 pub fn alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, E>
967 where
968 F: FnOnce() -> Result<T, E>,
969 {
970 let rewind_footer = self.current_chunk_footer.get();
971 let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
972 let mut inner_result_ptr = NonNull::from(self.alloc_with(f));
973 match unsafe { inner_result_ptr.as_mut() } {
974 Ok(t) => Ok(unsafe {
975 //SAFETY:
976 // The `&mut Result<T, E>` returned by `alloc_with` may be
977 // lifetime-limited by `E`, but the derived `&mut T` still has
978 // the same validity as in `alloc_with` since the error variant
979 // is already ruled out here.
980
981 // We could conditionally truncate the allocation here, but
982 // since it grows backwards, it seems unlikely that we'd get
983 // any more than the `Result`'s discriminant this way, if
984 // anything at all.
985 &mut *(t as *mut _)
986 }),
987 Err(e) => unsafe {
988 // If this result was the last allocation in this arena, we can
989 // reclaim its space. In fact, sometimes we can do even better
990 // than simply calling `dealloc` on the result pointer: we can
991 // reclaim any alignment padding we might have added (which
992 // `dealloc` cannot do) if we didn't allocate a new chunk for
993 // this result.
994 if self.is_last_allocation(inner_result_ptr.cast()) {
995 let current_footer_p = self.current_chunk_footer.get();
996 let current_ptr = &current_footer_p.as_ref().ptr;
997 if current_footer_p == rewind_footer {
998 // It's still the same chunk, so reset the bump pointer
999 // to its original value upon entry to this method
1000 // (reclaiming any alignment padding we may have
1001 // added).
1002 current_ptr.set(rewind_ptr);
1003 } else {
1004 // We allocated a new chunk for this result.
1005 //
1006 // We know the result is the only allocation in this
1007 // chunk: Any additional allocations since the start of
1008 // this method could only have happened when running
1009 // the initializer function, which is called *after*
1010 // reserving space for this result. Therefore, since we
1011 // already determined via the check above that this
1012 // result was the last allocation, there must not have
1013 // been any other allocations, and this result is the
1014 // only allocation in this chunk.
1015 //
1016 // Because this is the only allocation in this chunk,
1017 // we can reset the chunk's bump finger to the start of
1018 // the chunk.
1019 current_ptr.set(current_footer_p.as_ref().data);
1020 }
1021 }
1022 //SAFETY:
1023 // As we received `E` semantically by value from `f`, we can
1024 // just copy that value here as long as we avoid a double-drop
1025 // (which can't happen as any specific references to the `E`'s
1026 // data in `self` are destroyed when this function returns).
1027 //
1028 // The order between this and the deallocation doesn't matter
1029 // because `Self: !Sync`.
1030 Err(ptr::read(e as *const _))
1031 },
1032 }
1033 }
1034
1035 /// Tries to pre-allocates space for a [`Result`] in this `Bump`,
1036 /// initializes it using the closure, then returns an exclusive reference
1037 /// to its `T` if all [`Ok`].
1038 ///
1039 /// Iff the allocation fails, the closure is not run.
1040 ///
1041 /// Iff the closure returns [`Err`], an allocator rewind is *attempted* and
1042 /// the `E` instance is moved out of the allocator to be consumed or dropped
1043 /// as normal.
1044 ///
1045 /// See [The `_with` Method Suffix](#initializer-functions-the-_with-method-suffix) for a
1046 /// discussion on the differences between the `_with` suffixed methods and
1047 /// those methods without it, their performance characteristics, and when
1048 /// you might or might not choose a `_with` suffixed method.
1049 ///
1050 /// For caveats specific to fallible initialization, see
1051 /// [The `_try_with` Method Suffix](#fallible-initialization-the-_try_with-method-suffix).
1052 ///
1053 /// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
1054 /// [`Ok`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Ok
1055 /// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
1056 ///
1057 /// ## Errors
1058 ///
1059 /// Errors with the [`Alloc`](`AllocOrInitError::Alloc`) variant iff
1060 /// reserving space for `Result<T, E>` fails.
1061 ///
1062 /// Iff the allocation succeeds but `f` fails, that error is forwarded by
1063 /// value inside the [`Init`](`AllocOrInitError::Init`) variant.
1064 ///
1065 /// ## Example
1066 ///
1067 /// ```
1068 /// let bump = bumpalo::Bump::new();
1069 /// let x = bump.try_alloc_try_with(|| Ok("hello"))?;
1070 /// assert_eq!(*x, "hello");
1071 /// # Result::<_, bumpalo::AllocOrInitError<()>>::Ok(())
1072 /// ```
1073 #[inline(always)]
1074 #[allow(clippy::mut_from_ref)]
1075 pub fn try_alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, AllocOrInitError<E>>
1076 where
1077 F: FnOnce() -> Result<T, E>,
1078 {
1079 let rewind_footer = self.current_chunk_footer.get();
1080 let rewind_ptr = unsafe { rewind_footer.as_ref() }.ptr.get();
1081 let mut inner_result_ptr = NonNull::from(self.try_alloc_with(f)?);
1082 match unsafe { inner_result_ptr.as_mut() } {
1083 Ok(t) => Ok(unsafe {
1084 //SAFETY:
1085 // The `&mut Result<T, E>` returned by `alloc_with` may be
1086 // lifetime-limited by `E`, but the derived `&mut T` still has
1087 // the same validity as in `alloc_with` since the error variant
1088 // is already ruled out here.
1089
1090 // We could conditionally truncate the allocation here, but
1091 // since it grows backwards, it seems unlikely that we'd get
1092 // any more than the `Result`'s discriminant this way, if
1093 // anything at all.
1094 &mut *(t as *mut _)
1095 }),
1096 Err(e) => unsafe {
1097 // If this result was the last allocation in this arena, we can
1098 // reclaim its space. In fact, sometimes we can do even better
1099 // than simply calling `dealloc` on the result pointer: we can
1100 // reclaim any alignment padding we might have added (which
1101 // `dealloc` cannot do) if we didn't allocate a new chunk for
1102 // this result.
1103 if self.is_last_allocation(inner_result_ptr.cast()) {
1104 let current_footer_p = self.current_chunk_footer.get();
1105 let current_ptr = &current_footer_p.as_ref().ptr;
1106 if current_footer_p == rewind_footer {
1107 // It's still the same chunk, so reset the bump pointer
1108 // to its original value upon entry to this method
1109 // (reclaiming any alignment padding we may have
1110 // added).
1111 current_ptr.set(rewind_ptr);
1112 } else {
1113 // We allocated a new chunk for this result.
1114 //
1115 // We know the result is the only allocation in this
1116 // chunk: Any additional allocations since the start of
1117 // this method could only have happened when running
1118 // the initializer function, which is called *after*
1119 // reserving space for this result. Therefore, since we
1120 // already determined via the check above that this
1121 // result was the last allocation, there must not have
1122 // been any other allocations, and this result is the
1123 // only allocation in this chunk.
1124 //
1125 // Because this is the only allocation in this chunk,
1126 // we can reset the chunk's bump finger to the start of
1127 // the chunk.
1128 current_ptr.set(current_footer_p.as_ref().data);
1129 }
1130 }
1131 //SAFETY:
1132 // As we received `E` semantically by value from `f`, we can
1133 // just copy that value here as long as we avoid a double-drop
1134 // (which can't happen as any specific references to the `E`'s
1135 // data in `self` are destroyed when this function returns).
1136 //
1137 // The order between this and the deallocation doesn't matter
1138 // because `Self: !Sync`.
1139 Err(AllocOrInitError::Init(ptr::read(e as *const _)))
1140 },
1141 }
1142 }
1143
1144 /// `Copy` a slice into this `Bump` and return an exclusive reference to
1145 /// the copy.
1146 ///
1147 /// ## Panics
1148 ///
1149 /// Panics if reserving space for the slice fails.
1150 ///
1151 /// ## Example
1152 ///
1153 /// ```
1154 /// let bump = bumpalo::Bump::new();
1155 /// let x = bump.alloc_slice_copy(&[1, 2, 3]);
1156 /// assert_eq!(x, &[1, 2, 3]);
1157 /// ```
1158 #[inline(always)]
1159 #[allow(clippy::mut_from_ref)]
1160 pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T]
1161 where
1162 T: Copy,
1163 {
1164 let layout = Layout::for_value(src);
1165 let dst = self.alloc_layout(layout).cast::<T>();
1166
1167 unsafe {
1168 ptr::copy_nonoverlapping(src.as_ptr(), dst.as_ptr(), src.len());
1169 slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1170 }
1171 }
1172
1173 /// `Clone` a slice into this `Bump` and return an exclusive reference to
1174 /// the clone. Prefer [`alloc_slice_copy`](#method.alloc_slice_copy) if `T` is `Copy`.
1175 ///
1176 /// ## Panics
1177 ///
1178 /// Panics if reserving space for the slice fails.
1179 ///
1180 /// ## Example
1181 ///
1182 /// ```
1183 /// #[derive(Clone, Debug, Eq, PartialEq)]
1184 /// struct Sheep {
1185 /// name: String,
1186 /// }
1187 ///
1188 /// let originals = [
1189 /// Sheep { name: "Alice".into() },
1190 /// Sheep { name: "Bob".into() },
1191 /// Sheep { name: "Cathy".into() },
1192 /// ];
1193 ///
1194 /// let bump = bumpalo::Bump::new();
1195 /// let clones = bump.alloc_slice_clone(&originals);
1196 /// assert_eq!(originals, clones);
1197 /// ```
1198 #[inline(always)]
1199 #[allow(clippy::mut_from_ref)]
1200 pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T]
1201 where
1202 T: Clone,
1203 {
1204 let layout = Layout::for_value(src);
1205 let dst = self.alloc_layout(layout).cast::<T>();
1206
1207 unsafe {
1208 for (i, val) in src.iter().cloned().enumerate() {
1209 ptr::write(dst.as_ptr().add(i), val);
1210 }
1211
1212 slice::from_raw_parts_mut(dst.as_ptr(), src.len())
1213 }
1214 }
1215
1216 /// `Copy` a string slice into this `Bump` and return an exclusive reference to it.
1217 ///
1218 /// ## Panics
1219 ///
1220 /// Panics if reserving space for the string fails.
1221 ///
1222 /// ## Example
1223 ///
1224 /// ```
1225 /// let bump = bumpalo::Bump::new();
1226 /// let hello = bump.alloc_str("hello world");
1227 /// assert_eq!("hello world", hello);
1228 /// ```
1229 #[inline(always)]
1230 #[allow(clippy::mut_from_ref)]
1231 pub fn alloc_str(&self, src: &str) -> &mut str {
1232 let buffer = self.alloc_slice_copy(src.as_bytes());
1233 unsafe {
1234 // This is OK, because it already came in as str, so it is guaranteed to be utf8
1235 str::from_utf8_unchecked_mut(buffer)
1236 }
1237 }
1238
1239 /// Allocates a new slice of size `len` into this `Bump` and returns an
1240 /// exclusive reference to the copy.
1241 ///
1242 /// The elements of the slice are initialized using the supplied closure.
1243 /// The closure argument is the position in the slice.
1244 ///
1245 /// ## Panics
1246 ///
1247 /// Panics if reserving space for the slice fails.
1248 ///
1249 /// ## Example
1250 ///
1251 /// ```
1252 /// let bump = bumpalo::Bump::new();
1253 /// let x = bump.alloc_slice_fill_with(5, |i| 5 * (i + 1));
1254 /// assert_eq!(x, &[5, 10, 15, 20, 25]);
1255 /// ```
1256 #[inline(always)]
1257 #[allow(clippy::mut_from_ref)]
1258 pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T]
1259 where
1260 F: FnMut(usize) -> T,
1261 {
1262 let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
1263 let dst = self.alloc_layout(layout).cast::<T>();
1264
1265 unsafe {
1266 for i in 0..len {
1267 ptr::write(dst.as_ptr().add(i), f(i));
1268 }
1269
1270 let result = slice::from_raw_parts_mut(dst.as_ptr(), len);
1271 debug_assert_eq!(Layout::for_value(result), layout);
1272 result
1273 }
1274 }
1275
1276 /// Allocates a new slice of size `len` into this `Bump` and returns an
1277 /// exclusive reference to the copy.
1278 ///
1279 /// All elements of the slice are initialized to `value`.
1280 ///
1281 /// ## Panics
1282 ///
1283 /// Panics if reserving space for the slice fails.
1284 ///
1285 /// ## Example
1286 ///
1287 /// ```
1288 /// let bump = bumpalo::Bump::new();
1289 /// let x = bump.alloc_slice_fill_copy(5, 42);
1290 /// assert_eq!(x, &[42, 42, 42, 42, 42]);
1291 /// ```
1292 #[inline(always)]
1293 #[allow(clippy::mut_from_ref)]
1294 pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
1295 self.alloc_slice_fill_with(len, |_| value)
1296 }
1297
1298 /// Allocates a new slice of size `len` slice into this `Bump` and return an
1299 /// exclusive reference to the copy.
1300 ///
1301 /// All elements of the slice are initialized to `value.clone()`.
1302 ///
1303 /// ## Panics
1304 ///
1305 /// Panics if reserving space for the slice fails.
1306 ///
1307 /// ## Example
1308 ///
1309 /// ```
1310 /// let bump = bumpalo::Bump::new();
1311 /// let s: String = "Hello Bump!".to_string();
1312 /// let x: &[String] = bump.alloc_slice_fill_clone(2, &s);
1313 /// assert_eq!(x.len(), 2);
1314 /// assert_eq!(&x[0], &s);
1315 /// assert_eq!(&x[1], &s);
1316 /// ```
1317 #[inline(always)]
1318 #[allow(clippy::mut_from_ref)]
1319 pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] {
1320 self.alloc_slice_fill_with(len, |_| value.clone())
1321 }
1322
1323 /// Allocates a new slice of size `len` slice into this `Bump` and return an
1324 /// exclusive reference to the copy.
1325 ///
1326 /// The elements are initialized using the supplied iterator.
1327 ///
1328 /// ## Panics
1329 ///
1330 /// Panics if reserving space for the slice fails, or if the supplied
1331 /// iterator returns fewer elements than it promised.
1332 ///
1333 /// ## Example
1334 ///
1335 /// ```
1336 /// let bump = bumpalo::Bump::new();
1337 /// let x: &[i32] = bump.alloc_slice_fill_iter([2, 3, 5].iter().cloned().map(|i| i * i));
1338 /// assert_eq!(x, [4, 9, 25]);
1339 /// ```
1340 #[inline(always)]
1341 #[allow(clippy::mut_from_ref)]
1342 pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T]
1343 where
1344 I: IntoIterator<Item = T>,
1345 I::IntoIter: ExactSizeIterator,
1346 {
1347 let mut iter = iter.into_iter();
1348 self.alloc_slice_fill_with(iter.len(), |_| {
1349 iter.next().expect("Iterator supplied too few elements")
1350 })
1351 }
1352
1353 /// Allocates a new slice of size `len` slice into this `Bump` and return an
1354 /// exclusive reference to the copy.
1355 ///
1356 /// All elements of the slice are initialized to [`T::default()`].
1357 ///
1358 /// [`T::default()`]: https://doc.rust-lang.org/std/default/trait.Default.html#tymethod.default
1359 ///
1360 /// ## Panics
1361 ///
1362 /// Panics if reserving space for the slice fails.
1363 ///
1364 /// ## Example
1365 ///
1366 /// ```
1367 /// let bump = bumpalo::Bump::new();
1368 /// let x = bump.alloc_slice_fill_default::<u32>(5);
1369 /// assert_eq!(x, &[0, 0, 0, 0, 0]);
1370 /// ```
1371 #[inline(always)]
1372 #[allow(clippy::mut_from_ref)]
1373 pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] {
1374 self.alloc_slice_fill_with(len, |_| T::default())
1375 }
1376
1377 /// Allocate space for an object with the given `Layout`.
1378 ///
1379 /// The returned pointer points at uninitialized memory, and should be
1380 /// initialized with
1381 /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html).
1382 ///
1383 /// # Panics
1384 ///
1385 /// Panics if reserving space matching `layout` fails.
1386 #[inline(always)]
1387 pub fn alloc_layout(&self, layout: Layout) -> NonNull<u8> {
1388 self.try_alloc_layout(layout).unwrap_or_else(|_| oom())
1389 }
1390
1391 /// Attempts to allocate space for an object with the given `Layout` or else returns
1392 /// an `Err`.
1393 ///
1394 /// The returned pointer points at uninitialized memory, and should be
1395 /// initialized with
1396 /// [`std::ptr::write`](https://doc.rust-lang.org/std/ptr/fn.write.html).
1397 ///
1398 /// # Errors
1399 ///
1400 /// Errors if reserving space matching `layout` fails.
1401 #[inline(always)]
1402 pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
1403 if let Some(p) = self.try_alloc_layout_fast(layout) {
1404 Ok(p)
1405 } else {
1406 self.alloc_layout_slow(layout).ok_or(AllocErr)
1407 }
1408 }
1409
1410 #[inline(always)]
1411 fn try_alloc_layout_fast(&self, layout: Layout) -> Option<NonNull<u8>> {
1412 // We don't need to check for ZSTs here since they will automatically
1413 // be handled properly: the pointer will be bumped by zero bytes,
1414 // modulo alignment. This keeps the fast path optimized for non-ZSTs,
1415 // which are much more common.
1416 unsafe {
1417 let footer = self.current_chunk_footer.get();
1418 let footer = footer.as_ref();
1419 let ptr = footer.ptr.get().as_ptr();
1420 let start = footer.data.as_ptr();
1421 debug_assert!(start <= ptr);
1422 debug_assert!(ptr as *const u8 <= footer as *const _ as *const u8);
1423
1424 if (ptr as usize) < layout.size() {
1425 return None;
1426 }
1427
1428 let ptr = ptr.wrapping_sub(layout.size());
1429 let rem = ptr as usize % layout.align();
1430 let aligned_ptr = ptr.wrapping_sub(rem);
1431
1432 if aligned_ptr >= start {
1433 let aligned_ptr = NonNull::new_unchecked(aligned_ptr);
1434 footer.ptr.set(aligned_ptr);
1435 Some(aligned_ptr)
1436 } else {
1437 None
1438 }
1439 }
1440 }
1441
1442 /// Gets the remaining capacity in the current chunk (in bytes).
1443 ///
1444 /// ## Example
1445 ///
1446 /// ```
1447 /// use bumpalo::Bump;
1448 ///
1449 /// let bump = Bump::with_capacity(100);
1450 ///
1451 /// let capacity = bump.chunk_capacity();
1452 /// assert!(capacity >= 100);
1453 /// ```
1454 pub fn chunk_capacity(&self) -> usize {
1455 let current_footer = self.current_chunk_footer.get();
1456 let current_footer = unsafe { current_footer.as_ref() };
1457
1458 current_footer.ptr.get().as_ptr() as usize - current_footer.data.as_ptr() as usize
1459 }
1460
1461 /// Slow path allocation for when we need to allocate a new chunk from the
1462 /// parent bump set because there isn't enough room in our current chunk.
1463 #[inline(never)]
1464 #[cold]
1465 fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> {
1466 unsafe {
1467 let size = layout.size();
1468 let allocation_limit_remaining = self.allocation_limit_remaining();
1469
1470 // Get a new chunk from the global allocator.
1471 let current_footer = self.current_chunk_footer.get();
1472 let current_layout = current_footer.as_ref().layout;
1473
1474 // By default, we want our new chunk to be about twice as big
1475 // as the previous chunk. If the global allocator refuses it,
1476 // we try to divide it by half until it works or the requested
1477 // size is smaller than the default footer size.
1478 let min_new_chunk_size = layout.size().max(DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER);
1479 let mut base_size = (current_layout.size() - FOOTER_SIZE)
1480 .checked_mul(2)?
1481 .max(min_new_chunk_size);
1482 let chunk_memory_details = iter::from_fn(|| {
1483 let bypass_min_chunk_size_for_small_limits = matches!(self.allocation_limit(), Some(limit) if layout.size() < limit
1484 && base_size >= layout.size()
1485 && limit < DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER
1486 && self.allocated_bytes() == 0);
1487
1488 if base_size >= min_new_chunk_size || bypass_min_chunk_size_for_small_limits {
1489 let size = base_size;
1490 base_size /= 2;
1491 Bump::new_chunk_memory_details(Some(size), layout)
1492 } else {
1493 None
1494 }
1495 });
1496
1497 let new_footer = chunk_memory_details
1498 .filter_map(|chunk_memory_details| {
1499 if Bump::chunk_fits_under_limit(
1500 allocation_limit_remaining,
1501 chunk_memory_details,
1502 ) {
1503 Bump::new_chunk(chunk_memory_details, layout, current_footer)
1504 } else {
1505 None
1506 }
1507 })
1508 .next()?;
1509
1510 debug_assert_eq!(
1511 new_footer.as_ref().data.as_ptr() as usize % layout.align(),
1512 0
1513 );
1514
1515 // Set the new chunk as our new current chunk.
1516 self.current_chunk_footer.set(new_footer);
1517
1518 let new_footer = new_footer.as_ref();
1519
1520 // Move the bump ptr finger down to allocate room for `val`. We know
1521 // this can't overflow because we successfully allocated a chunk of
1522 // at least the requested size.
1523 let mut ptr = new_footer.ptr.get().as_ptr().sub(size);
1524 // Round the pointer down to the requested alignment.
1525 ptr = ptr.sub(ptr as usize % layout.align());
1526 debug_assert!(
1527 ptr as *const _ <= new_footer,
1528 "{:p} <= {:p}",
1529 ptr,
1530 new_footer
1531 );
1532 let ptr = NonNull::new_unchecked(ptr);
1533 new_footer.ptr.set(ptr);
1534
1535 // Return a pointer to the freshly allocated region in this chunk.
1536 Some(ptr)
1537 }
1538 }
1539
1540 /// Returns an iterator over each chunk of allocated memory that
1541 /// this arena has bump allocated into.
1542 ///
1543 /// The chunks are returned ordered by allocation time, with the most
1544 /// recently allocated chunk being returned first, and the least recently
1545 /// allocated chunk being returned last.
1546 ///
1547 /// The values inside each chunk are also ordered by allocation time, with
1548 /// the most recent allocation being earlier in the slice, and the least
1549 /// recent allocation being towards the end of the slice.
1550 ///
1551 /// ## Safety
1552 ///
1553 /// Because this method takes `&mut self`, we know that the bump arena
1554 /// reference is unique and therefore there aren't any active references to
1555 /// any of the objects we've allocated in it either. This potential aliasing
1556 /// of exclusive references is one common footgun for unsafe code that we
1557 /// don't need to worry about here.
1558 ///
1559 /// However, there could be regions of uninitialized memory used as padding
1560 /// between allocations, which is why this iterator has items of type
1561 /// `[MaybeUninit<u8>]`, instead of simply `[u8]`.
1562 ///
1563 /// The only way to guarantee that there is no padding between allocations
1564 /// or within allocated objects is if all of these properties hold:
1565 ///
1566 /// 1. Every object allocated in this arena has the same alignment,
1567 /// and that alignment is at most 16.
1568 /// 2. Every object's size is a multiple of its alignment.
1569 /// 3. None of the objects allocated in this arena contain any internal
1570 /// padding.
1571 ///
1572 /// If you want to use this `iter_allocated_chunks` method, it is *your*
1573 /// responsibility to ensure that these properties hold before calling
1574 /// `MaybeUninit::assume_init` or otherwise reading the returned values.
1575 ///
1576 /// Finally, you must also ensure that any values allocated into the bump
1577 /// arena have not had their `Drop` implementations called on them,
1578 /// e.g. after dropping a [`bumpalo::boxed::Box<T>`][crate::boxed::Box].
1579 ///
1580 /// ## Example
1581 ///
1582 /// ```
1583 /// let mut bump = bumpalo::Bump::new();
1584 ///
1585 /// // Allocate a bunch of `i32`s in this bump arena, potentially causing
1586 /// // additional memory chunks to be reserved.
1587 /// for i in 0..10000 {
1588 /// bump.alloc(i);
1589 /// }
1590 ///
1591 /// // Iterate over each chunk we've bump allocated into. This is safe
1592 /// // because we have only allocated `i32`s in this arena, which fulfills
1593 /// // the above requirements.
1594 /// for ch in bump.iter_allocated_chunks() {
1595 /// println!("Used a chunk that is {} bytes long", ch.len());
1596 /// println!("The first byte is {:?}", unsafe {
1597 /// ch[0].assume_init()
1598 /// });
1599 /// }
1600 ///
1601 /// // Within a chunk, allocations are ordered from most recent to least
1602 /// // recent. If we allocated 'a', then 'b', then 'c', when we iterate
1603 /// // through the chunk's data, we get them in the order 'c', then 'b',
1604 /// // then 'a'.
1605 ///
1606 /// bump.reset();
1607 /// bump.alloc(b'a');
1608 /// bump.alloc(b'b');
1609 /// bump.alloc(b'c');
1610 ///
1611 /// assert_eq!(bump.iter_allocated_chunks().count(), 1);
1612 /// let chunk = bump.iter_allocated_chunks().nth(0).unwrap();
1613 /// assert_eq!(chunk.len(), 3);
1614 ///
1615 /// // Safe because we've only allocated `u8`s in this arena, which
1616 /// // fulfills the above requirements.
1617 /// unsafe {
1618 /// assert_eq!(chunk[0].assume_init(), b'c');
1619 /// assert_eq!(chunk[1].assume_init(), b'b');
1620 /// assert_eq!(chunk[2].assume_init(), b'a');
1621 /// }
1622 /// ```
1623 pub fn iter_allocated_chunks(&mut self) -> ChunkIter<'_> {
1624 // SAFE: Ensured by mutable borrow of `self`.
1625 let raw = unsafe { self.iter_allocated_chunks_raw() };
1626 ChunkIter {
1627 raw,
1628 bump: PhantomData,
1629 }
1630 }
1631
1632 /// Returns an iterator over raw pointers to chunks of allocated memory that
1633 /// this arena has bump allocated into.
1634 ///
1635 /// This is an unsafe version of [`iter_allocated_chunks()`](Bump::iter_allocated_chunks),
1636 /// with the caller responsible for safe usage of the returned pointers as
1637 /// well as ensuring that the iterator is not invalidated by new
1638 /// allocations.
1639 ///
1640 /// ## Safety
1641 ///
1642 /// Allocations from this arena must not be performed while the returned
1643 /// iterator is alive. If reading the chunk data (or casting to a reference)
1644 /// the caller must ensure that there exist no mutable references to
1645 /// previously allocated data.
1646 ///
1647 /// In addition, all of the caveats when reading the chunk data from
1648 /// [`iter_allocated_chunks()`](Bump::iter_allocated_chunks) still apply.
1649 pub unsafe fn iter_allocated_chunks_raw(&self) -> ChunkRawIter<'_> {
1650 ChunkRawIter {
1651 footer: self.current_chunk_footer.get(),
1652 bump: PhantomData,
1653 }
1654 }
1655
1656 /// Calculates the number of bytes currently allocated across all chunks in
1657 /// this bump arena.
1658 ///
1659 /// If you allocate types of different alignments or types with
1660 /// larger-than-typical alignment in the same arena, some padding
1661 /// bytes might get allocated in the bump arena. Note that those padding
1662 /// bytes will add to this method's resulting sum, so you cannot rely
1663 /// on it only counting the sum of the sizes of the things
1664 /// you've allocated in the arena.
1665 ///
1666 /// The allocated bytes do not include the size of bumpalo's metadata,
1667 /// so the amount of memory requested from the Rust allocator is higher
1668 /// than the returned value.
1669 ///
1670 /// ## Example
1671 ///
1672 /// ```
1673 /// let bump = bumpalo::Bump::new();
1674 /// let _x = bump.alloc_slice_fill_default::<u32>(5);
1675 /// let bytes = bump.allocated_bytes();
1676 /// assert!(bytes >= core::mem::size_of::<u32>() * 5);
1677 /// ```
1678 pub fn allocated_bytes(&self) -> usize {
1679 let footer = self.current_chunk_footer.get();
1680
1681 unsafe { footer.as_ref().allocated_bytes }
1682 }
1683
1684 /// Calculates the number of bytes requested from the Rust allocator for this `Bump`.
1685 ///
1686 /// This number is equal to the [`allocated_bytes()`](Self::allocated_bytes) plus
1687 /// the size of the bump metadata.
1688 pub fn allocated_bytes_including_metadata(&self) -> usize {
1689 let metadata_size =
1690 unsafe { self.iter_allocated_chunks_raw().count() * mem::size_of::<ChunkFooter>() };
1691 self.allocated_bytes() + metadata_size
1692 }
1693
1694 #[inline]
1695 unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool {
1696 let footer = self.current_chunk_footer.get();
1697 let footer = footer.as_ref();
1698 footer.ptr.get() == ptr
1699 }
1700
1701 #[inline]
1702 unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
1703 // If the pointer is the last allocation we made, we can reuse the bytes,
1704 // otherwise they are simply leaked -- at least until somebody calls reset().
1705 if self.is_last_allocation(ptr) {
1706 let ptr = NonNull::new_unchecked(ptr.as_ptr().add(layout.size()));
1707 self.current_chunk_footer.get().as_ref().ptr.set(ptr);
1708 }
1709 }
1710
1711 #[inline]
1712 unsafe fn shrink(
1713 &self,
1714 ptr: NonNull<u8>,
1715 old_layout: Layout,
1716 new_layout: Layout,
1717 ) -> Result<NonNull<u8>, AllocErr> {
1718 let old_size = old_layout.size();
1719 let new_size = new_layout.size();
1720 let align_is_compatible = old_layout.align() >= new_layout.align();
1721
1722 if !align_is_compatible {
1723 return Err(AllocErr);
1724 }
1725
1726 // This is how much space we would *actually* reclaim while satisfying
1727 // the requested alignment.
1728 let delta = round_down_to(old_size - new_size, new_layout.align());
1729
1730 if self.is_last_allocation(ptr)
1731 // Only reclaim the excess space (which requires a copy) if it
1732 // is worth it: we are actually going to recover "enough" space
1733 // and we can do a non-overlapping copy.
1734 && delta >= old_size / 2
1735 {
1736 let footer = self.current_chunk_footer.get();
1737 let footer = footer.as_ref();
1738
1739 // NB: new_ptr is aligned, because ptr *has to* be aligned, and we
1740 // made sure delta is aligned.
1741 let new_ptr = NonNull::new_unchecked(footer.ptr.get().as_ptr().add(delta));
1742 footer.ptr.set(new_ptr);
1743
1744 // NB: we know it is non-overlapping because of the size check
1745 // in the `if` condition.
1746 ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size);
1747
1748 Ok(new_ptr)
1749 } else {
1750 Ok(ptr)
1751 }
1752 }
1753
1754 #[inline]
1755 unsafe fn grow(
1756 &self,
1757 ptr: NonNull<u8>,
1758 old_layout: Layout,
1759 new_layout: Layout,
1760 ) -> Result<NonNull<u8>, AllocErr> {
1761 let old_size = old_layout.size();
1762 let new_size = new_layout.size();
1763 let align_is_compatible = old_layout.align() >= new_layout.align();
1764
1765 if align_is_compatible && self.is_last_allocation(ptr) {
1766 // Try to allocate the delta size within this same block so we can
1767 // reuse the currently allocated space.
1768 let delta = new_size - old_size;
1769 if let Some(p) =
1770 self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align())?)
1771 {
1772 ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size);
1773 return Ok(p);
1774 }
1775 }
1776
1777 // Fallback: do a fresh allocation and copy the existing data into it.
1778 let new_ptr = self.try_alloc_layout(new_layout)?;
1779 ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), old_size);
1780 Ok(new_ptr)
1781 }
1782}
1783
1784/// An iterator over each chunk of allocated memory that
1785/// an arena has bump allocated into.
1786///
1787/// The chunks are returned ordered by allocation time, with the most recently
1788/// allocated chunk being returned first.
1789///
1790/// The values inside each chunk are also ordered by allocation time, with the most
1791/// recent allocation being earlier in the slice.
1792///
1793/// This struct is created by the [`iter_allocated_chunks`] method on
1794/// [`Bump`]. See that function for a safety description regarding reading from the returned items.
1795///
1796/// [`Bump`]: struct.Bump.html
1797/// [`iter_allocated_chunks`]: struct.Bump.html#method.iter_allocated_chunks
1798#[derive(Debug)]
1799pub struct ChunkIter<'a> {
1800 raw: ChunkRawIter<'a>,
1801 bump: PhantomData<&'a mut Bump>,
1802}
1803
1804impl<'a> Iterator for ChunkIter<'a> {
1805 type Item = &'a [mem::MaybeUninit<u8>];
1806 fn next(&mut self) -> Option<&'a [mem::MaybeUninit<u8>]> {
1807 unsafe {
1808 let (ptr: *mut u8, len: usize) = self.raw.next()?;
1809 let slice: &[MaybeUninit] = slice::from_raw_parts(data:ptr as *const mem::MaybeUninit<u8>, len);
1810 Some(slice)
1811 }
1812 }
1813}
1814
1815impl<'a> iter::FusedIterator for ChunkIter<'a> {}
1816
1817/// An iterator over raw pointers to chunks of allocated memory that this
1818/// arena has bump allocated into.
1819///
1820/// See [`ChunkIter`] for details regarding the returned chunks.
1821///
1822/// This struct is created by the [`iter_allocated_chunks_raw`] method on
1823/// [`Bump`]. See that function for a safety description regarding reading from
1824/// the returned items.
1825///
1826/// [`Bump`]: struct.Bump.html
1827/// [`iter_allocated_chunks_raw`]: struct.Bump.html#method.iter_allocated_chunks_raw
1828#[derive(Debug)]
1829pub struct ChunkRawIter<'a> {
1830 footer: NonNull<ChunkFooter>,
1831 bump: PhantomData<&'a Bump>,
1832}
1833
1834impl Iterator for ChunkRawIter<'_> {
1835 type Item = (*mut u8, usize);
1836 fn next(&mut self) -> Option<(*mut u8, usize)> {
1837 unsafe {
1838 let foot: &ChunkFooter = self.footer.as_ref();
1839 if foot.is_empty() {
1840 return None;
1841 }
1842 let (ptr: *const u8, len: usize) = foot.as_raw_parts();
1843 self.footer = foot.prev.get();
1844 Some((ptr as *mut u8, len))
1845 }
1846 }
1847}
1848
1849impl iter::FusedIterator for ChunkRawIter<'_> {}
1850
1851#[inline(never)]
1852#[cold]
1853fn oom() -> ! {
1854 panic!("out of memory")
1855}
1856
1857unsafe impl<'a> alloc::Alloc for &'a Bump {
1858 #[inline(always)]
1859 unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
1860 self.try_alloc_layout(layout)
1861 }
1862
1863 #[inline]
1864 unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
1865 Bump::dealloc(self, ptr, layout)
1866 }
1867
1868 #[inline]
1869 unsafe fn realloc(
1870 &mut self,
1871 ptr: NonNull<u8>,
1872 layout: Layout,
1873 new_size: usize,
1874 ) -> Result<NonNull<u8>, AllocErr> {
1875 let old_size = layout.size();
1876
1877 if old_size == 0 {
1878 return self.try_alloc_layout(layout);
1879 }
1880
1881 let new_layout = layout_from_size_align(new_size, layout.align())?;
1882 if new_size <= old_size {
1883 self.shrink(ptr, layout, new_layout)
1884 } else {
1885 self.grow(ptr, layout, new_layout)
1886 }
1887 }
1888}
1889
1890#[cfg(any(feature = "allocator_api", feature = "allocator-api2"))]
1891unsafe impl<'a> Allocator for &'a Bump {
1892 fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
1893 self.try_alloc_layout(layout)
1894 .map(|p| unsafe {
1895 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), layout.size()))
1896 })
1897 .map_err(|_| AllocError)
1898 }
1899
1900 unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
1901 Bump::dealloc(self, ptr, layout)
1902 }
1903
1904 unsafe fn shrink(
1905 &self,
1906 ptr: NonNull<u8>,
1907 old_layout: Layout,
1908 new_layout: Layout,
1909 ) -> Result<NonNull<[u8]>, AllocError> {
1910 Bump::shrink(self, ptr, old_layout, new_layout)
1911 .map(|p| unsafe {
1912 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
1913 })
1914 .map_err(|_| AllocError)
1915 }
1916
1917 unsafe fn grow(
1918 &self,
1919 ptr: NonNull<u8>,
1920 old_layout: Layout,
1921 new_layout: Layout,
1922 ) -> Result<NonNull<[u8]>, AllocError> {
1923 Bump::grow(self, ptr, old_layout, new_layout)
1924 .map(|p| unsafe {
1925 NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
1926 })
1927 .map_err(|_| AllocError)
1928 }
1929
1930 unsafe fn grow_zeroed(
1931 &self,
1932 ptr: NonNull<u8>,
1933 old_layout: Layout,
1934 new_layout: Layout,
1935 ) -> Result<NonNull<[u8]>, AllocError> {
1936 let mut ptr = self.grow(ptr, old_layout, new_layout)?;
1937 ptr.as_mut()[old_layout.size()..].fill(0);
1938 Ok(ptr)
1939 }
1940}
1941
1942// NB: Only tests which require private types, fields, or methods should be in
1943// here. Anything that can just be tested via public API surface should be in
1944// `bumpalo/tests/all/*`.
1945#[cfg(test)]
1946mod tests {
1947 use super::*;
1948
1949 // Uses private type `ChunkFooter`.
1950 #[test]
1951 fn chunk_footer_is_five_words() {
1952 assert_eq!(mem::size_of::<ChunkFooter>(), mem::size_of::<usize>() * 6);
1953 }
1954
1955 // Uses private `alloc` module.
1956 #[test]
1957 #[allow(clippy::cognitive_complexity)]
1958 fn test_realloc() {
1959 use crate::alloc::Alloc;
1960
1961 unsafe {
1962 const CAPACITY: usize = 1024 - OVERHEAD;
1963 let mut b = Bump::with_capacity(CAPACITY);
1964
1965 // `realloc` doesn't shrink allocations that aren't "worth it".
1966 let layout = Layout::from_size_align(100, 1).unwrap();
1967 let p = b.alloc_layout(layout);
1968 let q = (&b).realloc(p, layout, 51).unwrap();
1969 assert_eq!(p, q);
1970 b.reset();
1971
1972 // `realloc` will shrink allocations that are "worth it".
1973 let layout = Layout::from_size_align(100, 1).unwrap();
1974 let p = b.alloc_layout(layout);
1975 let q = (&b).realloc(p, layout, 50).unwrap();
1976 assert!(p != q);
1977 b.reset();
1978
1979 // `realloc` will reuse the last allocation when growing.
1980 let layout = Layout::from_size_align(10, 1).unwrap();
1981 let p = b.alloc_layout(layout);
1982 let q = (&b).realloc(p, layout, 11).unwrap();
1983 assert_eq!(q.as_ptr() as usize, p.as_ptr() as usize - 1);
1984 b.reset();
1985
1986 // `realloc` will allocate a new chunk when growing the last
1987 // allocation, if need be.
1988 let layout = Layout::from_size_align(1, 1).unwrap();
1989 let p = b.alloc_layout(layout);
1990 let q = (&b).realloc(p, layout, CAPACITY + 1).unwrap();
1991 assert!(q.as_ptr() as usize != p.as_ptr() as usize - CAPACITY);
1992 b = Bump::with_capacity(CAPACITY);
1993
1994 // `realloc` will allocate and copy when reallocating anything that
1995 // wasn't the last allocation.
1996 let layout = Layout::from_size_align(1, 1).unwrap();
1997 let p = b.alloc_layout(layout);
1998 let _ = b.alloc_layout(layout);
1999 let q = (&b).realloc(p, layout, 2).unwrap();
2000 assert!(q.as_ptr() as usize != p.as_ptr() as usize - 1);
2001 b.reset();
2002 }
2003 }
2004
2005 // Uses our private `alloc` module.
2006 #[test]
2007 fn invalid_read() {
2008 use alloc::Alloc;
2009
2010 let mut b = &Bump::new();
2011
2012 unsafe {
2013 let l1 = Layout::from_size_align(12000, 4).unwrap();
2014 let p1 = Alloc::alloc(&mut b, l1).unwrap();
2015
2016 let l2 = Layout::from_size_align(1000, 4).unwrap();
2017 Alloc::alloc(&mut b, l2).unwrap();
2018
2019 let p1 = b.realloc(p1, l1, 24000).unwrap();
2020 let l3 = Layout::from_size_align(24000, 4).unwrap();
2021 b.realloc(p1, l3, 48000).unwrap();
2022 }
2023 }
2024}
2025