use core::alloc::Layout;
use core::mem;
use core::mem::{align_of, size_of};
use core::ptr::null_mut;
use core::ptr::NonNull;

use crate::{align_down_size, align_up_size};

use super::align_up;

/// A sorted list of holes. It uses the holes themselves to store its nodes.
pub struct HoleList {
    pub(crate) first: Hole, // dummy
    pub(crate) bottom: *mut u8,
    pub(crate) top: *mut u8,
    pub(crate) pending_extend: u8,
}

pub(crate) struct Cursor {
    prev: NonNull<Hole>,
    hole: NonNull<Hole>,
    top: *mut u8,
}

/// A block containing free memory. It points to the next hole and thus forms a linked list.
pub(crate) struct Hole {
    pub size: usize,
    pub next: Option<NonNull<Hole>>,
}

/// Basic information about a hole.
#[derive(Debug, Clone, Copy)]
struct HoleInfo {
    addr: *mut u8,
    size: usize,
}

impl Cursor {
    fn next(mut self) -> Option<Self> {
        unsafe {
            self.hole.as_mut().next.map(|nhole| Cursor {
                prev: self.hole,
                hole: nhole,
                top: self.top,
            })
        }
    }

    fn current(&self) -> &Hole {
        unsafe { self.hole.as_ref() }
    }

    fn previous(&self) -> &Hole {
        unsafe { self.prev.as_ref() }
    }

    // On success, it returns the new allocation, and the linked list has been updated
    // to accommodate any new holes and allocation. On error, it returns the cursor
    // unmodified, and has made no changes to the linked list of holes.
    fn split_current(self, required_layout: Layout) -> Result<(*mut u8, usize), Self> {
        let front_padding;
        let alloc_ptr;
        let alloc_size;
        let back_padding;

        // Here we create a scope, JUST to make sure that any created references do not
        // live to the point where we start doing pointer surgery below.
        {
            let hole_size = self.current().size;
            let hole_addr_u8 = self.hole.as_ptr().cast::<u8>();
            let required_size = required_layout.size();
            let required_align = required_layout.align();

            // Quick check: If the new item is larger than the current hole, it's never gunna
            // work. Go ahead and bail early to save ourselves some math.
            if hole_size < required_size {
                return Err(self);
            }

            // Attempt to fracture the current hole into the following parts:
            // ([front_padding], allocation, [back_padding])
            //
            // The paddings are optional, and only placed if required.
            //
            // First, figure out if front padding is necessary. This would be necessary if the new
            // allocation has a larger alignment requirement than the current hole, and we didn't get
            // lucky that the current position was well-aligned enough for the new item.
            let aligned_addr = if hole_addr_u8 == align_up(hole_addr_u8, required_align) {
                // The hole already has the required alignment, so no front padding is needed.
                front_padding = None;
                hole_addr_u8
            } else {
                // Unfortunately, we did not get lucky. Instead: Push the "starting location" FORWARD the size
                // of a hole node, to guarantee there is at least enough room for the hole header, and
                // potentially additional space.
                let new_start = hole_addr_u8.wrapping_add(HoleList::min_size());

                let aligned_addr = align_up(new_start, required_align);
                front_padding = Some(HoleInfo {
                    // Our new front padding will exist at the same location as the previous hole,
                    // it will just have a smaller size after we have chopped off the "tail" for
                    // the allocation.
                    addr: hole_addr_u8,
                    size: (aligned_addr as usize) - (hole_addr_u8 as usize),
                });
                aligned_addr
            };

            // Okay, now that we found space, we need to see if the decisions we just made
            // ACTUALLY fit in the previous hole space
            let allocation_end = aligned_addr.wrapping_add(required_size);
            let hole_end = hole_addr_u8.wrapping_add(hole_size);

            if allocation_end > hole_end {
                // hole is too small
                return Err(self);
            }

            // Yes! We have successfully placed our allocation as well.
            alloc_ptr = aligned_addr;
            alloc_size = required_size;

            // Okay, time to move onto the back padding.
            let back_padding_size = hole_end as usize - allocation_end as usize;
            back_padding = if back_padding_size == 0 {
                None
            } else {
                // NOTE: Because we always use `HoleList::align_layout`, the size of
                // the new allocation is always "rounded up" to cover any partial gaps that
                // would have occurred. For this reason, we DON'T need to "round up"
                // to account for an unaligned hole spot.
                let hole_layout = Layout::new::<Hole>();
                let back_padding_start = align_up(allocation_end, hole_layout.align());
                let back_padding_end = back_padding_start.wrapping_add(hole_layout.size());

                // Will the proposed new back padding actually fit in the old hole slot?
                if back_padding_end <= hole_end {
                    // Yes, it does! Place a back padding node
                    Some(HoleInfo {
                        addr: back_padding_start,
                        size: back_padding_size,
                    })
                } else {
                    // No, it does not. We don't want to leak any heap bytes, so we
                    // consider this hole unsuitable for the requested allocation.
                    return Err(self);
                }
            };
        }

        ////////////////////////////////////////////////////////////////////////////
        // This is where we actually perform surgery on the linked list.
        ////////////////////////////////////////////////////////////////////////////
        let Cursor {
            mut prev, mut hole, ..
        } = self;
        // Remove the current location from the previous node
        unsafe {
            prev.as_mut().next = None;
        }
        // Take the next node out of our current node
        let maybe_next_addr: Option<NonNull<Hole>> = unsafe { hole.as_mut().next.take() };

        // As of now, the old `Hole` is no more. We are about to replace it with one or more of
        // the front padding, the allocation, and the back padding.

        match (front_padding, back_padding) {
            (None, None) => {
                // No padding at all, how lucky! We still need to connect the PREVIOUS node
                // to the NEXT node, if there was one
                unsafe {
                    prev.as_mut().next = maybe_next_addr;
                }
            }
            (None, Some(singlepad)) | (Some(singlepad), None) => unsafe {
                // We have front padding OR back padding, but not both.
                //
                // Replace the old node with the new single node. We need to stitch the new node
                // into the linked list. Start by writing the padding into the proper location
                let singlepad_ptr = singlepad.addr.cast::<Hole>();
                singlepad_ptr.write(Hole {
                    size: singlepad.size,
                    // If the old hole had a next pointer, the single padding now takes
                    // "ownership" of that link
                    next: maybe_next_addr,
                });

                // Then connect the OLD previous to the NEW single padding
                prev.as_mut().next = Some(NonNull::new_unchecked(singlepad_ptr));
            },
            (Some(frontpad), Some(backpad)) => unsafe {
                // We have front padding AND back padding.
                //
                // We need to stitch them together as two nodes where there used to
                // only be one. Start with the back padding.
                let backpad_ptr = backpad.addr.cast::<Hole>();
                backpad_ptr.write(Hole {
                    size: backpad.size,
                    // If the old hole had a next pointer, the BACK padding now takes
                    // "ownership" of that link
                    next: maybe_next_addr,
                });

                // Now we emplace the front padding, and link it to both the back padding,
                // and the old previous
                let frontpad_ptr = frontpad.addr.cast::<Hole>();
                frontpad_ptr.write(Hole {
                    size: frontpad.size,
                    // We now connect the FRONT padding to the BACK padding
                    next: Some(NonNull::new_unchecked(backpad_ptr)),
                });

                // Then connect the OLD previous to the NEW FRONT padding
                prev.as_mut().next = Some(NonNull::new_unchecked(frontpad_ptr));
            },
        }

        // Well that went swimmingly! Hand off the allocation, with surgery performed successfully!
        Ok((alloc_ptr, alloc_size))
    }
}

// See if we can extend this hole towards the end of the allocation region
// If so: increase the size of the node. If not: keep the node as-is
fn check_merge_top(mut node: NonNull<Hole>, top: *mut u8) {
    let node_u8 = node.as_ptr().cast::<u8>();
    let node_sz = unsafe { node.as_ref().size };

    // If this is the last node, we need to see if we need to merge to the end
    let end = node_u8.wrapping_add(node_sz);
    let hole_layout = Layout::new::<Hole>();
    if end < top {
        let next_hole_end = align_up(end, hole_layout.align()).wrapping_add(hole_layout.size());

        if next_hole_end > top {
            let offset = (top as usize) - (end as usize);
            unsafe {
                node.as_mut().size += offset;
            }
        }
    }
}

// See if we can scoot this hole back to the bottom of the allocation region
// If so: create and return the new hole. If not: return the existing hole
fn check_merge_bottom(node: NonNull<Hole>, bottom: *mut u8) -> NonNull<Hole> {
    debug_assert_eq!(bottom as usize % align_of::<Hole>(), 0);

    if bottom.wrapping_add(core::mem::size_of::<Hole>()) > node.as_ptr().cast::<u8>() {
        let offset = (node.as_ptr() as usize) - (bottom as usize);
        let size = unsafe { node.as_ref() }.size + offset;
        unsafe { make_hole(bottom, size) }
    } else {
        node
    }
}

impl HoleList {
    /// Creates an empty `HoleList`.
    pub const fn empty() -> HoleList {
        HoleList {
            first: Hole {
                size: 0,
                next: None,
            },
            bottom: null_mut(),
            top: null_mut(),
            pending_extend: 0,
        }
    }

    pub(crate) fn cursor(&mut self) -> Option<Cursor> {
        if let Some(hole) = self.first.next {
            Some(Cursor {
                hole,
                prev: NonNull::new(&mut self.first)?,
                top: self.top,
            })
        } else {
            None
        }
    }

    #[cfg(any(test, fuzzing))]
    #[allow(dead_code)]
    pub(crate) fn debug(&mut self) {
        if let Some(cursor) = self.cursor() {
            let mut cursor = cursor;
            loop {
                println!(
                    "prev: {:?}[{}], hole: {:?}[{}]",
                    cursor.previous() as *const Hole,
                    cursor.previous().size,
                    cursor.current() as *const Hole,
                    cursor.current().size,
                );
                if let Some(c) = cursor.next() {
                    cursor = c;
                } else {
                    println!("Done!");
                    return;
                }
            }
        } else {
            println!("No holes");
        }
    }

    /// Creates a `HoleList` that contains the given hole.
    ///
    /// The `hole_addr` pointer is automatically aligned, so the `bottom`
    /// field might be larger than the given `hole_addr`.
    ///
    /// The given `hole_size` must be large enough to store the required
    /// metadata, otherwise this function will panic. Depending on the
    /// alignment of the `hole_addr` pointer, the minimum size is between
    /// `2 * size_of::<usize>` and `3 * size_of::<usize>`.
    ///
    /// The usable size for allocations will be truncated to the nearest
    /// alignment of `align_of::<usize>`. Any extra bytes left at the end
    /// will be reclaimed once sufficient additional space is given to
    /// [`extend`][crate::Heap::extend].
    ///
    /// # Safety
    ///
    /// This function is unsafe because it creates a hole at the given `hole_addr`.
    /// This can cause undefined behavior if this address is invalid or if memory from the
    /// `[hole_addr, hole_addr+size)` range is used somewhere else.
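    ///
    /// # Example
    ///
    /// A minimal sketch of creating a `HoleList` over a static buffer. The
    /// block is marked `ignore` because it needs real backing memory at run
    /// time and because the exact import path of `HoleList` depends on how
    /// this module is exposed by the crate.
    ///
    /// ```ignore
    /// use core::mem::size_of;
    ///
    /// // Backing storage for the heap; `u64` keeps it suitably aligned.
    /// static mut BUFFER: [u64; 32] = [0; 32];
    ///
    /// let list = unsafe {
    ///     // Safety: we own this memory range and nothing else uses it.
    ///     HoleList::new(BUFFER.as_mut_ptr().cast(), 32 * size_of::<u64>())
    /// };
    /// // `list` now contains a single hole covering (almost all of) the buffer.
    /// ```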
    pub unsafe fn new(hole_addr: *mut u8, hole_size: usize) -> HoleList {
        assert_eq!(size_of::<Hole>(), Self::min_size());
        assert!(hole_size >= size_of::<Hole>());

        let aligned_hole_addr = align_up(hole_addr, align_of::<Hole>());
        let requested_hole_size = hole_size - ((aligned_hole_addr as usize) - (hole_addr as usize));
        let aligned_hole_size = align_down_size(requested_hole_size, align_of::<Hole>());
        assert!(aligned_hole_size >= size_of::<Hole>());

        let ptr = aligned_hole_addr as *mut Hole;
        ptr.write(Hole {
            size: aligned_hole_size,
            next: None,
        });

        assert_eq!(
            hole_addr.wrapping_add(hole_size),
            aligned_hole_addr.wrapping_add(requested_hole_size)
        );

        HoleList {
            first: Hole {
                size: 0,
                next: Some(NonNull::new_unchecked(ptr)),
            },
            bottom: aligned_hole_addr,
            top: aligned_hole_addr.wrapping_add(aligned_hole_size),
            pending_extend: (requested_hole_size - aligned_hole_size) as u8,
        }
    }

    /// Aligns the given layout for use with `HoleList`.
    ///
    /// Returns a layout with size increased to fit at least `HoleList::min_size` and proper
    /// alignment of a `Hole`.
    ///
    /// The [`allocate_first_fit`][HoleList::allocate_first_fit] and
    /// [`deallocate`][HoleList::deallocate] methods perform the required alignment
    /// themselves, so calling this function manually is not necessary.
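    ///
    /// # Example
    ///
    /// A small illustration of the rounding behaviour. The block is marked
    /// `ignore` because the exact import path of `HoleList` depends on how
    /// this module is exposed by the crate.
    ///
    /// ```ignore
    /// use core::alloc::Layout;
    ///
    /// // A 1-byte request is padded up to `HoleList::min_size()` and the size
    /// // is rounded to a multiple of the `Hole` alignment (a `usize` on common
    /// // targets); the requested alignment itself is left unchanged.
    /// let layout = Layout::from_size_align(1, 1).unwrap();
    /// let aligned = HoleList::align_layout(layout);
    /// assert!(aligned.size() >= HoleList::min_size());
    /// assert_eq!(aligned.size() % core::mem::align_of::<usize>(), 0);
    /// assert_eq!(aligned.align(), 1);
    /// ```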
    pub fn align_layout(layout: Layout) -> Layout {
        let mut size = layout.size();
        if size < Self::min_size() {
            size = Self::min_size();
        }
        let size = align_up_size(size, mem::align_of::<Hole>());
        Layout::from_size_align(size, layout.align()).unwrap()
    }

    /// Searches the list for a big enough hole.
    ///
    /// A hole is big enough if it can hold an allocation of `layout.size()` bytes with
    /// the given `layout.align()`. If such a hole is found in the list, a block of the
    /// required size is allocated from it. Then the start address of that
    /// block and the aligned layout are returned. The automatic layout alignment is required
    /// because the `HoleList` has some additional layout requirements for each memory block.
    ///
    /// This function uses the “first fit” strategy, so it uses the first hole that is big
    /// enough. Thus the runtime is in O(n) but it should be reasonably fast for small allocations.
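    ///
    /// # Example
    ///
    /// A rough usage sketch (marked `ignore`: it needs a real backing buffer,
    /// and the exact import path of `HoleList` depends on how this module is
    /// exposed by the crate):
    ///
    /// ```ignore
    /// use core::alloc::Layout;
    ///
    /// static mut BUFFER: [u64; 64] = [0; 64];
    /// let mut list = unsafe { HoleList::new(BUFFER.as_mut_ptr().cast(), 512) };
    ///
    /// // Ask for 32 bytes with 8-byte alignment. On success we get the block
    /// // pointer plus the (possibly enlarged) layout that was actually used.
    /// let layout = Layout::from_size_align(32, 8).unwrap();
    /// let (ptr, used_layout) = list.allocate_first_fit(layout).unwrap();
    ///
    /// // The block is later returned with `deallocate`, using the same layout.
    /// unsafe { list.deallocate(ptr, used_layout) };
    /// ```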
    //
    // NOTE: We could probably replace this with an `Option` instead of a `Result` in a later
    // release to remove this clippy warning
    #[allow(clippy::result_unit_err)]
    pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<(NonNull<u8>, Layout), ()> {
        let aligned_layout = Self::align_layout(layout);
        let mut cursor = self.cursor().ok_or(())?;

        loop {
            match cursor.split_current(aligned_layout) {
                Ok((ptr, _len)) => {
                    return Ok((NonNull::new(ptr).ok_or(())?, aligned_layout));
                }
                Err(curs) => {
                    cursor = curs.next().ok_or(())?;
                }
            }
        }
    }

    /// Frees the allocation given by `ptr` and `layout`.
    ///
    /// This function walks the list and inserts the given block at the correct place. If the freed
    /// block is adjacent to another free block, the blocks are merged again.
    /// This operation is in `O(n)` since the list needs to be sorted by address.
    ///
    /// [`allocate_first_fit`]: HoleList::allocate_first_fit
    ///
    /// # Safety
    ///
    /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
    /// identical layout. Undefined behavior may occur for invalid arguments.
    /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and
    /// returns the aligned layout.
    pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Layout {
        let aligned_layout = Self::align_layout(layout);
        deallocate(self, ptr.as_ptr(), aligned_layout.size());
        aligned_layout
    }

    /// Returns the minimal allocation size. Smaller allocations or deallocations are not allowed.
    pub fn min_size() -> usize {
        size_of::<usize>() * 2
    }

    /// Returns information about the first hole for test purposes.
    #[cfg(test)]
    pub fn first_hole(&self) -> Option<(*const u8, usize)> {
        self.first.next.as_ref().map(|hole| {
            (hole.as_ptr() as *mut u8 as *const u8, unsafe {
                hole.as_ref().size
            })
        })
    }

    pub(crate) unsafe fn extend(&mut self, by: usize) {
        assert!(!self.top.is_null(), "tried to extend an empty heap");

        let top = self.top;

        let dead_space = top.align_offset(align_of::<Hole>());
        debug_assert_eq!(
            0, dead_space,
            "dead space detected during extend: {} bytes. This means top was unaligned",
            dead_space
        );

        debug_assert!(
            (self.pending_extend as usize) < Self::min_size(),
            "pending extend was larger than expected"
        );

        // join this extend request with any pending (but not yet acted on) extension
        let extend_by = self.pending_extend as usize + by;

        let minimum_extend = Self::min_size();
        if extend_by < minimum_extend {
            self.pending_extend = extend_by as u8;
            return;
        }

        // only extend up to another valid boundary
        let new_hole_size = align_down_size(extend_by, align_of::<Hole>());
        let layout = Layout::from_size_align(new_hole_size, 1).unwrap();

        // instantiate the hole by forcing a deallocation on the new memory
        self.deallocate(NonNull::new_unchecked(top as *mut u8), layout);
        self.top = top.add(new_hole_size);

        // save extra bytes given to extend that weren't aligned to the hole size
        self.pending_extend = (extend_by - new_hole_size) as u8;
    }
}

unsafe fn make_hole(addr: *mut u8, size: usize) -> NonNull<Hole> {
    let hole_addr = addr.cast::<Hole>();
    debug_assert_eq!(
        addr as usize % align_of::<Hole>(),
        0,
        "Hole address not aligned!",
    );
    hole_addr.write(Hole { size, next: None });
    NonNull::new_unchecked(hole_addr)
}

impl Cursor {
    fn try_insert_back(self, node: NonNull<Hole>, bottom: *mut u8) -> Result<Self, Self> {
        // Covers the case where the new hole exists BEFORE the current pointer,
        // which only happens when previous is the stub pointer
        if node < self.hole {
            let node_u8 = node.as_ptr().cast::<u8>();
            let node_size = unsafe { node.as_ref().size };
            let hole_u8 = self.hole.as_ptr().cast::<u8>();

            assert!(
                node_u8.wrapping_add(node_size) <= hole_u8,
                "Freed node aliases existing hole! Bad free?",
            );
            debug_assert_eq!(self.previous().size, 0);

            let Cursor {
                mut prev,
                hole,
                top,
            } = self;
            unsafe {
                let mut node = check_merge_bottom(node, bottom);
                prev.as_mut().next = Some(node);
                node.as_mut().next = Some(hole);
            }
            Ok(Cursor {
                prev,
                hole: node,
                top,
            })
        } else {
            Err(self)
        }
    }

    fn try_insert_after(&mut self, mut node: NonNull<Hole>) -> Result<(), ()> {
        let node_u8 = node.as_ptr().cast::<u8>();
        let node_size = unsafe { node.as_ref().size };

        // If we have a next, does the node overlap next?
        if let Some(next) = self.current().next.as_ref() {
            if node < *next {
                let node_u8 = node_u8 as *const u8;
                assert!(
                    node_u8.wrapping_add(node_size) <= next.as_ptr().cast::<u8>(),
                    "Freed node aliases existing hole! Bad free?",
                );
            } else {
                // The new hole isn't between current and next.
                return Err(());
            }
        }

        // At this point, we either have no "next" pointer, or the hole is
        // between current and "next". The following assert can only trigger
        // if we've gotten our list out of order.
        debug_assert!(self.hole < node, "Hole list out of order?");

        let hole_u8 = self.hole.as_ptr().cast::<u8>();
        let hole_size = self.current().size;

        // Does hole overlap node?
        assert!(
            hole_u8.wrapping_add(hole_size) <= node_u8,
            "Freed node ({:?}) aliases existing hole ({:?}[{}])! Bad free?",
            node_u8,
            hole_u8,
            hole_size,
        );

        // All good! Let's insert that after.
        unsafe {
            let maybe_next = self.hole.as_mut().next.replace(node);
            node.as_mut().next = maybe_next;
        }

        Ok(())
    }

    // Merge the current node with up to n following nodes
    fn try_merge_next_n(self, max: usize) {
        let Cursor {
            prev: _,
            mut hole,
            top,
            ..
        } = self;

        for _ in 0..max {
            // Is there a next node?
            let mut next = if let Some(next) = unsafe { hole.as_mut() }.next.as_ref() {
                *next
            } else {
                // Since there is no NEXT node, we need to check whether the current
                // hole SHOULD extend to the end, but doesn't. This would happen when
                // there isn't enough remaining space to place a hole after the current
                // node's placement.
                check_merge_top(hole, top);
                return;
            };

            // Can we directly merge these? e.g. are they touching?
            //
            // NOTE: Because we always use `HoleList::align_layout`, the size of
            // the new hole is always "rounded up" to cover any partial gaps that
            // would have occurred. For this reason, we DON'T need to "round up"
            // to account for an unaligned hole spot.
            let hole_u8 = hole.as_ptr().cast::<u8>();
            let hole_sz = unsafe { hole.as_ref().size };
            let next_u8 = next.as_ptr().cast::<u8>();
            let end = hole_u8.wrapping_add(hole_sz);

            let touching = end == next_u8;

            if touching {
                let next_sz;
                let next_next;
                unsafe {
                    let next_mut = next.as_mut();
                    next_sz = next_mut.size;
                    next_next = next_mut.next.take();
                }
                unsafe {
                    let hole_mut = hole.as_mut();
                    hole_mut.next = next_next;
                    hole_mut.size += next_sz;
                }
                // Okay, we just merged the next item. DON'T move the cursor, as we can
                // just try to merge the next_next, which is now our next.
            } else {
                // Welp, not touching, can't merge. Move to the next node.
                hole = next;
            }
        }
    }
}

/// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to
/// find the correct place (the list is sorted by address).
fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) {
    // Start off by just making this allocation a hole where it stands.
    // We'll attempt to merge it with other nodes once we figure out where
    // it should live
    let hole = unsafe { make_hole(addr, size) };

    // Now, try to get a cursor to the list - this only works if we have at least
    // one non-"dummy" hole in the list
    let cursor = if let Some(cursor) = list.cursor() {
        cursor
    } else {
        // Oh hey, there are no "real" holes at all. That means this just
        // becomes the only "real" hole! Check if this is touching the end
        // or the beginning of the allocation range
        let hole = check_merge_bottom(hole, list.bottom);
        check_merge_top(hole, list.top);
        list.first.next = Some(hole);
        return;
    };

    // First, check if we can just insert this node at the top of the list. If the
    // insertion succeeded, then our cursor now points to the NEW node, behind the
    // previous location the cursor was pointing to.
    //
    // Otherwise, our cursor will point at the current non-"dummy" head of the list
    let (cursor, n) = match cursor.try_insert_back(hole, list.bottom) {
        Ok(cursor) => {
            // Yup! It lives at the front of the list. Hooray! Attempt to merge
            // it with just ONE next node, since it is at the front of the list
            (cursor, 1)
        }
        Err(mut cursor) => {
            // Nope. It lives somewhere else. Advance the list until we find its home
            while let Err(()) = cursor.try_insert_after(hole) {
                cursor = cursor
                    .next()
                    .expect("Reached end of holes without finding deallocation hole!");
            }
            // Great! We found a home for it, our cursor is now JUST BEFORE the new
            // node we inserted, so we need to try to merge up to twice: One to combine
            // the current node to the new node, then once more to combine the new node
            // with the node after that.
            (cursor, 2)
        }
    };

    // We now need to merge up to two times to combine the current node with the next
    // two nodes.
    cursor.try_merge_next_n(n);
}

#[cfg(test)]
pub mod test {
    use super::HoleList;
    use crate::{align_down_size, test::new_heap};
    use core::mem::size_of;
    use std::{alloc::Layout, convert::TryInto, prelude::v1::*, ptr::NonNull};

    #[test]
    fn cursor() {
        let mut heap = new_heap();
        let curs = heap.holes.cursor().unwrap();
        // This is the "dummy" node
        assert_eq!(curs.previous().size, 0);
        // This is the "full" heap
        assert_eq!(
            curs.current().size,
            align_down_size(1000, size_of::<usize>())
        );
        // There is no other hole
        assert!(curs.next().is_none());
    }

    #[test]
    fn aff() {
        let mut heap = new_heap();
        let reqd = Layout::from_size_align(256, 1).unwrap();
        let _ = heap.allocate_first_fit(reqd).unwrap();
    }
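
    /// A minimal sketch of hole merging on free: allocate two adjacent blocks
    /// from the test heap, free both, and check that the coalesced hole can
    /// serve a request spanning both of them. The concrete sizes are
    /// illustrative choices, assuming the ~1000-byte heap from `new_heap`.
    #[test]
    fn merge_on_free() {
        let mut heap = new_heap();
        let layout = Layout::from_size_align(256, 1).unwrap();
        let a = heap.allocate_first_fit(layout).unwrap();
        let b = heap.allocate_first_fit(layout).unwrap();
        unsafe {
            heap.deallocate(a, layout);
            heap.deallocate(b, layout);
        }
        // If the two freed blocks (and the remaining tail of the heap) were
        // merged back together, a single 512-byte allocation must succeed.
        let big = Layout::from_size_align(512, 1).unwrap();
        assert!(heap.allocate_first_fit(big).is_ok());
    }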

    /// Tests `HoleList::new` with the minimal allowed `hole_size`.
    #[test]
    fn hole_list_new_min_size() {
        // define an array of `u64` instead of `u8` for alignment
        static mut HEAP: [u64; 2] = [0; 2];
        let heap_start = unsafe { HEAP.as_ptr() as usize };
        let heap =
            unsafe { HoleList::new(HEAP.as_mut_ptr().cast(), 2 * core::mem::size_of::<usize>()) };
        assert_eq!(heap.bottom as usize, heap_start);
        assert_eq!(heap.top as usize, heap_start + 2 * size_of::<usize>());
        assert_eq!(heap.first.size, 0); // dummy
        assert_eq!(
            heap.first.next,
            Some(NonNull::new(heap.bottom.cast())).unwrap()
        );
        assert_eq!(
            unsafe { heap.first.next.as_ref().unwrap().as_ref() }.size,
            2 * core::mem::size_of::<usize>()
        );
        assert_eq!(unsafe { &*(heap.first.next.unwrap().as_ptr()) }.next, None);
    }

    /// Tests that `HoleList::new` aligns the `hole_addr` correctly and adjusts the size
    /// accordingly.
    #[test]
    fn hole_list_new_align() {
        // define an array of `u64` instead of `u8` for alignment
        static mut HEAP: [u64; 3] = [0; 3];

        let heap_start: *mut u8 = unsafe { HEAP.as_mut_ptr().add(1) }.cast();
        // initialize the HoleList with a hole_addr one byte before `heap_start`
        // -> the function should align it up to `heap_start`
        let heap =
            unsafe { HoleList::new(heap_start.sub(1), 2 * core::mem::size_of::<usize>() + 1) };
        assert_eq!(heap.bottom, heap_start);
        assert_eq!(heap.top.cast(), unsafe {
            // one byte less than the `hole_size` given to `new` because of alignment
            heap_start.add(2 * core::mem::size_of::<usize>())
        });

        assert_eq!(heap.first.size, 0); // dummy
        assert_eq!(
            heap.first.next,
            Some(NonNull::new(heap.bottom.cast())).unwrap()
        );
        assert_eq!(
            unsafe { &*(heap.first.next.unwrap().as_ptr()) }.size,
            unsafe { heap.top.offset_from(heap.bottom) }
                .try_into()
                .unwrap()
        );
        assert_eq!(unsafe { &*(heap.first.next.unwrap().as_ptr()) }.next, None);
    }

    #[test]
    #[should_panic]
    fn hole_list_new_too_small() {
        // define an array of `u64` instead of `u8` for alignment
        static mut HEAP: [u64; 3] = [0; 3];

        let heap_start: *mut u8 = unsafe { HEAP.as_mut_ptr().add(1) }.cast();
        // initialize the HoleList with a hole_addr one byte before `heap_start`
        // -> the function should align it up to `heap_start`, but then the
        // available size is too small to store a hole -> it should panic
        unsafe { HoleList::new(heap_start.sub(1), 2 * core::mem::size_of::<usize>()) };
    }

    #[test]
    #[should_panic]
    fn extend_empty() {
        unsafe { HoleList::empty().extend(16) };
    }
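
    /// A small illustrative check of the alignment handling in
    /// `Cursor::split_current`: request an alignment larger than the heap's
    /// natural alignment and verify that the returned pointer honours it.
    /// (The 64-byte figure is an arbitrary choice for this sketch.)
    #[test]
    fn allocate_with_large_alignment() {
        let mut heap = new_heap();
        let layout = Layout::from_size_align(64, 64).unwrap();
        let ptr = heap.allocate_first_fit(layout).unwrap();
        assert_eq!(ptr.as_ptr() as usize % 64, 0);
    }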
}