| 1 | //===-- Implementation header for a block of memory -------------*- C++ -*-===// |
| 2 | // |
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | // See https://llvm.org/LICENSE.txt for license information. |
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | // |
| 7 | //===----------------------------------------------------------------------===// |
| 8 | |
| 9 | #ifndef LLVM_LIBC_SRC___SUPPORT_BLOCK_H |
| 10 | #define LLVM_LIBC_SRC___SUPPORT_BLOCK_H |
| 11 | |
| 12 | #include "src/__support/CPP/algorithm.h" |
| 13 | #include "src/__support/CPP/cstddef.h" |
| 14 | #include "src/__support/CPP/limits.h" |
| 15 | #include "src/__support/CPP/new.h" |
| 16 | #include "src/__support/CPP/optional.h" |
| 17 | #include "src/__support/CPP/span.h" |
| 18 | #include "src/__support/CPP/type_traits.h" |
| 19 | #include "src/__support/libc_assert.h" |
| 20 | #include "src/__support/macros/config.h" |
| 21 | #include "src/__support/math_extras.h" |
| 22 | |
| 23 | #include <stdint.h> |
| 24 | |
| 25 | namespace LIBC_NAMESPACE_DECL { |
| 26 | |
| 27 | /// Returns the value rounded down to the nearest multiple of alignment. |
| 28 | LIBC_INLINE constexpr size_t align_down(size_t value, size_t alignment) { |
| 29 | // Note this shouldn't overflow since the result will always be <= value. |
| 30 | return (value / alignment) * alignment; |
| 31 | } |
| 32 | |
| 33 | /// Returns the value rounded up to the nearest multiple of alignment. May wrap |
| 34 | /// around. |
| 35 | LIBC_INLINE constexpr size_t align_up(size_t value, size_t alignment) { |
| 36 | return align_down(value + alignment - 1, alignment); |
| 37 | } |
| 38 | |
| 39 | using ByteSpan = cpp::span<LIBC_NAMESPACE::cpp::byte>; |
| 40 | using cpp::optional; |
| 41 | |
| 42 | /// Memory region with links to adjacent blocks. |
| 43 | /// |
| 44 | /// The blocks store their offsets to the previous and next blocks. The latter |
| 45 | /// is also the block's size. |
| 46 | /// |
| 47 | /// All blocks have their usable space aligned to some multiple of max_align_t. |
| 48 | /// This also implies that block outer sizes are aligned to max_align_t. |
| 49 | /// |
| 50 | /// As an example, the diagram below represents two contiguous `Block`s. The |
| 51 | /// indices indicate byte offsets: |
| 52 | /// |
| 53 | /// @code{.unparsed} |
| 54 | /// Block 1: |
| 55 | /// +---------------------+--------------+ |
| 56 | /// | Header | Usable space | |
| 57 | /// +----------+----------+--------------+ |
| 58 | /// | prev | next | | |
| 59 | /// | 0......3 | 4......7 | 8........227 | |
| 60 | /// | 00000000 | 00000230 | <app data> | |
| 61 | /// +----------+----------+--------------+ |
| 62 | /// Block 2: |
| 63 | /// +---------------------+--------------+ |
| 64 | /// | Header | Usable space | |
| 65 | /// +----------+----------+--------------+ |
| 66 | /// | prev | next | | |
| 67 | /// | 0......3 | 4......7 | 8........827 | |
| 68 | /// | 00000230 | 00000830 | f7f7....f7f7 | |
| 69 | /// +----------+----------+--------------+ |
| 70 | /// @endcode |
| 71 | /// |
| 72 | /// As a space optimization, when a block is allocated, it consumes the prev |
| 73 | /// field of the following block: |
| 74 | /// |
| 75 | /// Block 1 (used): |
| 76 | /// +---------------------+--------------+ |
| 77 | /// | Header | Usable space | |
| 78 | /// +----------+----------+--------------+ |
| 79 | /// | prev | next | | |
| 80 | /// | 0......3 | 4......7 | 8........230 | |
| 81 | /// | 00000000 | 00000230 | <app data> | |
| 82 | /// +----------+----------+--------------+ |
| 83 | /// Block 2: |
| 84 | /// +---------------------+--------------+ |
| 85 | /// | B1 | Header | Usable space | |
| 86 | /// +----------+----------+--------------+ |
| 87 | /// | | next | | |
| 88 | /// | 0......3 | 4......7 | 8........827 | |
| 89 | /// | xxxxxxxx | 00000830 | f7f7....f7f7 | |
| 90 | /// +----------+----------+--------------+ |
| 91 | /// |
| 92 | /// The next offset of a block matches the previous offset of its next block. |
| 93 | /// The first block in a list is denoted by having a previous offset of `0`. |
class Block {
  // Masks for the contents of the next_ field. The two low bits of next_ are
  // guaranteed free because block sizes are aligned to at least 4 (see the
  // static_assert following this class).
  static constexpr size_t PREV_FREE_MASK = 1 << 0; // Previous block is free.
  static constexpr size_t LAST_MASK = 1 << 1;      // This is the sentinel last block.
  static constexpr size_t SIZE_MASK = ~(PREV_FREE_MASK | LAST_MASK);

public:
  // No copy or move; a Block is pinned to the memory region it manages.
  Block(const Block &other) = delete;
  Block &operator=(const Block &other) = delete;

  /// Initializes a given memory region into a first block and a sentinel last
  /// block. Returns the first block, which has its usable space aligned to
  /// max_align_t.
  static optional<Block *> init(ByteSpan region);

  /// @returns A pointer to a `Block`, given a pointer to the start of the
  /// usable space inside the block.
  ///
  /// This is the inverse of `usable_space()`.
  ///
  /// @warning This method does not do any checking; passing a random
  /// pointer will return a non-null pointer.
  LIBC_INLINE static Block *from_usable_space(void *usable_space) {
    auto *bytes = reinterpret_cast<cpp::byte *>(usable_space);
    return reinterpret_cast<Block *>(bytes - sizeof(Block));
  }
  LIBC_INLINE static const Block *from_usable_space(const void *usable_space) {
    const auto *bytes = reinterpret_cast<const cpp::byte *>(usable_space);
    return reinterpret_cast<const Block *>(bytes - sizeof(Block));
  }

  /// @returns The total size of the block in bytes, including the header.
  LIBC_INLINE size_t outer_size() const { return next_ & SIZE_MASK; }

  /// @returns The outer size of a block with the given allocated inner size.
  /// NOTE(review): assumes inner_size >= sizeof(prev_); smaller values would
  /// underflow -- confirm callers guarantee this.
  LIBC_INLINE static size_t outer_size(size_t inner_size) {
    // The usable region includes the prev_ field of the next block.
    return inner_size - sizeof(prev_) + sizeof(Block);
  }

  /// @returns The number of usable bytes inside the block were it to be
  /// allocated.
  LIBC_INLINE size_t inner_size() const {
    // The sentinel last block has no usable space.
    if (!next())
      return 0;
    return inner_size(outer_size());
  }

  /// @returns The number of usable bytes inside a block with the given outer
  /// size were it to be allocated.
  LIBC_INLINE static size_t inner_size(size_t outer_size) {
    // The usable region includes the prev_ field of the next block.
    return inner_size_free(outer_size) + sizeof(prev_);
  }

  /// @returns The number of usable bytes inside the block if it remains free.
  LIBC_INLINE size_t inner_size_free() const {
    // The sentinel last block has no usable space.
    if (!next())
      return 0;
    return inner_size_free(outer_size());
  }

  /// @returns The number of usable bytes inside a block with the given outer
  /// size if it remains free. A free block cannot use the next block's prev_
  /// field, so this is just the space past the header.
  LIBC_INLINE static size_t inner_size_free(size_t outer_size) {
    return outer_size - sizeof(Block);
  }

  /// @returns A pointer to the usable space inside this block.
  ///
  /// Aligned to some multiple of max_align_t.
  LIBC_INLINE cpp::byte *usable_space() {
    auto *s = reinterpret_cast<cpp::byte *>(this) + sizeof(Block);
    LIBC_ASSERT(reinterpret_cast<uintptr_t>(s) % alignof(max_align_t) == 0 &&
                "usable space must be aligned to a multiple of max_align_t" );
    return s;
  }
  LIBC_INLINE const cpp::byte *usable_space() const {
    const auto *s = reinterpret_cast<const cpp::byte *>(this) + sizeof(Block);
    LIBC_ASSERT(reinterpret_cast<uintptr_t>(s) % alignof(max_align_t) == 0 &&
                "usable space must be aligned to a multiple of max_align_t" );
    return s;
  }

  /// @returns The region of memory the block manages, including the header.
  LIBC_INLINE ByteSpan region() {
    return {reinterpret_cast<cpp::byte *>(this), outer_size()};
  }

  /// Attempts to split this block.
  ///
  /// If successful, the block will have an inner size of at least
  /// `new_inner_size`. The remaining space will be returned as a new block,
  /// with usable space aligned to `usable_space_alignment`. Note that the prev_
  /// field of the next block counts as part of the inner size of the block.
  /// `usable_space_alignment` must be a multiple of max_align_t.
  optional<Block *> split(size_t new_inner_size,
                          size_t usable_space_alignment = alignof(max_align_t));

  /// Merges this block with the one that comes after it.
  /// @returns Whether the merge took place (both blocks must be free).
  bool merge_next();

  /// @returns The block immediately after this one, or a null pointer if this
  /// is the last block.
  LIBC_INLINE Block *next() const {
    if (next_ & LAST_MASK)
      return nullptr;
    // Blocks are contiguous: the next block starts outer_size() bytes ahead.
    return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) +
                                     outer_size());
  }

  /// @returns The free block immediately before this one, otherwise nullptr.
  LIBC_INLINE Block *prev_free() const {
    // prev_ is only alive (and PREV_FREE_MASK only set) when the previous
    // block is free; otherwise its bytes belong to that block's usable space.
    if (!(next_ & PREV_FREE_MASK))
      return nullptr;
    return reinterpret_cast<Block *>(reinterpret_cast<uintptr_t>(this) - prev_);
  }

  /// @returns Whether the block is unavailable for allocation. The used state
  /// is stored in the *next* block's PREV_FREE flag; the sentinel last block
  /// (no next) is always considered used.
  LIBC_INLINE bool used() const { return !next() || !next()->prev_free(); }

  /// Marks this block as in use.
  LIBC_INLINE void mark_used() {
    LIBC_ASSERT(next() && "last block is always considered used" );
    next()->next_ &= ~PREV_FREE_MASK;
  }

  /// Marks this block as free.
  LIBC_INLINE void mark_free() {
    LIBC_ASSERT(next() && "last block is always considered used" );
    next()->next_ |= PREV_FREE_MASK;
    // The next block's prev_ field becomes alive, as it is no longer part of
    // this block's used space. Placement-new formally starts its lifetime.
    *new (&next()->prev_) size_t = outer_size();
  }

  /// Constructs a block header in place; used via placement new by as_block
  /// and make_last_block.
  LIBC_INLINE Block(size_t outer_size, bool is_last) : next_(outer_size) {
    // Last blocks are not usable, so they need not have sizes aligned to
    // max_align_t. Their lower bits must still be free, so they must be aligned
    // to Block.
    LIBC_ASSERT(
        outer_size % (is_last ? alignof(Block) : alignof(max_align_t)) == 0 &&
        "block sizes must be aligned" );
    LIBC_ASSERT(is_usable_space_aligned(alignof(max_align_t)) &&
                "usable space must be aligned to a multiple of max_align_t" );
    if (is_last)
      next_ |= LAST_MASK;
  }

  /// @returns Whether this block's usable space is aligned to `alignment`.
  LIBC_INLINE bool is_usable_space_aligned(size_t alignment) const {
    return reinterpret_cast<uintptr_t>(usable_space()) % alignment == 0;
  }

  // Returns the minimum inner size necessary for a block of that size to
  // always be able to allocate at the given size and alignment.
  //
  // Returns 0 if there is no such size.
  LIBC_INLINE static size_t min_size_for_allocation(size_t alignment,
                                                    size_t size) {
    LIBC_ASSERT(alignment >= alignof(max_align_t) &&
                alignment % alignof(max_align_t) == 0 &&
                "alignment must be multiple of max_align_t" );

    // Usable space is always max_align_t aligned, so no extra room is needed.
    if (alignment == alignof(max_align_t))
      return size;

    // We must create a new block inside this one (splitting). This requires a
    // block header in addition to the requested size.
    if (add_overflow(size, sizeof(Block), size))
      return 0;

    // Beyond that, padding space may need to remain in this block to ensure
    // that the usable space of the next block is aligned.
    //
    // Consider a position P of some lesser alignment, L, with maximal distance
    // to the next position of some greater alignment, G, where G is a multiple
    // of L. P must be one L unit past a G-aligned point. If it were one L-unit
    // earlier, its distance would be zero. If it were one L-unit later, its
    // distance would not be maximal. If it were not some integral number of L
    // units away, it would not be L-aligned.
    //
    // So the maximum distance would be G - L. As a special case, if L is 1
    // (unaligned), the max distance is G - 1.
    //
    // This block's usable space is aligned to max_align_t >= Block. With zero
    // padding, the next block's usable space is sizeof(Block) past it, which is
    // a point aligned to Block. Thus the max padding needed is alignment -
    // alignof(Block).
    if (add_overflow(size, alignment - alignof(Block), size))
      return 0;
    return size;
  }

  // This is the return type for `allocate` which can split one block into up to
  // three blocks.
  struct BlockInfo {
    // This is the newly aligned block. It will have the alignment requested by
    // a call to `allocate` and at most `size`.
    Block *block;

    // If the usable_space in the new block was not aligned according to the
    // `alignment` parameter, we will need to split into this block and the
    // `block` to ensure `block` is properly aligned. In this case, `prev` will
    // be a pointer to this new "padding" block. `prev` will be nullptr if no
    // new block was created or we were able to merge the block before the
    // original block with the "padding" block.
    Block *prev;

    // This is the remainder of the next block after splitting the `block`
    // according to `size`. This can happen if there's enough space after the
    // `block`.
    Block *next;
  };

  // Divide a block into up to 3 blocks according to `BlockInfo`. Behavior is
  // undefined if allocation is not possible for the given size and alignment.
  static BlockInfo allocate(Block *block, size_t alignment, size_t size);

  // These two functions may wrap around. They locate the nearest address at or
  // after (resp. at or before) `ptr` where a block header could be placed so
  // that its usable space has the requested alignment.
  LIBC_INLINE static uintptr_t next_possible_block_start(
      uintptr_t ptr, size_t usable_space_alignment = alignof(max_align_t)) {
    return align_up(ptr + sizeof(Block), usable_space_alignment) -
           sizeof(Block);
  }
  LIBC_INLINE static uintptr_t prev_possible_block_start(
      uintptr_t ptr, size_t usable_space_alignment = alignof(max_align_t)) {
    return align_down(ptr, usable_space_alignment) - sizeof(Block);
  }

private:
  /// Construct a block to represent a span of bytes. Overwrites only enough
  /// memory for the block header; the rest of the span is left alone.
  LIBC_INLINE static Block *as_block(ByteSpan bytes) {
    LIBC_ASSERT(reinterpret_cast<uintptr_t>(bytes.data()) % alignof(Block) ==
                    0 &&
                "block start must be suitably aligned" );
    return ::new (bytes.data()) Block(bytes.size(), /*is_last=*/false);
  }

  /// Construct the sentinel last block at `start`. Its outer size is just the
  /// header; it has no usable space.
  LIBC_INLINE static void make_last_block(cpp::byte *start) {
    LIBC_ASSERT(reinterpret_cast<uintptr_t>(start) % alignof(Block) == 0 &&
                "block start must be suitably aligned" );
    ::new (start) Block(sizeof(Block), /*is_last=*/true);
  }

  /// Offset from this block to the previous block. 0 if this is the first
  /// block. This field is only alive when the previous block is free;
  /// otherwise, its memory is reused as part of the previous block's usable
  /// space.
  size_t prev_ = 0;

  /// Offset from this block to the next block. Valid even if this is the last
  /// block, since it equals the size of the block.
  size_t next_ = 0;

  /// Information about the current state of the block is stored in the two low
  /// order bits of the next_ value. These are guaranteed free by a minimum
  /// alignment (and thus, alignment of the size) of 4. The lowest bit is the
  /// `prev_free` flag, and the other bit is the `last` flag.
  ///
  /// * If the `prev_free` flag is set, the block isn't the first and the
  ///   previous block is free.
  /// * If the `last` flag is set, the block is the sentinel last block. It is
  ///   summarily considered used and has no next block.

public:
  /// Only for testing.
  static constexpr size_t PREV_FIELD_SIZE = sizeof(prev_);
};
| 363 | |
// The two low bits of next_ hold the prev_free and last flags, so block
// addresses (and hence sizes) must leave those bits clear.
static_assert(alignof(Block) >= 4,
              "at least 2 bits must be available in block sizes for flags" );
| 366 | |
LIBC_INLINE
optional<Block *> Block::init(ByteSpan region) {
  // Reject a null region outright.
  if (!region.data())
    return {};

  uintptr_t start = reinterpret_cast<uintptr_t>(region.data());
  uintptr_t end = start + region.size();
  // Reject regions whose end address wraps around the address space.
  if (end < start)
    return {};

  // Advance to the first address where a block with max_align_t-aligned
  // usable space can start; bail out if that computation wrapped.
  uintptr_t block_start = next_possible_block_start(start);
  if (block_start < start)
    return {};

  // Back up from the end to where the sentinel last block can start; bail out
  // if that computation wrapped past the end.
  uintptr_t last_start = prev_possible_block_start(end);
  if (last_start >= end)
    return {};

  // The first block needs at least a header's worth of space before the
  // sentinel block begins.
  if (block_start + sizeof(Block) > last_start)
    return {};

  auto *last_start_ptr = reinterpret_cast<cpp::byte *>(last_start);
  Block *block =
      as_block({reinterpret_cast<cpp::byte *>(block_start), last_start_ptr});
  make_last_block(last_start_ptr);
  // Record the first block as free in the sentinel's flags/prev_ field.
  block->mark_free();
  return block;
}
| 395 | |
LIBC_INLINE
Block::BlockInfo Block::allocate(Block *block, size_t alignment, size_t size) {
  LIBC_ASSERT(alignment % alignof(max_align_t) == 0 &&
              "alignment must be a multiple of max_align_t" );

  BlockInfo info{block, /*prev=*/nullptr, /*next=*/nullptr};

  // If the block's usable space is not already aligned, carve off a leading
  // "padding" block so that the remainder is aligned.
  if (!info.block->is_usable_space_aligned(alignment)) {
    Block *original = info.block;
    // The padding block has no minimum size requirement.
    optional<Block *> maybe_aligned_block = original->split(0, alignment);
    LIBC_ASSERT(maybe_aligned_block.has_value() &&
                "it should always be possible to split for alignment" );

    if (Block *prev = original->prev_free()) {
      // If there is a free block before this, we can merge the current one with
      // the newly created one.
      prev->merge_next();
    } else {
      // Otherwise the padding block stands alone; report it to the caller.
      info.prev = original;
    }

    Block *aligned_block = *maybe_aligned_block;
    LIBC_ASSERT(aligned_block->is_usable_space_aligned(alignment) &&
                "The aligned block isn't aligned somehow." );
    info.block = aligned_block;
  }

  // Now get a block for the requested size. Any leftover space becomes a
  // trailing free block reported via info.next.
  if (optional<Block *> next = info.block->split(size))
    info.next = *next;

  return info;
}
| 430 | |
LIBC_INLINE
optional<Block *> Block::split(size_t new_inner_size,
                               size_t usable_space_alignment) {
  LIBC_ASSERT(usable_space_alignment % alignof(max_align_t) == 0 &&
              "alignment must be a multiple of max_align_t" );
  // Only free blocks may be split.
  if (used())
    return {};

  // Compute the minimum outer size that produces a block of at least
  // `new_inner_size`. The inner size must cover at least the next block's
  // prev_ field (see outer_size(inner_size)).
  size_t min_outer_size = outer_size(cpp::max(new_inner_size, sizeof(prev_)));

  uintptr_t start = reinterpret_cast<uintptr_t>(this);
  // Place the new block at the first position past min_outer_size where its
  // usable space has the requested alignment; bail out on wraparound.
  uintptr_t next_block_start =
      next_possible_block_start(start + min_outer_size, usable_space_alignment);
  if (next_block_start < start)
    return {};
  size_t new_outer_size = next_block_start - start;
  LIBC_ASSERT(new_outer_size % alignof(max_align_t) == 0 &&
              "new size must be aligned to max_align_t" );

  // The split must fit within this block and leave enough room for the new
  // block's header.
  if (outer_size() < new_outer_size ||
      outer_size() - new_outer_size < sizeof(Block))
    return {};

  // Shrink this block to new_outer_size, then build the new block in the
  // remainder of the original region.
  ByteSpan new_region = region().subspan(new_outer_size);
  next_ &= ~SIZE_MASK;
  next_ |= new_outer_size;

  Block *new_block = as_block(new_region);
  mark_free(); // Free status for this block is now stored in new_block.
  // The block after new_block keeps whatever flags it had; only its prev_
  // offset needs to point back at new_block.
  new_block->next()->prev_ = new_region.size();

  LIBC_ASSERT(new_block->is_usable_space_aligned(usable_space_alignment) &&
              "usable space must have requested alignment" );
  return new_block;
}
| 468 | |
| 469 | LIBC_INLINE |
| 470 | bool Block::merge_next() { |
| 471 | if (used() || next()->used()) |
| 472 | return false; |
| 473 | size_t new_size = outer_size() + next()->outer_size(); |
| 474 | next_ &= ~SIZE_MASK; |
| 475 | next_ |= new_size; |
| 476 | next()->prev_ = new_size; |
| 477 | return true; |
| 478 | } |
| 479 | |
| 480 | } // namespace LIBC_NAMESPACE_DECL |
| 481 | |
| 482 | #endif // LLVM_LIBC_SRC___SUPPORT_BLOCK_H |
| 483 | |