//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <cstddef>
#include <memory>
#include <memory_resource>

#if _LIBCPP_HAS_ATOMIC_HEADER
#  include <atomic>
#elif _LIBCPP_HAS_THREADS
#  include <mutex>
#  if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#    pragma comment(lib, "pthread")
#  endif
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

namespace pmr {

// memory_resource

memory_resource::~memory_resource() = default;

// new_delete_resource()

#if !_LIBCPP_HAS_ALIGNED_ALLOCATION
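// Without over-aligned operator new, the allocation below may come back
// insufficiently aligned, so the fallback do_allocate() has to verify the result
// after the fact. std::align with a size of one succeeds without moving the
// pointer exactly when `ptr` already satisfies `align`.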
static bool is_aligned_to(void* ptr, size_t align) {
  void* p2     = ptr;
  size_t space = 1;
  void* result = std::align(align, 1, p2, space);
  return (result == ptr);
}
#endif

class _LIBCPP_HIDDEN __new_delete_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t bytes, size_t align) override {
#if _LIBCPP_HAS_ALIGNED_ALLOCATION
    return std::__libcpp_allocate<std::byte>(__element_count(bytes), align);
#else
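    // Allocate at least one byte so even a zero-sized request yields a valid,
    // unique pointer.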
    if (bytes == 0)
      bytes = 1;
    std::byte* result = std::__libcpp_allocate<std::byte>(__element_count(bytes), align);
    if (!is_aligned_to(result, align)) {
      std::__libcpp_deallocate<std::byte>(result, __element_count(bytes), align);
      std::__throw_bad_alloc();
    }
    return result;
#endif
  }

  void do_deallocate(void* p, size_t bytes, size_t align) override {
    std::__libcpp_deallocate<std::byte>(static_cast<std::byte*>(p), __element_count(bytes), align);
  }

  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};

// null_memory_resource()

class _LIBCPP_HIDDEN __null_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t, size_t) override { std::__throw_bad_alloc(); }
  void do_deallocate(void*, size_t, size_t) override {}
  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};

namespace {

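// Keeping the resources inside a union with an empty, user-provided destructor
// means the members' destructors are never run, so new_delete_resource() and
// null_memory_resource() remain usable even during static destruction.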
union ResourceInitHelper {
  struct {
    __new_delete_memory_resource_imp new_delete_res;
    __null_memory_resource_imp null_res;
  } resources;
  char dummy;
  constexpr ResourceInitHelper() : resources() {}
  ~ResourceInitHelper() {}
};

// Pretend we're inside a system header so the compiler doesn't flag the use of the init_priority
// attribute with a value that's reserved for the implementation (we're the implementation).
#include "memory_resource_init_helper.h"

} // namespace

memory_resource* new_delete_resource() noexcept { return &res_init.resources.new_delete_res; }

memory_resource* null_memory_resource() noexcept { return &res_init.resources.null_res; }

// default_memory_resource()

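// A single function implements both the getter and the setter so the resource
// pointer and whatever guards it (an atomic, a mutex, or nothing, depending on
// the configuration) can live in one function-local constinit variable.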
static memory_resource* __default_memory_resource(bool set = false, memory_resource* new_res = nullptr) noexcept {
#if _LIBCPP_HAS_ATOMIC_HEADER
  static constinit atomic<memory_resource*> __res{&res_init.resources.new_delete_res};
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    // TODO: Can a weaker ordering be used?
    return std::atomic_exchange_explicit(&__res, new_res, memory_order_acq_rel);
  } else {
    return std::atomic_load_explicit(&__res, memory_order_acquire);
  }
#elif _LIBCPP_HAS_THREADS
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  static mutex res_lock;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    lock_guard<mutex> guard(res_lock);
    memory_resource* old_res = res;
    res                      = new_res;
    return old_res;
  } else {
    lock_guard<mutex> guard(res_lock);
    return res;
  }
#else
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    memory_resource* old_res = res;
    res                      = new_res;
    return old_res;
  } else {
    return res;
  }
#endif
}

memory_resource* get_default_resource() noexcept { return __default_memory_resource(); }

memory_resource* set_default_resource(memory_resource* __new_res) noexcept {
  return __default_memory_resource(true, __new_res);
}

// 23.12.5, mem.res.pool

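// Round `count` up to the next multiple of `alignment`; `alignment` must be a
// power of two for the mask trick to be valid. E.g. roundup(13, 8) == 16.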
static size_t roundup(size_t count, size_t alignment) {
  size_t mask = alignment - 1;
  return (count + mask) & ~mask;
}

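// Each chunk obtained from upstream carries its bookkeeping in a footer placed at
// the tail of the allocation:
//   [ user bytes ... padding ... __chunk_footer ]
// __allocation_size() recovers the total size of the upstream allocation from the
// footer's own address and the stored __start_ pointer.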
struct unsynchronized_pool_resource::__adhoc_pool::__chunk_footer {
  __chunk_footer* __next_;
  char* __start_;
  size_t __align_;
  size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
};

void unsynchronized_pool_resource::__adhoc_pool::__release_ptr(memory_resource* upstream) {
  while (__first_ != nullptr) {
    __chunk_footer* next = __first_->__next_;
    upstream->deallocate(__first_->__start_, __first_->__allocation_size(), __first_->__align_);
    __first_ = next;
  }
}

void* unsynchronized_pool_resource::__adhoc_pool::__do_allocate(memory_resource* upstream, size_t bytes, size_t align) {
  const size_t footer_size  = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

  if (align < footer_align)
    align = footer_align;

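  // Rounding the user size up to the footer's alignment keeps the footer, placed
  // at the very end of the allocation, itself properly aligned.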
  size_t aligned_capacity = roundup(bytes, footer_align) + footer_size;

  void* result = upstream->allocate(aligned_capacity, align);

  __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
  h->__next_        = __first_;
  h->__start_       = (char*)result;
  h->__align_       = align;
  __first_          = h;
  return result;
}

void unsynchronized_pool_resource::__adhoc_pool::__do_deallocate(
    memory_resource* upstream, void* p, size_t bytes, size_t align) {
  _LIBCPP_ASSERT_NON_NULL(__first_ != nullptr, "deallocating a block that was not allocated with this allocator");
  if (__first_->__start_ == p) {
    __chunk_footer* next = __first_->__next_;
    upstream->deallocate(p, __first_->__allocation_size(), __first_->__align_);
    __first_ = next;
  } else {
    for (__chunk_footer* h = __first_; h->__next_ != nullptr; h = h->__next_) {
      if (h->__next_->__start_ == p) {
        __chunk_footer* next = h->__next_->__next_;
        upstream->deallocate(p, h->__next_->__allocation_size(), h->__next_->__align_);
        h->__next_ = next;
        return;
      }
    }
    // The request to deallocate memory ends up being a no-op, likely resulting in a memory leak.
    _LIBCPP_ASSERT_VALID_DEALLOCATION(false, "deallocating a block that was not allocated with this allocator");
  }
}

class unsynchronized_pool_resource::__fixed_pool {
  struct __chunk_footer {
    __chunk_footer* __next_;
    char* __start_;
    size_t __align_;
    size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
  };

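  // A vacancy is a free block inside a chunk. Its first bytes hold a pointer to
  // the next free block, forming an intrusive singly linked free list.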
  struct __vacancy_header {
    __vacancy_header* __next_vacancy_;
  };

  __chunk_footer* __first_chunk_     = nullptr;
  __vacancy_header* __first_vacancy_ = nullptr;

public:
  explicit __fixed_pool() = default;

  void __release_ptr(memory_resource* upstream) {
    __first_vacancy_ = nullptr;
    while (__first_chunk_ != nullptr) {
      __chunk_footer* next = __first_chunk_->__next_;
      upstream->deallocate(__first_chunk_->__start_, __first_chunk_->__allocation_size(), __first_chunk_->__align_);
      __first_chunk_ = next;
    }
  }

  void* __try_allocate_from_vacancies() {
    if (__first_vacancy_ != nullptr) {
      void* result     = __first_vacancy_;
      __first_vacancy_ = __first_vacancy_->__next_vacancy_;
      return result;
    }
    return nullptr;
  }

  void* __allocate_in_new_chunk(memory_resource* upstream, size_t block_size, size_t chunk_size) {
    _LIBCPP_ASSERT_INTERNAL(chunk_size % block_size == 0, "");
    static_assert(__default_alignment >= alignof(std::max_align_t), "");
    static_assert(__default_alignment >= alignof(__chunk_footer), "");
    static_assert(__default_alignment >= alignof(__vacancy_header), "");

    const size_t footer_size  = sizeof(__chunk_footer);
    const size_t footer_align = alignof(__chunk_footer);

    size_t aligned_capacity = roundup(chunk_size, footer_align) + footer_size;

    void* result = upstream->allocate(aligned_capacity, __default_alignment);

    __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
    h->__next_        = __first_chunk_;
    h->__start_       = (char*)result;
    h->__align_       = __default_alignment;
    __first_chunk_    = h;

    if (chunk_size > block_size) {
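      // The caller receives block 0 of the new chunk; every remaining block is
      // pushed onto the vacancy list (so the highest-address block is handed out
      // first).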
      __vacancy_header* last_vh = this->__first_vacancy_;
      for (size_t i = block_size; i != chunk_size; i += block_size) {
        __vacancy_header* vh = (__vacancy_header*)((char*)result + i);
        vh->__next_vacancy_  = last_vh;
        last_vh              = vh;
      }
      this->__first_vacancy_ = last_vh;
    }
    return result;
  }

  void __evacuate(void* p) {
    __vacancy_header* vh = (__vacancy_header*)(p);
    vh->__next_vacancy_  = __first_vacancy_;
    __first_vacancy_     = vh;
  }

  size_t __previous_chunk_size_in_bytes() const { return __first_chunk_ ? __first_chunk_->__allocation_size() : 0; }

  static const size_t __default_alignment = alignof(max_align_t);
};

size_t unsynchronized_pool_resource::__pool_block_size(int i) const { return size_t(1) << __log2_pool_block_size(i); }

int unsynchronized_pool_resource::__log2_pool_block_size(int i) const { return (i + __log2_smallest_block_size); }

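// Map (bytes, align) to the index of the smallest fixed pool whose block size can
// satisfy the request, or to __num_fixed_pools_ when the request must go to the
// ad-hoc pool. For example, assuming the smallest block size is 8 bytes
// (__log2_smallest_block_size == 3), a 24-byte request maps to the 32-byte pool
// at index 2.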
int unsynchronized_pool_resource::__pool_index(size_t bytes, size_t align) const {
  if (align > alignof(std::max_align_t) || bytes > (size_t(1) << __num_fixed_pools_))
    return __num_fixed_pools_;
  else {
    int i = 0;
    bytes = (bytes > align) ? bytes : align;
    bytes -= 1;
    bytes >>= __log2_smallest_block_size;
    while (bytes != 0) {
      bytes >>= 1;
      i += 1;
    }
    return i;
  }
}

unsynchronized_pool_resource::unsynchronized_pool_resource(const pool_options& opts, memory_resource* upstream)
    : __res_(upstream), __fixed_pools_(nullptr) {
  // Clamp the requested largest block size to [__smallest_block_size, __max_largest_block_size].
  size_t largest_block_size;
  if (opts.largest_required_pool_block == 0)
    largest_block_size = __default_largest_block_size;
  else if (opts.largest_required_pool_block < __smallest_block_size)
    largest_block_size = __smallest_block_size;
  else if (opts.largest_required_pool_block > __max_largest_block_size)
    largest_block_size = __max_largest_block_size;
  else
    largest_block_size = opts.largest_required_pool_block;

  // Clamp max_blocks_per_chunk to [__min_blocks_per_chunk, __max_blocks_per_chunk].
  if (opts.max_blocks_per_chunk == 0)
    __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
  else if (opts.max_blocks_per_chunk < __min_blocks_per_chunk)
    __options_max_blocks_per_chunk_ = __min_blocks_per_chunk;
  else if (opts.max_blocks_per_chunk > __max_blocks_per_chunk)
    __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
  else
    __options_max_blocks_per_chunk_ = opts.max_blocks_per_chunk;

  // One fixed pool per power-of-two block size from __smallest_block_size up to largest_block_size.
  __num_fixed_pools_ = 1;
  size_t capacity    = __smallest_block_size;
  while (capacity < largest_block_size) {
    capacity <<= 1;
    __num_fixed_pools_ += 1;
  }
}

pool_options unsynchronized_pool_resource::options() const {
  pool_options p;
  p.max_blocks_per_chunk        = __options_max_blocks_per_chunk_;
  p.largest_required_pool_block = __pool_block_size(__num_fixed_pools_ - 1);
  return p;
}

void unsynchronized_pool_resource::release() {
  __adhoc_pool_.__release_ptr(__res_);
  if (__fixed_pools_ != nullptr) {
    const int n = __num_fixed_pools_;
    for (int i = 0; i < n; ++i)
      __fixed_pools_[i].__release_ptr(__res_);
    __res_->deallocate(__fixed_pools_, __num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
    __fixed_pools_ = nullptr;
  }
}

void* unsynchronized_pool_resource::do_allocate(size_t bytes, size_t align) {
  // Returns a pointer to allocated storage (6.6.4.4.1) with a size of at least `bytes`.
  // The size and alignment of the allocated memory shall meet the requirements for
  // a class derived from memory_resource (23.12).
  // If the pool selected for a block of size `bytes` is unable to satisfy the request
  // from its own internal data structures, it will call upstream_resource()->allocate()
  // to obtain more memory. If `bytes` is larger than the largest pool can handle,
  // the memory is allocated using upstream_resource()->allocate() directly.

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    return __adhoc_pool_.__do_allocate(__res_, bytes, align);
  else {
    if (__fixed_pools_ == nullptr) {
      // Lazily allocate and construct the array of fixed pools on first use.
      __fixed_pools_ =
          (__fixed_pool*)__res_->allocate(__num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
      __fixed_pool* first = __fixed_pools_;
      __fixed_pool* last  = __fixed_pools_ + __num_fixed_pools_;
      for (__fixed_pool* pool = first; pool != last; ++pool)
        ::new ((void*)pool) __fixed_pool;
    }
    void* result = __fixed_pools_[i].__try_allocate_from_vacancies();
    if (result == nullptr) {
      auto min = [](size_t a, size_t b) { return a < b ? a : b; };
      auto max = [](size_t a, size_t b) { return a < b ? b : a; };

      size_t prev_chunk_size_in_bytes  = __fixed_pools_[i].__previous_chunk_size_in_bytes();
      size_t prev_chunk_size_in_blocks = prev_chunk_size_in_bytes >> __log2_pool_block_size(i);

      size_t chunk_size_in_blocks;

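      // Chunk sizes grow geometrically: a pool's first chunk gets at least
      // __min_blocks_per_chunk blocks (and at least __min_bytes_per_chunk bytes);
      // each later chunk asks for the previous size plus a quarter, subject to the
      // per-chunk block and byte caps computed below.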
      if (prev_chunk_size_in_blocks == 0) {
        size_t min_blocks_per_chunk = max(__min_bytes_per_chunk >> __log2_pool_block_size(i), __min_blocks_per_chunk);
        chunk_size_in_blocks        = min_blocks_per_chunk;
      } else {
        static_assert(__max_bytes_per_chunk <= SIZE_MAX - (__max_bytes_per_chunk / 4), "unsigned overflow is possible");
        chunk_size_in_blocks = prev_chunk_size_in_blocks + (prev_chunk_size_in_blocks / 4);
      }

      size_t max_blocks_per_chunk =
          min((__max_bytes_per_chunk >> __log2_pool_block_size(i)),
              min(__max_blocks_per_chunk, __options_max_blocks_per_chunk_));
      if (chunk_size_in_blocks > max_blocks_per_chunk)
        chunk_size_in_blocks = max_blocks_per_chunk;

      size_t block_size = __pool_block_size(i);

      size_t chunk_size_in_bytes = (chunk_size_in_blocks << __log2_pool_block_size(i));
      result                     = __fixed_pools_[i].__allocate_in_new_chunk(__res_, block_size, chunk_size_in_bytes);
    }
    return result;
  }
}

void unsynchronized_pool_resource::do_deallocate(void* p, size_t bytes, size_t align) {
  // Returns the memory at p to the pool. It is unspecified if,
  // or under what circumstances, this operation will result in
  // a call to upstream_resource()->deallocate().

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    return __adhoc_pool_.__do_deallocate(__res_, p, bytes, align);
  else {
    _LIBCPP_ASSERT_NON_NULL(
        __fixed_pools_ != nullptr, "deallocating a block that was not allocated with this allocator");
    __fixed_pools_[i].__evacuate(p);
  }
}

bool synchronized_pool_resource::do_is_equal(const memory_resource& other) const noexcept { return &other == this; }

// 23.12.6, mem.res.monotonic.buffer

constexpr size_t __default_growth_factor = 2;

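// monotonic_buffer_resource carves allocations downward from the top of each
// chunk. align_down is the downward analogue of std::align: it moves `ptr` down
// by `size` bytes and then down to the previous `align` boundary (align must be a
// power of two). For example, starting at ptr == 0x100F with size 8, align 8, and
// space 16, the pointer moves to 0x1000 and space shrinks by 15.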
static void* align_down(size_t align, size_t size, void*& ptr, size_t& space) {
  if (size > space)
    return nullptr;

  char* p1      = static_cast<char*>(ptr);
  char* new_ptr = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(p1 - size) & ~(align - 1));

  if (new_ptr < (p1 - space))
    return nullptr;

  ptr = new_ptr;
  space -= p1 - new_ptr;

  return ptr;
}

template <bool is_initial, typename Chunk>
void* __try_allocate_from_chunk(Chunk& self, size_t bytes, size_t align) {
  if constexpr (is_initial) {
    // This branch exists only for __initial_descriptor: a null __cur_ means no
    // buffer was supplied at construction time, so return nullptr and let the
    // caller allocate a fresh chunk.
    if (!self.__cur_)
      return nullptr;
  }
  void* new_ptr       = static_cast<void*>(self.__cur_);
  size_t new_capacity = (self.__cur_ - self.__start_);
  void* aligned_ptr   = align_down(align, bytes, new_ptr, new_capacity);
  if (aligned_ptr != nullptr)
    self.__cur_ = static_cast<char*>(new_ptr);
  return aligned_ptr;
}

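// Try the initial buffer first, then the newest upstream chunk; only when both
// fail is a new, geometrically larger chunk requested from the upstream resource.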
void* monotonic_buffer_resource::do_allocate(size_t bytes, size_t align) {
  const size_t footer_size  = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

  auto previous_allocation_size = [&]() {
    if (__chunks_ != nullptr)
      return __chunks_->__allocation_size();

    size_t newsize = (__initial_.__start_ != nullptr) ? (__initial_.__end_ - __initial_.__start_) : __initial_.__size_;

    return roundup(newsize, footer_align) + footer_size;
  };

  if (void* result = __try_allocate_from_chunk<true, __initial_descriptor>(__initial_, bytes, align))
    return result;
  if (__chunks_ != nullptr) {
    if (void* result = __try_allocate_from_chunk<false, __chunk_footer>(*__chunks_, bytes, align))
      return result;
  }

  // Allocate a brand-new chunk.

  if (align < footer_align)
    align = footer_align;

  size_t aligned_capacity  = roundup(bytes, footer_align) + footer_size;
  size_t previous_capacity = previous_allocation_size();

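  // When the request fits within the previous chunk's capacity, grow
  // geometrically instead: the new chunk offers __default_growth_factor times the
  // previous usable size, so repeated small allocations trigger only
  // logarithmically many upstream calls.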
  if (aligned_capacity <= previous_capacity) {
    size_t newsize   = __default_growth_factor * (previous_capacity - footer_size);
    aligned_capacity = roundup(newsize, footer_align) + footer_size;
  }

  char* start            = (char*)__res_->allocate(aligned_capacity, align);
  auto end               = start + aligned_capacity - footer_size;
  __chunk_footer* footer = (__chunk_footer*)(end);
  footer->__next_        = __chunks_;
  footer->__start_       = start;
  footer->__cur_         = end;
  footer->__align_       = align;
  __chunks_              = footer;

  return __try_allocate_from_chunk<false, __chunk_footer>(*__chunks_, bytes, align);
}

} // namespace pmr

_LIBCPP_END_NAMESPACE_STD