//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <memory>
#include <memory_resource>

#ifndef _LIBCPP_HAS_NO_ATOMIC_HEADER
#  include <atomic>
#elif !defined(_LIBCPP_HAS_NO_THREADS)
#  include <mutex>
#  if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#    pragma comment(lib, "pthread")
#  endif
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

namespace pmr {

// memory_resource

memory_resource::~memory_resource() = default;

// new_delete_resource()

#ifdef _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
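// Check alignment by asking std::align to place a one-byte object at `ptr`
// inside a one-byte buffer: std::align returns the pointer unchanged exactly
// when `ptr` already satisfies `align`.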
static bool is_aligned_to(void* ptr, size_t align) {
  void* p2 = ptr;
  size_t space = 1;
  void* result = std::align(align, 1, p2, space);
  return (result == ptr);
}
#endif

class _LIBCPP_EXPORTED_FROM_ABI __new_delete_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t bytes, size_t align) override {
#ifndef _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
    return std::__libcpp_allocate(bytes, align);
#else
    if (bytes == 0)
      bytes = 1;
    void* result = std::__libcpp_allocate(bytes, align);
    if (!is_aligned_to(result, align)) {
      std::__libcpp_deallocate(result, bytes, align);
      __throw_bad_alloc();
    }
    return result;
#endif
  }

  void do_deallocate(void* p, size_t bytes, size_t align) override { std::__libcpp_deallocate(p, bytes, align); }

  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};

// null_memory_resource()

class _LIBCPP_EXPORTED_FROM_ABI __null_memory_resource_imp : public memory_resource {
  void* do_allocate(size_t, size_t) override { __throw_bad_alloc(); }
  void do_deallocate(void*, size_t, size_t) override {}
  bool do_is_equal(const memory_resource& other) const noexcept override { return &other == this; }
};

namespace {

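// Storing the two singleton resources inside a union with an empty destructor
// keeps them alive for the whole program: the union never destroys
// `resources`, so other objects with static storage duration can still reach
// these resources during their own destruction.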
union ResourceInitHelper {
  struct {
    __new_delete_memory_resource_imp new_delete_res;
    __null_memory_resource_imp null_res;
  } resources;
  char dummy;
  constexpr ResourceInitHelper() : resources() {}
  ~ResourceInitHelper() {}
};

// Pretend we're inside a system header so the compiler doesn't flag the use of the init_priority
// attribute with a value that's reserved for the implementation (we're the implementation).
#include "memory_resource_init_helper.h"

} // end namespace

memory_resource* new_delete_resource() noexcept { return &res_init.resources.new_delete_res; }

memory_resource* null_memory_resource() noexcept { return &res_init.resources.null_res; }

// default_memory_resource()

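// The default resource is a constinit global pointer, guarded by whichever
// synchronization the platform offers: an atomic when <atomic> is available,
// a mutex when only threads are, and a bare pointer in single-threaded builds.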
static memory_resource* __default_memory_resource(bool set = false, memory_resource* new_res = nullptr) noexcept {
#ifndef _LIBCPP_HAS_NO_ATOMIC_HEADER
  static constinit atomic<memory_resource*> __res{&res_init.resources.new_delete_res};
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    // TODO: Can a weaker ordering be used?
    return std::atomic_exchange_explicit(&__res, new_res, memory_order_acq_rel);
  } else {
    return std::atomic_load_explicit(&__res, memory_order_acquire);
  }
#elif !defined(_LIBCPP_HAS_NO_THREADS)
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  static mutex res_lock;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    lock_guard<mutex> guard(res_lock);
    memory_resource* old_res = res;
    res = new_res;
    return old_res;
  } else {
    lock_guard<mutex> guard(res_lock);
    return res;
  }
#else
  static constinit memory_resource* res = &res_init.resources.new_delete_res;
  if (set) {
    new_res = new_res ? new_res : new_delete_resource();
    memory_resource* old_res = res;
    res = new_res;
    return old_res;
  } else {
    return res;
  }
#endif
}

memory_resource* get_default_resource() noexcept { return __default_memory_resource(); }

memory_resource* set_default_resource(memory_resource* __new_res) noexcept {
  return __default_memory_resource(true, __new_res);
}

// 23.12.5, mem.res.pool

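// Round `count` up to the nearest multiple of `alignment`, which must be a
// power of two; e.g. roundup(13, 8) == 16 and roundup(16, 8) == 16.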
static size_t roundup(size_t count, size_t alignment) {
  size_t mask = alignment - 1;
  return (count + mask) & ~mask;
}

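// Every ad-hoc allocation carries a footer at its tail so the pool can walk
// and return outstanding chunks to the upstream resource later;
// __allocation_size() recovers the size of the whole upstream allocation from
// the footer's own address.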
struct unsynchronized_pool_resource::__adhoc_pool::__chunk_footer {
  __chunk_footer* __next_;
  char* __start_;
  size_t __align_;
  size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
};

void unsynchronized_pool_resource::__adhoc_pool::__release_ptr(memory_resource* upstream) {
  while (__first_ != nullptr) {
    __chunk_footer* next = __first_->__next_;
    upstream->deallocate(__first_->__start_, __first_->__allocation_size(), __first_->__align_);
    __first_ = next;
  }
}

void* unsynchronized_pool_resource::__adhoc_pool::__do_allocate(memory_resource* upstream, size_t bytes, size_t align) {
  const size_t footer_size = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

  if (align < footer_align)
    align = footer_align;

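  // Place the footer directly after the user's bytes, rounding up so the
  // footer itself is suitably aligned within the single upstream allocation.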
  size_t aligned_capacity = roundup(bytes, footer_align) + footer_size;

  void* result = upstream->allocate(aligned_capacity, align);

  __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
  h->__next_ = __first_;
  h->__start_ = (char*)result;
  h->__align_ = align;
  __first_ = h;
  return result;
}

void unsynchronized_pool_resource::__adhoc_pool::__do_deallocate(
    memory_resource* upstream, void* p, size_t bytes, size_t align) {
  _LIBCPP_ASSERT_NON_NULL(__first_ != nullptr, "deallocating a block that was not allocated with this allocator");
  if (__first_->__start_ == p) {
    __chunk_footer* next = __first_->__next_;
    upstream->deallocate(p, __first_->__allocation_size(), __first_->__align_);
    __first_ = next;
  } else {
    for (__chunk_footer* h = __first_; h->__next_ != nullptr; h = h->__next_) {
      if (h->__next_->__start_ == p) {
        __chunk_footer* next = h->__next_->__next_;
        upstream->deallocate(p, h->__next_->__allocation_size(), h->__next_->__align_);
        h->__next_ = next;
        return;
      }
    }
    // The request to deallocate memory ends up being a no-op, likely resulting in a memory leak.
    _LIBCPP_ASSERT_VALID_DEALLOCATION(false, "deallocating a block that was not allocated with this allocator");
  }
}

class unsynchronized_pool_resource::__fixed_pool {
  struct __chunk_footer {
    __chunk_footer* __next_;
    char* __start_;
    size_t __align_;
    size_t __allocation_size() { return (reinterpret_cast<char*>(this) - __start_) + sizeof(*this); }
  };

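  // Free blocks form an intrusive singly-linked free list: the first word of
  // each vacant block stores a pointer to the next vacancy.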
  struct __vacancy_header {
    __vacancy_header* __next_vacancy_;
  };

  __chunk_footer* __first_chunk_ = nullptr;
  __vacancy_header* __first_vacancy_ = nullptr;

public:
  explicit __fixed_pool() = default;

  void __release_ptr(memory_resource* upstream) {
    __first_vacancy_ = nullptr;
    while (__first_chunk_ != nullptr) {
      __chunk_footer* next = __first_chunk_->__next_;
      upstream->deallocate(__first_chunk_->__start_, __first_chunk_->__allocation_size(), __first_chunk_->__align_);
      __first_chunk_ = next;
    }
  }

  void* __try_allocate_from_vacancies() {
    if (__first_vacancy_ != nullptr) {
      void* result = __first_vacancy_;
      __first_vacancy_ = __first_vacancy_->__next_vacancy_;
      return result;
    }
    return nullptr;
  }

  void* __allocate_in_new_chunk(memory_resource* upstream, size_t block_size, size_t chunk_size) {
    _LIBCPP_ASSERT_INTERNAL(chunk_size % block_size == 0, "");
    static_assert(__default_alignment >= alignof(std::max_align_t), "");
    static_assert(__default_alignment >= alignof(__chunk_footer), "");
    static_assert(__default_alignment >= alignof(__vacancy_header), "");

    const size_t footer_size = sizeof(__chunk_footer);
    const size_t footer_align = alignof(__chunk_footer);

    size_t aligned_capacity = roundup(chunk_size, footer_align) + footer_size;

    void* result = upstream->allocate(aligned_capacity, __default_alignment);

    __chunk_footer* h = (__chunk_footer*)((char*)result + aligned_capacity - footer_size);
    h->__next_ = __first_chunk_;
    h->__start_ = (char*)result;
    h->__align_ = __default_alignment;
    __first_chunk_ = h;

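    // The block at offset 0 is handed straight to the caller; every remaining
    // block in the chunk is threaded onto the vacancy list, last block first.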
    if (chunk_size > block_size) {
      __vacancy_header* last_vh = this->__first_vacancy_;
      for (size_t i = block_size; i != chunk_size; i += block_size) {
        __vacancy_header* vh = (__vacancy_header*)((char*)result + i);
        vh->__next_vacancy_ = last_vh;
        last_vh = vh;
      }
      this->__first_vacancy_ = last_vh;
    }
    return result;
  }

  void __evacuate(void* p) {
    __vacancy_header* vh = (__vacancy_header*)(p);
    vh->__next_vacancy_ = __first_vacancy_;
    __first_vacancy_ = vh;
  }

  size_t __previous_chunk_size_in_bytes() const { return __first_chunk_ ? __first_chunk_->__allocation_size() : 0; }

  static const size_t __default_alignment = alignof(max_align_t);
};

size_t unsynchronized_pool_resource::__pool_block_size(int i) const { return size_t(1) << __log2_pool_block_size(i); }

int unsynchronized_pool_resource::__log2_pool_block_size(int i) const { return (i + __log2_smallest_block_size); }

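// Map a (bytes, align) request to a fixed-pool index. Over-aligned requests,
// and requests too big for any fixed pool, get index __num_fixed_pools_,
// which means "use the ad-hoc pool"; otherwise the index is the ceiling log2
// of max(bytes, align), counted from the smallest block size.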
int unsynchronized_pool_resource::__pool_index(size_t bytes, size_t align) const {
  if (align > alignof(std::max_align_t) || bytes > (size_t(1) << __num_fixed_pools_))
    return __num_fixed_pools_;
  else {
    int i = 0;
    bytes = (bytes > align) ? bytes : align;
    bytes -= 1;
    bytes >>= __log2_smallest_block_size;
    while (bytes != 0) {
      bytes >>= 1;
      i += 1;
    }
    return i;
  }
}

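// Clamp the user-supplied pool_options into the implementation's supported
// ranges, then derive how many power-of-two fixed pools are needed to cover
// block sizes from __smallest_block_size up to the largest requested block.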
unsynchronized_pool_resource::unsynchronized_pool_resource(const pool_options& opts, memory_resource* upstream)
    : __res_(upstream), __fixed_pools_(nullptr) {
  size_t largest_block_size;
  if (opts.largest_required_pool_block == 0)
    largest_block_size = __default_largest_block_size;
  else if (opts.largest_required_pool_block < __smallest_block_size)
    largest_block_size = __smallest_block_size;
  else if (opts.largest_required_pool_block > __max_largest_block_size)
    largest_block_size = __max_largest_block_size;
  else
    largest_block_size = opts.largest_required_pool_block;

  if (opts.max_blocks_per_chunk == 0)
    __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
  else if (opts.max_blocks_per_chunk < __min_blocks_per_chunk)
    __options_max_blocks_per_chunk_ = __min_blocks_per_chunk;
  else if (opts.max_blocks_per_chunk > __max_blocks_per_chunk)
    __options_max_blocks_per_chunk_ = __max_blocks_per_chunk;
  else
    __options_max_blocks_per_chunk_ = opts.max_blocks_per_chunk;

  __num_fixed_pools_ = 1;
  size_t capacity = __smallest_block_size;
  while (capacity < largest_block_size) {
    capacity <<= 1;
    __num_fixed_pools_ += 1;
  }
}

pool_options unsynchronized_pool_resource::options() const {
  pool_options p;
  p.max_blocks_per_chunk = __options_max_blocks_per_chunk_;
  p.largest_required_pool_block = __pool_block_size(__num_fixed_pools_ - 1);
  return p;
}

void unsynchronized_pool_resource::release() {
  __adhoc_pool_.__release_ptr(__res_);
  if (__fixed_pools_ != nullptr) {
    const int n = __num_fixed_pools_;
    for (int i = 0; i < n; ++i)
      __fixed_pools_[i].__release_ptr(__res_);
    __res_->deallocate(__fixed_pools_, __num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
    __fixed_pools_ = nullptr;
  }
}

void* unsynchronized_pool_resource::do_allocate(size_t bytes, size_t align) {
  // Returns a pointer to allocated storage (6.6.4.4.1) with a size of at least `bytes`.
  // The size and alignment of the allocated memory shall meet the requirements for
  // a class derived from memory_resource (23.12).
  // If the pool selected for a block of size `bytes` is unable to satisfy the request
  // from its own internal data structures, it calls upstream_resource()->allocate()
  // to obtain more memory. If `bytes` is larger than the largest pool can handle,
  // the memory is allocated directly with upstream_resource()->allocate().

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    return __adhoc_pool_.__do_allocate(__res_, bytes, align);
  else {
    if (__fixed_pools_ == nullptr) {
      __fixed_pools_ =
          (__fixed_pool*)__res_->allocate(__num_fixed_pools_ * sizeof(__fixed_pool), alignof(__fixed_pool));
      __fixed_pool* first = __fixed_pools_;
      __fixed_pool* last = __fixed_pools_ + __num_fixed_pools_;
      for (__fixed_pool* pool = first; pool != last; ++pool)
        ::new ((void*)pool) __fixed_pool;
    }
    void* result = __fixed_pools_[i].__try_allocate_from_vacancies();
    if (result == nullptr) {
      auto min = [](size_t a, size_t b) { return a < b ? a : b; };
      auto max = [](size_t a, size_t b) { return a < b ? b : a; };

      size_t prev_chunk_size_in_bytes = __fixed_pools_[i].__previous_chunk_size_in_bytes();
      size_t prev_chunk_size_in_blocks = prev_chunk_size_in_bytes >> __log2_pool_block_size(i);

      size_t chunk_size_in_blocks;

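      // Grow chunk sizes geometrically: start from the configured minimum,
      // then make each new chunk 25% larger than the previous one, capped by
      // the per-chunk limits below.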
      if (prev_chunk_size_in_blocks == 0) {
        size_t min_blocks_per_chunk = max(__min_bytes_per_chunk >> __log2_pool_block_size(i), __min_blocks_per_chunk);
        chunk_size_in_blocks = min_blocks_per_chunk;
      } else {
        static_assert(__max_bytes_per_chunk <= SIZE_MAX - (__max_bytes_per_chunk / 4), "unsigned overflow is possible");
        chunk_size_in_blocks = prev_chunk_size_in_blocks + (prev_chunk_size_in_blocks / 4);
      }

      size_t max_blocks_per_chunk =
          min((__max_bytes_per_chunk >> __log2_pool_block_size(i)),
              min(__max_blocks_per_chunk, __options_max_blocks_per_chunk_));
      if (chunk_size_in_blocks > max_blocks_per_chunk)
        chunk_size_in_blocks = max_blocks_per_chunk;

      size_t block_size = __pool_block_size(i);

      size_t chunk_size_in_bytes = (chunk_size_in_blocks << __log2_pool_block_size(i));
      result = __fixed_pools_[i].__allocate_in_new_chunk(__res_, block_size, chunk_size_in_bytes);
    }
    return result;
  }
}

void unsynchronized_pool_resource::do_deallocate(void* p, size_t bytes, size_t align) {
  // Returns the memory at p to the pool. It is unspecified if,
  // or under what circumstances, this operation will result in
  // a call to upstream_resource()->deallocate().

  int i = __pool_index(bytes, align);
  if (i == __num_fixed_pools_)
    return __adhoc_pool_.__do_deallocate(__res_, p, bytes, align);
  else {
    _LIBCPP_ASSERT_NON_NULL(
        __fixed_pools_ != nullptr, "deallocating a block that was not allocated with this allocator");
    __fixed_pools_[i].__evacuate(p);
  }
}

bool synchronized_pool_resource::do_is_equal(const memory_resource& other) const noexcept { return &other == this; }

// 23.12.6, mem.res.monotonic.buffer

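// Counterpart of std::align that carves space from the *top* of a buffer:
// move `ptr` down by `size` bytes, then round down to `align` (a power of
// two). Returns nullptr when the aligned block would start before the first
// of the `space` bytes preceding `ptr`.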
static void* align_down(size_t align, size_t size, void*& ptr, size_t& space) {
  if (size > space)
    return nullptr;

  char* p1 = static_cast<char*>(ptr);
  char* new_ptr = reinterpret_cast<char*>(reinterpret_cast<uintptr_t>(p1 - size) & ~(align - 1));

  if (new_ptr < (p1 - space))
    return nullptr;

  ptr = new_ptr;
  space -= p1 - new_ptr;

  return ptr;
}

void* monotonic_buffer_resource::__initial_descriptor::__try_allocate_from_chunk(size_t bytes, size_t align) {
  if (!__cur_)
    return nullptr;
  void* new_ptr = static_cast<void*>(__cur_);
  size_t new_capacity = (__cur_ - __start_);
  void* aligned_ptr = align_down(align, bytes, new_ptr, new_capacity);
  if (aligned_ptr != nullptr)
    __cur_ = static_cast<char*>(new_ptr);
  return aligned_ptr;
}

void* monotonic_buffer_resource::__chunk_footer::__try_allocate_from_chunk(size_t bytes, size_t align) {
  void* new_ptr = static_cast<void*>(__cur_);
  size_t new_capacity = (__cur_ - __start_);
  void* aligned_ptr = align_down(align, bytes, new_ptr, new_capacity);
  if (aligned_ptr != nullptr)
    __cur_ = static_cast<char*>(new_ptr);
  return aligned_ptr;
}

void* monotonic_buffer_resource::do_allocate(size_t bytes, size_t align) {
  const size_t footer_size = sizeof(__chunk_footer);
  const size_t footer_align = alignof(__chunk_footer);

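  // Size of the most recent upstream allocation or, before any chunk exists,
  // the footprint the initial buffer would have had as a chunk; this seeds
  // the doubling policy below.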
  auto previous_allocation_size = [&]() {
    if (__chunks_ != nullptr)
      return __chunks_->__allocation_size();

    size_t newsize = (__initial_.__start_ != nullptr) ? (__initial_.__end_ - __initial_.__start_) : __initial_.__size_;

    return roundup(newsize, footer_align) + footer_size;
  };

  if (void* result = __initial_.__try_allocate_from_chunk(bytes, align))
    return result;
  if (__chunks_ != nullptr) {
    if (void* result = __chunks_->__try_allocate_from_chunk(bytes, align))
      return result;
  }

  // Allocate a brand-new chunk.

  if (align < footer_align)
    align = footer_align;

  size_t aligned_capacity = roundup(bytes, footer_align) + footer_size;
  size_t previous_capacity = previous_allocation_size();

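  // Roughly double the usable capacity of each successive chunk, unless the
  // request itself already needs more than the previous chunk provided.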
  if (aligned_capacity <= previous_capacity) {
    size_t newsize = 2 * (previous_capacity - footer_size);
    aligned_capacity = roundup(newsize, footer_align) + footer_size;
  }

  char* start = (char*)__res_->allocate(aligned_capacity, align);
  auto end = start + aligned_capacity - footer_size;
  __chunk_footer* footer = (__chunk_footer*)(end);
  footer->__next_ = __chunks_;
  footer->__start_ = start;
  footer->__cur_ = end;
  footer->__align_ = align;
  __chunks_ = footer;

  return __chunks_->__try_allocate_from_chunk(bytes, align);
}

} // namespace pmr

_LIBCPP_END_NAMESPACE_STD