1 | /* Malloc implementation for multiple threads without lock contention. |
2 | Copyright (C) 1996-2024 Free Software Foundation, Inc. |
3 | Copyright The GNU Toolchain Authors. |
4 | This file is part of the GNU C Library. |
5 | |
6 | The GNU C Library is free software; you can redistribute it and/or |
7 | modify it under the terms of the GNU Lesser General Public License as |
8 | published by the Free Software Foundation; either version 2.1 of the |
9 | License, or (at your option) any later version. |
10 | |
11 | The GNU C Library is distributed in the hope that it will be useful, |
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | Lesser General Public License for more details. |
15 | |
16 | You should have received a copy of the GNU Lesser General Public |
17 | License along with the GNU C Library; see the file COPYING.LIB. If |
18 | not, see <https://www.gnu.org/licenses/>. */ |
19 | |
20 | /* |
21 | This is a version (aka ptmalloc2) of malloc/free/realloc written by |
22 | Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger. |
23 | |
24 | There have been substantial changes made after the integration into |
25 | glibc in all parts of the code. Do not look for much commonality |
26 | with the ptmalloc2 version. |
27 | |
28 | * Version ptmalloc2-20011215 |
29 | based on: |
30 | VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) |
31 | |
32 | * Quickstart |
33 | |
34 | In order to compile this implementation, a Makefile is provided with |
35 | the ptmalloc2 distribution, which has pre-defined targets for some |
36 | popular systems (e.g. "make posix" for Posix threads). All that is |
37 | typically required with regard to compiler flags is the selection of |
38 | the thread package via defining one out of USE_PTHREADS, USE_THR or |
39 | USE_SPROC. Check the thread-m.h file for what effects this has. |
40 | Many/most systems will additionally require USE_TSD_DATA_HACK to be |
41 | defined, so this is the default for "make posix". |
42 | |
43 | * Why use this malloc? |
44 | |
45 | This is not the fastest, most space-conserving, most portable, or |
46 | most tunable malloc ever written. However it is among the fastest |
47 | while also being among the most space-conserving, portable and tunable. |
48 | Consistent balance across these factors results in a good general-purpose |
49 | allocator for malloc-intensive programs. |
50 | |
51 | The main properties of the algorithms are: |
52 | * For large (>= 512 bytes) requests, it is a pure best-fit allocator, |
53 | with ties normally decided via FIFO (i.e. least recently used). |
54 | * For small (<= 64 bytes by default) requests, it is a caching |
55 | allocator, that maintains pools of quickly recycled chunks. |
56 | * In between, and for combinations of large and small requests, it does |
57 | the best it can trying to meet both goals at once. |
58 | * For very large requests (>= 128KB by default), it relies on system |
59 | memory mapping facilities, if supported. |
60 | |
61 | For a longer but slightly out of date high-level description, see |
62 | http://gee.cs.oswego.edu/dl/html/malloc.html |
63 | |
64 | You may already by default be using a C library containing a malloc |
65 | that is based on some version of this malloc (for example in |
66 | linux). You might still want to use the one in this file in order to |
67 | customize settings or to avoid overheads associated with library |
68 | versions. |
69 | |
70 | * Contents, described in more detail in "description of public routines" below. |
71 | |
72 | Standard (ANSI/SVID/...) functions: |
73 | malloc(size_t n); |
74 | calloc(size_t n_elements, size_t element_size); |
75 | free(void* p); |
76 | realloc(void* p, size_t n); |
77 | memalign(size_t alignment, size_t n); |
78 | valloc(size_t n); |
79 | mallinfo() |
80 | mallopt(int parameter_number, int parameter_value) |
81 | |
82 | Additional functions: |
83 | independent_calloc(size_t n_elements, size_t size, void* chunks[]); |
84 | independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); |
85 | pvalloc(size_t n); |
86 | malloc_trim(size_t pad); |
87 | malloc_usable_size(void* p); |
88 | malloc_stats(); |
89 | |
90 | * Vital statistics: |
91 | |
92 | Supported pointer representation: 4 or 8 bytes |
93 | Supported size_t representation: 4 or 8 bytes |
94 | Note that size_t is allowed to be 4 bytes even if pointers are 8. |
95 | You can adjust this by defining INTERNAL_SIZE_T |
96 | |
97 | Alignment: 2 * sizeof(size_t) (default) |
(i.e., 8-byte alignment with 4-byte size_t). This suffices for
99 | nearly all current machines and C compilers. However, you can |
100 | define MALLOC_ALIGNMENT to be wider than this if necessary. |
101 | |
102 | Minimum overhead per allocated chunk: 4 or 8 bytes |
103 | Each malloced chunk has a hidden word of overhead holding size |
104 | and status information. |
105 | |
106 | Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead) |
8-byte ptrs: 24/32 bytes (including 4/8 overhead)
108 | |
109 | When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte |
110 | ptrs but 4 byte size) or 24 (for 8/8) additional bytes are |
111 | needed; 4 (8) for a trailing size field and 8 (16) bytes for |
112 | free list pointers. Thus, the minimum allocatable size is |
113 | 16/24/32 bytes. |
114 | |
115 | Even a request for zero bytes (i.e., malloc(0)) returns a |
116 | pointer to something of the minimum allocatable size. |
117 | |
The maximum overhead wastage (i.e., number of extra bytes
allocated beyond what was requested in malloc) is less than or equal
120 | to the minimum size, except for requests >= mmap_threshold that |
121 | are serviced via mmap(), where the worst case wastage is 2 * |
122 | sizeof(size_t) bytes plus the remainder from a system page (the |
123 | minimal mmap unit); typically 4096 or 8192 bytes. |
124 | |
125 | Maximum allocated size: 4-byte size_t: 2^32 minus about two pages |
126 | 8-byte size_t: 2^64 minus about two pages |
127 | |
128 | It is assumed that (possibly signed) size_t values suffice to |
129 | represent chunk sizes. `Possibly signed' is due to the fact |
130 | that `size_t' may be defined on a system as either a signed or |
131 | an unsigned type. The ISO C standard says that it must be |
132 | unsigned, but a few systems are known not to adhere to this. |
133 | Additionally, even when size_t is unsigned, sbrk (which is by |
134 | default used to obtain memory from system) accepts signed |
135 | arguments, and may not be able to handle size_t-wide arguments |
136 | with negative sign bit. Generally, values that would |
137 | appear as negative after accounting for overhead and alignment |
138 | are supported only via mmap(), which does not have this |
139 | limitation. |
140 | |
141 | Requests for sizes outside the allowed range will perform an optional |
failure action and then return null. (Requests may also
fail because a system is out of memory.)
144 | |
145 | Thread-safety: thread-safe |
146 | |
147 | Compliance: I believe it is compliant with the 1997 Single Unix Specification |
148 | Also SVID/XPG, ANSI C, and probably others as well. |
149 | |
150 | * Synopsis of compile-time options: |
151 | |
152 | People have reported using previous versions of this malloc on all |
153 | versions of Unix, sometimes by tweaking some of the defines |
154 | below. It has been tested most extensively on Solaris and Linux. |
155 | People also report using it in stand-alone embedded systems. |
156 | |
157 | The implementation is in straight, hand-tuned ANSI C. It is not |
158 | at all modular. (Sorry!) It uses a lot of macros. To be at all |
159 | usable, this code should be compiled using an optimizing compiler |
160 | (for example gcc -O3) that can simplify expressions and control |
161 | paths. (FAQ: some macros import variables as arguments rather than |
162 | declare locals because people reported that some debuggers |
163 | otherwise get confused.) |
164 | |
165 | OPTION DEFAULT VALUE |
166 | |
167 | Compilation Environment options: |
168 | |
169 | HAVE_MREMAP 0 |
170 | |
171 | Changing default word sizes: |
172 | |
173 | INTERNAL_SIZE_T size_t |
174 | |
175 | Configuration and functionality options: |
176 | |
177 | USE_PUBLIC_MALLOC_WRAPPERS NOT defined |
178 | USE_MALLOC_LOCK NOT defined |
179 | MALLOC_DEBUG NOT defined |
180 | REALLOC_ZERO_BYTES_FREES 1 |
181 | TRIM_FASTBINS 0 |
182 | |
183 | Options for customizing MORECORE: |
184 | |
185 | MORECORE sbrk |
186 | MORECORE_FAILURE -1 |
187 | MORECORE_CONTIGUOUS 1 |
188 | MORECORE_CANNOT_TRIM NOT defined |
189 | MORECORE_CLEARS 1 |
190 | MMAP_AS_MORECORE_SIZE (1024 * 1024) |
191 | |
192 | Tuning options that are also dynamically changeable via mallopt: |
193 | |
194 | DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit) |
195 | DEFAULT_TRIM_THRESHOLD 128 * 1024 |
196 | DEFAULT_TOP_PAD 0 |
197 | DEFAULT_MMAP_THRESHOLD 128 * 1024 |
198 | DEFAULT_MMAP_MAX 65536 |
199 | |
200 | There are several other #defined constants and macros that you |
201 | probably don't want to touch unless you are extending or adapting malloc. */ |
202 | |
203 | /* |
204 | void* is the pointer type that malloc should say it returns |
205 | */ |
206 | |
207 | #ifndef void |
208 | #define void void |
209 | #endif /*void*/ |
210 | |
211 | #include <stddef.h> /* for size_t */ |
212 | #include <stdlib.h> /* for getenv(), abort() */ |
213 | #include <unistd.h> /* for __libc_enable_secure */ |
214 | |
215 | #include <atomic.h> |
216 | #include <_itoa.h> |
217 | #include <bits/wordsize.h> |
218 | #include <sys/sysinfo.h> |
219 | |
220 | #include <ldsodefs.h> |
221 | #include <setvmaname.h> |
222 | |
223 | #include <unistd.h> |
224 | #include <stdio.h> /* needed for malloc_stats */ |
225 | #include <errno.h> |
226 | #include <assert.h> |
227 | |
228 | #include <shlib-compat.h> |
229 | |
230 | /* For uintptr_t. */ |
231 | #include <stdint.h> |
232 | |
233 | /* For va_arg, va_start, va_end. */ |
234 | #include <stdarg.h> |
235 | |
236 | /* For MIN, MAX, powerof2. */ |
237 | #include <sys/param.h> |
238 | |
239 | /* For ALIGN_UP et. al. */ |
240 | #include <libc-pointer-arith.h> |
241 | |
242 | /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */ |
243 | #include <libc-diag.h> |
244 | |
245 | /* For memory tagging. */ |
246 | #include <libc-mtag.h> |
247 | |
248 | #include <malloc/malloc-internal.h> |
249 | |
250 | /* For SINGLE_THREAD_P. */ |
251 | #include <sysdep-cancel.h> |
252 | |
253 | #include <libc-internal.h> |
254 | |
255 | /* For tcache double-free check. */ |
256 | #include <random-bits.h> |
257 | #include <sys/random.h> |
258 | #include <not-cancel.h> |
259 | |
260 | /* |
261 | Debugging: |
262 | |
263 | Because freed chunks may be overwritten with bookkeeping fields, this |
264 | malloc will often die when freed memory is overwritten by user |
265 | programs. This can be very effective (albeit in an annoying way) |
266 | in helping track down dangling pointers. |
267 | |
268 | If you compile with -DMALLOC_DEBUG, a number of assertion checks are |
269 | enabled that will catch more memory errors. You probably won't be |
270 | able to make much sense of the actual assertion errors, but they |
271 | should help you locate incorrectly overwritten memory. The checking |
272 | is fairly extensive, and will slow down execution |
273 | noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set |
274 | will attempt to check every non-mmapped allocated and free chunk in |
275 | the course of computing the summaries. (By nature, mmapped regions |
276 | cannot be checked very much automatically.) |
277 | |
278 | Setting MALLOC_DEBUG may also be helpful if you are trying to modify |
279 | this code. The assertions in the check routines spell out in more |
280 | detail the assumptions and invariants underlying the algorithms. |
281 | |
282 | Setting MALLOC_DEBUG does NOT provide an automated mechanism for |
283 | checking that all accesses to malloced memory stay within their |
284 | bounds. However, there are several add-ons and adaptations of this |
285 | or other mallocs available that do this. |
286 | */ |
287 | |
288 | #ifndef MALLOC_DEBUG |
289 | #define MALLOC_DEBUG 0 |
290 | #endif |
291 | |
292 | #if USE_TCACHE |
293 | /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */ |
294 | # define TCACHE_MAX_BINS 64 |
295 | # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1) |
296 | |
297 | /* Only used to pre-fill the tunables. */ |
298 | # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ) |
299 | |
300 | /* When "x" is from chunksize(). */ |
301 | # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT) |
302 | /* When "x" is a user-provided size. */ |
303 | # define usize2tidx(x) csize2tidx (request2size (x)) |
304 | |
305 | /* With rounding and alignment, the bins are... |
306 | idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit) |
307 | idx 1 bytes 25..40 or 13..20 |
308 | idx 2 bytes 41..56 or 21..28 |
309 | etc. */ |
310 | |
311 | /* This is another arbitrary limit, which tunables can change. Each |
312 | tcache bin will hold at most this number of chunks. */ |
313 | # define TCACHE_FILL_COUNT 7 |
314 | |
315 | /* Maximum chunks in tcache bins for tunables. This value must fit the range |
316 | of tcache->counts[] entries, else they may overflow. */ |
317 | # define MAX_TCACHE_COUNT UINT16_MAX |
318 | #endif |
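
/* Illustrative sketch, not part of the implementation: on a typical
   64-bit configuration (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, hence
   MINSIZE == 32), a user request is first padded by request2size and
   the resulting chunk size is then mapped to a tcache bin:

     size_t req = 100;                 // user-requested bytes
     size_t csz = request2size (req);  // (100 + 8 + 15) & ~15 == 112
     size_t idx = csize2tidx (csz);    // (112 - 32 + 15) / 16  == 5

   so a 100-byte request would be served from tcache bin 5 whenever
   that bin holds a cached chunk.  The concrete numbers depend on the
   platform's SIZE_SZ and MALLOC_ALIGNMENT.  */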
319 | |
320 | /* Safe-Linking: |
321 | Use randomness from ASLR (mmap_base) to protect single-linked lists |
322 | of Fast-Bins and TCache. That is, mask the "next" pointers of the |
323 | lists' chunks, and also perform allocation alignment checks on them. |
324 | This mechanism reduces the risk of pointer hijacking, as was done with |
325 | Safe-Unlinking in the double-linked lists of Small-Bins. |
326 | It assumes a minimum page size of 4096 bytes (12 bits). Systems with |
327 | larger pages provide less entropy, although the pointer mangling |
328 | still works. */ |
329 | #define PROTECT_PTR(pos, ptr) \ |
330 | ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr))) |
331 | #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr) |
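
/* Illustrative sketch, not part of the implementation: a protected
   single-linked list stores each "next" link in masked form and
   unmasks it when the list is walked, roughly:

     e->next = PROTECT_PTR (&e->next, old_head);  // push: store masked link
     ...
     head = REVEAL_PTR (e->next);                 // pop: recover real pointer

   Since the mask is derived from the (ASLR-randomized) address of the
   link field itself, an attacker who overwrites the field without
   knowing that address produces a mangled pointer, and the alignment
   checks applied to revealed pointers catch most such corruption.  */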
332 | |
333 | /* |
334 | The REALLOC_ZERO_BYTES_FREES macro controls the behavior of realloc (p, 0) |
335 | when p is nonnull. If the macro is nonzero, the realloc call returns NULL; |
336 | otherwise, the call returns what malloc (0) would. In either case, |
337 | p is freed. Glibc uses a nonzero REALLOC_ZERO_BYTES_FREES, which |
338 | implements common historical practice. |
339 | |
340 | ISO C17 says the realloc call has implementation-defined behavior, |
341 | and it might not even free p. |
342 | */ |
343 | |
344 | #ifndef REALLOC_ZERO_BYTES_FREES |
345 | #define REALLOC_ZERO_BYTES_FREES 1 |
346 | #endif |
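
/* Illustrative sketch, not part of the implementation: with the glibc
   default of REALLOC_ZERO_BYTES_FREES == 1,

     void *p = malloc (16);
     void *q = realloc (p, 0);   // p is freed, q == NULL

   Portable code should not rely on this, since ISO C leaves
   realloc (p, 0) implementation-defined.  */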
347 | |
348 | /* |
349 | TRIM_FASTBINS controls whether free() of a very small chunk can |
350 | immediately lead to trimming. Setting to true (1) can reduce memory |
351 | footprint, but will almost always slow down programs that use a lot |
352 | of small chunks. |
353 | |
354 | Define this only if you are willing to give up some speed to more |
355 | aggressively reduce system-level memory footprint when releasing |
356 | memory in programs that use many small chunks. You can get |
357 | essentially the same effect by setting MXFAST to 0, but this can |
358 | lead to even greater slowdowns in programs using many small chunks. |
359 | TRIM_FASTBINS is an in-between compile-time option, that disables |
360 | only those chunks bordering topmost memory from being placed in |
361 | fastbins. |
362 | */ |
363 | |
364 | #ifndef TRIM_FASTBINS |
365 | #define TRIM_FASTBINS 0 |
366 | #endif |
367 | |
368 | /* Definition for getting more memory from the OS. */ |
369 | #include "morecore.c" |
370 | |
371 | #define MORECORE (*__glibc_morecore) |
372 | #define MORECORE_FAILURE 0 |
373 | |
374 | /* Memory tagging. */ |
375 | |
376 | /* Some systems support the concept of tagging (sometimes known as |
377 | coloring) memory locations on a fine grained basis. Each memory |
378 | location is given a color (normally allocated randomly) and |
379 | pointers are also colored. When the pointer is dereferenced, the |
380 | pointer's color is checked against the memory's color and if they |
381 | differ the access is faulted (sometimes lazily). |
382 | |
383 | We use this in glibc by maintaining a single color for the malloc |
384 | data structures that are interleaved with the user data and then |
385 | assigning separate colors for each block allocation handed out. In |
386 | this way simple buffer overruns will be rapidly detected. When |
387 | memory is freed, the memory is recolored back to the glibc default |
388 | so that simple use-after-free errors can also be detected. |
389 | |
390 | If memory is reallocated the buffer is recolored even if the |
391 | address remains the same. This has a performance impact, but |
392 | guarantees that the old pointer cannot mistakenly be reused (code |
393 | that compares old against new will see a mismatch and will then |
394 | need to behave as though realloc moved the data to a new location). |
395 | |
396 | Internal API for memory tagging support. |
397 | |
398 | The aim is to keep the code for memory tagging support as close to |
399 | the normal APIs in glibc as possible, so that if tagging is not |
400 | enabled in the library, or is disabled at runtime then standard |
401 | operations can continue to be used. Support macros are used to do |
402 | this: |
403 | |
404 | void *tag_new_zero_region (void *ptr, size_t size) |
405 | |
406 | Allocates a new tag, colors the memory with that tag, zeros the |
407 | memory and returns a pointer that is correctly colored for that |
408 | location. The non-tagging version will simply call memset with 0. |
409 | |
410 | void *tag_region (void *ptr, size_t size) |
411 | |
412 | Color the region of memory pointed to by PTR and size SIZE with |
413 | the color of PTR. Returns the original pointer. |
414 | |
415 | void *tag_new_usable (void *ptr) |
416 | |
417 | Allocate a new random color and use it to color the user region of |
418 | a chunk; this may include data from the subsequent chunk's header |
419 | if tagging is sufficiently fine grained. Returns PTR suitably |
420 | recolored for accessing the memory there. |
421 | |
422 | void *tag_at (void *ptr) |
423 | |
424 | Read the current color of the memory at the address pointed to by |
PTR (ignoring its current color) and return PTR recolored to that
color. PTR must be a valid address in all other respects. When
427 | tagging is not enabled, it simply returns the original pointer. |
428 | */ |
429 | |
430 | #ifdef USE_MTAG |
431 | static bool mtag_enabled = false; |
432 | static int mtag_mmap_flags = 0; |
433 | #else |
434 | # define mtag_enabled false |
435 | # define mtag_mmap_flags 0 |
436 | #endif |
437 | |
438 | static __always_inline void * |
439 | tag_region (void *ptr, size_t size) |
440 | { |
441 | if (__glibc_unlikely (mtag_enabled)) |
return __libc_mtag_tag_region (ptr, size);
443 | return ptr; |
444 | } |
445 | |
446 | static __always_inline void * |
447 | tag_new_zero_region (void *ptr, size_t size) |
448 | { |
449 | if (__glibc_unlikely (mtag_enabled)) |
return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
return memset (ptr, 0, size);
452 | } |
453 | |
454 | /* Defined later. */ |
455 | static void * |
456 | tag_new_usable (void *ptr); |
457 | |
458 | static __always_inline void * |
459 | tag_at (void *ptr) |
460 | { |
461 | if (__glibc_unlikely (mtag_enabled)) |
return __libc_mtag_address_get_tag (ptr);
463 | return ptr; |
464 | } |
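
/* Illustrative sketch, not part of the implementation: the helpers
   above are meant to be used roughly as described in the comment
   block above (names below are illustrative):

     mem = tag_new_usable (mem);              // allocation: new color for
                                              // the user region
     (void) tag_region (chunk2mem (p), sz);   // free: recolor the region
                                              // with the color carried by
                                              // the (internal) chunk pointer

   With USE_MTAG undefined, mtag_enabled is the constant false, so these
   collapse to plain pointer pass-throughs (or a memset in the case of
   tag_new_zero_region) and the fast paths are unaffected.  */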
465 | |
466 | #include <string.h> |
467 | |
468 | /* |
469 | MORECORE-related declarations. By default, rely on sbrk |
470 | */ |
471 | |
472 | |
473 | /* |
474 | MORECORE is the name of the routine to call to obtain more memory |
475 | from the system. See below for general guidance on writing |
476 | alternative MORECORE functions, as well as a version for WIN32 and a |
477 | sample version for pre-OSX macos. |
478 | */ |
479 | |
480 | #ifndef MORECORE |
481 | #define MORECORE sbrk |
482 | #endif |
483 | |
484 | /* |
485 | MORECORE_FAILURE is the value returned upon failure of MORECORE |
486 | as well as mmap. Since it cannot be an otherwise valid memory address, |
487 | and must reflect values of standard sys calls, you probably ought not |
488 | try to redefine it. |
489 | */ |
490 | |
491 | #ifndef MORECORE_FAILURE |
492 | #define MORECORE_FAILURE (-1) |
493 | #endif |
494 | |
495 | /* |
496 | If MORECORE_CONTIGUOUS is true, take advantage of fact that |
497 | consecutive calls to MORECORE with positive arguments always return |
498 | contiguous increasing addresses. This is true of unix sbrk. Even |
499 | if not defined, when regions happen to be contiguous, malloc will |
500 | permit allocations spanning regions obtained from different |
501 | calls. But defining this when applicable enables some stronger |
502 | consistency checks and space efficiencies. |
503 | */ |
504 | |
505 | #ifndef MORECORE_CONTIGUOUS |
506 | #define MORECORE_CONTIGUOUS 1 |
507 | #endif |
508 | |
509 | /* |
510 | Define MORECORE_CANNOT_TRIM if your version of MORECORE |
511 | cannot release space back to the system when given negative |
512 | arguments. This is generally necessary only if you are using |
513 | a hand-crafted MORECORE function that cannot handle negative arguments. |
514 | */ |
515 | |
516 | /* #define MORECORE_CANNOT_TRIM */ |
517 | |
518 | /* MORECORE_CLEARS (default 1) |
519 | The degree to which the routine mapped to MORECORE zeroes out |
520 | memory: never (0), only for newly allocated space (1) or always |
521 | (2). The distinction between (1) and (2) is necessary because on |
522 | some systems, if the application first decrements and then |
523 | increments the break value, the contents of the reallocated space |
524 | are unspecified. |
525 | */ |
526 | |
527 | #ifndef MORECORE_CLEARS |
528 | # define MORECORE_CLEARS 1 |
529 | #endif |
530 | |
531 | |
532 | /* |
533 | MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if |
534 | sbrk fails, and mmap is used as a backup. The value must be a |
535 | multiple of page size. This backup strategy generally applies only |
536 | when systems have "holes" in address space, so sbrk cannot perform |
contiguous expansion, but there is still space available on the system.
538 | On systems for which this is known to be useful (i.e. most linux |
539 | kernels), this occurs only when programs allocate huge amounts of |
540 | memory. Between this, and the fact that mmap regions tend to be |
541 | limited, the size should be large, to avoid too many mmap calls and |
542 | thus avoid running out of kernel resources. */ |
543 | |
544 | #ifndef MMAP_AS_MORECORE_SIZE |
545 | #define MMAP_AS_MORECORE_SIZE (1024 * 1024) |
546 | #endif |
547 | |
548 | /* |
549 | Define HAVE_MREMAP to make realloc() use mremap() to re-allocate |
550 | large blocks. |
551 | */ |
552 | |
553 | #ifndef HAVE_MREMAP |
554 | #define HAVE_MREMAP 0 |
555 | #endif |
556 | |
557 | /* |
558 | This version of malloc supports the standard SVID/XPG mallinfo |
559 | routine that returns a struct containing usage properties and |
560 | statistics. It should work on any SVID/XPG compliant system that has |
561 | a /usr/include/malloc.h defining struct mallinfo. (If you'd like to |
562 | install such a thing yourself, cut out the preliminary declarations |
563 | as described above and below and save them in a malloc.h file. But |
564 | there's no compelling reason to bother to do this.) |
565 | |
566 | The main declaration needed is the mallinfo struct that is returned |
(by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
bunch of fields that are not even meaningful in this version of
malloc. These fields are instead filled by mallinfo() with
570 | other numbers that might be of interest. |
571 | */ |
572 | |
573 | |
574 | /* ---------- description of public routines ------------ */ |
575 | |
576 | #if IS_IN (libc) |
577 | /* |
578 | malloc(size_t n) |
579 | Returns a pointer to a newly allocated chunk of at least n bytes, or null |
580 | if no space is available. Additionally, on failure, errno is |
581 | set to ENOMEM on ANSI C systems. |
582 | |
583 | If n is zero, malloc returns a minimum-sized chunk. (The minimum |
584 | size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit |
585 | systems.) On most systems, size_t is an unsigned type, so calls |
586 | with negative arguments are interpreted as requests for huge amounts |
587 | of space, which will often fail. The maximum supported value of n |
588 | differs across systems, but is in all cases less than the maximum |
589 | representable value of a size_t. |
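
  A minimal usage sketch (illustrative only):

    void *p = malloc (n);
    if (p == NULL)
      perror ("malloc");   // errno is ENOMEM on failure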
590 | */ |
591 | void* __libc_malloc(size_t); |
592 | libc_hidden_proto (__libc_malloc) |
593 | |
594 | /* |
595 | free(void* p) |
596 | Releases the chunk of memory pointed to by p, that had been previously |
597 | allocated using malloc or a related routine such as realloc. |
598 | It has no effect if p is null. It can have arbitrary (i.e., bad!) |
599 | effects if p has already been freed. |
600 | |
Unless disabled (using mallopt), freeing very large spaces will,
when possible, automatically trigger operations that give
603 | back unused memory to the system, thus reducing program footprint. |
604 | */ |
605 | void __libc_free(void*); |
606 | libc_hidden_proto (__libc_free) |
607 | |
608 | /* |
609 | calloc(size_t n_elements, size_t element_size); |
610 | Returns a pointer to n_elements * element_size bytes, with all locations |
611 | set to zero. |
612 | */ |
613 | void* __libc_calloc(size_t, size_t); |
614 | |
615 | /* |
616 | realloc(void* p, size_t n) |
617 | Returns a pointer to a chunk of size n that contains the same data |
618 | as does chunk p up to the minimum of (n, p's size) bytes, or null |
619 | if no space is available. |
620 | |
621 | The returned pointer may or may not be the same as p. The algorithm |
622 | prefers extending p when possible, otherwise it employs the |
623 | equivalent of a malloc-copy-free sequence. |
624 | |
625 | If p is null, realloc is equivalent to malloc. |
626 | |
627 | If space is not available, realloc returns null, errno is set (if on |
628 | ANSI) and p is NOT freed. |
629 | |
If n is for fewer bytes than already held by p, the newly unused
631 | space is lopped off and freed if possible. Unless the #define |
632 | REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of |
633 | zero (re)allocates a minimum-sized chunk. |
634 | |
635 | Large chunks that were internally obtained via mmap will always be |
636 | grown using malloc-copy-free sequences unless the system supports |
637 | MREMAP (currently only linux). |
638 | |
639 | The old unix realloc convention of allowing the last-free'd chunk |
640 | to be used as an argument to realloc is not supported. |
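
  Because p is not freed on failure, callers should keep the old
  pointer until success is known, e.g. (illustrative only):

    void *tmp = realloc (p, n);
    if (tmp != NULL)
      p = tmp;     // success: adopt the new block
    // otherwise p is still valid and must eventually be freed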
641 | */ |
642 | void* __libc_realloc(void*, size_t); |
643 | libc_hidden_proto (__libc_realloc) |
644 | |
645 | /* |
646 | memalign(size_t alignment, size_t n); |
647 | Returns a pointer to a newly allocated chunk of n bytes, aligned |
648 | in accord with the alignment argument. |
649 | |
650 | The alignment argument should be a power of two. If the argument is |
651 | not a power of two, the nearest greater power is used. |
652 | 8-byte alignment is guaranteed by normal malloc calls, so don't |
653 | bother calling memalign with an argument of 8 or less. |
654 | |
655 | Overreliance on memalign is a sure way to fragment space. |
656 | */ |
657 | void* __libc_memalign(size_t, size_t); |
658 | libc_hidden_proto (__libc_memalign) |
659 | |
660 | /* |
661 | valloc(size_t n); |
662 | Equivalent to memalign(pagesize, n), where pagesize is the page |
663 | size of the system. If the pagesize is unknown, 4096 is used. |
664 | */ |
665 | void* __libc_valloc(size_t); |
666 | |
667 | |
668 | |
669 | /* |
670 | mallinfo() |
671 | Returns (by copy) a struct containing various summary statistics: |
672 | |
673 | arena: current total non-mmapped bytes allocated from system |
674 | ordblks: the number of free chunks |
675 | smblks: the number of fastbin blocks (i.e., small chunks that |
676 | have been freed but not reused or consolidated) |
677 | hblks: current number of mmapped regions |
678 | hblkhd: total bytes held in mmapped regions |
679 | usmblks: always 0 |
680 | fsmblks: total bytes held in fastbin blocks |
681 | uordblks: current total allocated space (normal or mmapped) |
682 | fordblks: total free space |
683 | keepcost: the maximum number of bytes that could ideally be released |
684 | back to system via malloc_trim. ("ideally" means that |
685 | it ignores page restrictions etc.) |
686 | |
687 | Because these fields are ints, but internal bookkeeping may |
688 | be kept as longs, the reported values may wrap around zero and |
689 | thus be inaccurate. |
690 | */ |
691 | struct mallinfo2 __libc_mallinfo2(void); |
692 | libc_hidden_proto (__libc_mallinfo2) |
693 | |
694 | struct mallinfo __libc_mallinfo(void); |
695 | |
696 | |
697 | /* |
698 | pvalloc(size_t n); |
699 | Equivalent to valloc(minimum-page-that-holds(n)), that is, |
700 | round up n to nearest pagesize. |
701 | */ |
702 | void* __libc_pvalloc(size_t); |
703 | |
704 | /* |
705 | malloc_trim(size_t pad); |
706 | |
707 | If possible, gives memory back to the system (via negative |
708 | arguments to sbrk) if there is unused memory at the `high' end of |
709 | the malloc pool. You can call this after freeing large blocks of |
710 | memory to potentially reduce the system-level memory requirements |
711 | of a program. However, it cannot guarantee to reduce memory. Under |
712 | some allocation patterns, some large free blocks of memory will be |
713 | locked between two used chunks, so they cannot be given back to |
714 | the system. |
715 | |
716 | The `pad' argument to malloc_trim represents the amount of free |
717 | trailing space to leave untrimmed. If this argument is zero, |
718 | only the minimum amount of memory to maintain internal data |
719 | structures will be left (one page or less). Non-zero arguments |
720 | can be supplied to maintain enough trailing space to service |
721 | future expected allocations without having to re-obtain memory |
722 | from the system. |
723 | |
724 | Malloc_trim returns 1 if it actually released any memory, else 0. |
725 | On systems that do not support "negative sbrks", it will always |
726 | return 0. |
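
  For example (illustrative only), after releasing a large working set:

    free (big_buffer);
    int released = malloc_trim (0);   // 1 if memory went back to the system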
727 | */ |
728 | int __malloc_trim(size_t); |
729 | |
730 | /* |
731 | malloc_usable_size(void* p); |
732 | |
733 | Returns the number of bytes you can actually use in |
734 | an allocated chunk, which may be more than you requested (although |
735 | often not) due to alignment and minimum size constraints. |
736 | You can use this many bytes without worrying about |
737 | overwriting other allocated objects. This is not a particularly great |
738 | programming practice. malloc_usable_size can be more useful in |
739 | debugging and assertions, for example: |
740 | |
741 | p = malloc(n); |
742 | assert(malloc_usable_size(p) >= 256); |
743 | |
744 | */ |
745 | size_t __malloc_usable_size(void*); |
746 | |
747 | /* |
748 | malloc_stats(); |
749 | Prints on stderr the amount of space obtained from the system (both |
750 | via sbrk and mmap), the maximum amount (which may be more than |
751 | current if malloc_trim and/or munmap got called), and the current |
752 | number of bytes allocated via malloc (or realloc, etc) but not yet |
753 | freed. Note that this is the number of bytes allocated, not the |
754 | number requested. It will be larger than the number requested |
755 | because of alignment and bookkeeping overhead. Because it includes |
756 | alignment wastage as being in use, this figure may be greater than |
757 | zero even when no user-level chunks are allocated. |
758 | |
759 | The reported current and maximum system memory can be inaccurate if |
760 | a program makes other calls to system memory allocation functions |
761 | (normally sbrk) outside of malloc. |
762 | |
763 | malloc_stats prints only the most commonly interesting statistics. |
764 | More information can be obtained by calling mallinfo. |
765 | |
766 | */ |
767 | void __malloc_stats(void); |
768 | |
769 | /* |
770 | posix_memalign(void **memptr, size_t alignment, size_t size); |
771 | |
772 | POSIX wrapper like memalign(), checking for validity of size. |
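
  For example (illustrative only):

    void *p;
    if (posix_memalign (&p, 64, 1024) == 0)   // 64-byte-aligned block
      {
        ...
        free (p);
      }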
773 | */ |
774 | int __posix_memalign(void **, size_t, size_t); |
775 | #endif /* IS_IN (libc) */ |
776 | |
777 | /* |
778 | mallopt(int parameter_number, int parameter_value) |
Sets tunable parameters. The format is to provide a
780 | (parameter-number, parameter-value) pair. mallopt then sets the |
781 | corresponding parameter to the argument value if it can (i.e., so |
782 | long as the value is meaningful), and returns 1 if successful else |
783 | 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, |
784 | normally defined in malloc.h. Only one of these (M_MXFAST) is used |
785 | in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply, |
786 | so setting them has no effect. But this malloc also supports four |
787 | other options in mallopt. See below for details. Briefly, supported |
788 | parameters are as follows (listed defaults are for "typical" |
789 | configurations). |
790 | |
791 | Symbol param # default allowed param values |
792 | M_MXFAST 1 64 0-80 (0 disables fastbins) |
793 | M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming) |
794 | M_TOP_PAD -2 0 any |
795 | M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support) |
796 | M_MMAP_MAX -4 65536 any (0 disables use of mmap) |
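
  For example (illustrative only), to disable fastbins and forbid mmap:

    mallopt (M_MXFAST, 0);      // route small frees through normal bins
    mallopt (M_MMAP_MAX, 0);    // never service requests via mmap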
797 | */ |
798 | int __libc_mallopt(int, int); |
799 | #if IS_IN (libc) |
800 | libc_hidden_proto (__libc_mallopt) |
801 | #endif |
802 | |
803 | /* mallopt tuning options */ |
804 | |
805 | /* |
806 | M_MXFAST is the maximum request size used for "fastbins", special bins |
807 | that hold returned chunks without consolidating their spaces. This |
808 | enables future requests for chunks of the same size to be handled |
809 | very quickly, but can increase fragmentation, and thus increase the |
810 | overall memory footprint of a program. |
811 | |
812 | This malloc manages fastbins very conservatively yet still |
813 | efficiently, so fragmentation is rarely a problem for values less |
814 | than or equal to the default. The maximum supported value of MXFAST |
815 | is 80. You wouldn't want it any higher than this anyway. Fastbins |
816 | are designed especially for use with many small structs, objects or |
strings -- the default handles structs/objects/arrays with sizes up
to eight 4-byte fields, or small strings representing words, tokens,
819 | etc. Using fastbins for larger objects normally worsens |
820 | fragmentation without improving speed. |
821 | |
822 | M_MXFAST is set in REQUEST size units. It is internally used in |
823 | chunksize units, which adds padding and alignment. You can reduce |
824 | M_MXFAST to 0 to disable all use of fastbins. This causes the malloc |
825 | algorithm to be a closer approximation of fifo-best-fit in all cases, |
826 | not just for larger requests, but will generally cause it to be |
827 | slower. |
828 | */ |
829 | |
830 | |
831 | /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */ |
832 | #ifndef M_MXFAST |
833 | #define M_MXFAST 1 |
834 | #endif |
835 | |
836 | #ifndef DEFAULT_MXFAST |
837 | #define DEFAULT_MXFAST (64 * SIZE_SZ / 4) |
838 | #endif |
839 | |
840 | |
841 | /* |
842 | M_TRIM_THRESHOLD is the maximum amount of unused top-most memory |
843 | to keep before releasing via malloc_trim in free(). |
844 | |
845 | Automatic trimming is mainly useful in long-lived programs. |
846 | Because trimming via sbrk can be slow on some systems, and can |
847 | sometimes be wasteful (in cases where programs immediately |
afterward allocate more large chunks), the value should be high
849 | enough so that your overall system performance would improve by |
850 | releasing this much memory. |
851 | |
852 | The trim threshold and the mmap control parameters (see below) |
853 | can be traded off with one another. Trimming and mmapping are |
854 | two different ways of releasing unused memory back to the |
855 | system. Between these two, it is often possible to keep |
856 | system-level demands of a long-lived program down to a bare |
857 | minimum. For example, in one test suite of sessions measuring |
858 | the XF86 X server on Linux, using a trim threshold of 128K and a |
859 | mmap threshold of 192K led to near-minimal long term resource |
860 | consumption. |
861 | |
862 | If you are using this malloc in a long-lived program, it should |
863 | pay to experiment with these values. As a rough guide, you |
864 | might set to a value close to the average size of a process |
865 | (program) running on your system. Releasing this much memory |
866 | would allow such a process to run in memory. Generally, it's |
worth it to tune for trimming rather than memory mapping when a
868 | program undergoes phases where several large chunks are |
869 | allocated and released in ways that can reuse each other's |
870 | storage, perhaps mixed with phases where there are no such |
871 | chunks at all. And in well-behaved long-lived programs, |
872 | controlling release of large blocks via trimming versus mapping |
873 | is usually faster. |
874 | |
875 | However, in most programs, these parameters serve mainly as |
876 | protection against the system-level effects of carrying around |
877 | massive amounts of unneeded memory. Since frequent calls to |
878 | sbrk, mmap, and munmap otherwise degrade performance, the default |
879 | parameters are set to relatively high values that serve only as |
880 | safeguards. |
881 | |
The trim value must be greater than the page size to have any useful
effect. To disable trimming completely, you can set it to
(unsigned long)(-1).
885 | |
886 | Trim settings interact with fastbin (MXFAST) settings: Unless |
887 | TRIM_FASTBINS is defined, automatic trimming never takes place upon |
888 | freeing a chunk with size less than or equal to MXFAST. Trimming is |
889 | instead delayed until subsequent freeing of larger chunks. However, |
890 | you can still force an attempted trim by calling malloc_trim. |
891 | |
892 | Also, trimming is not generally possible in cases where |
893 | the main arena is obtained via mmap. |
894 | |
895 | Note that the trick some people use of mallocing a huge space and |
896 | then freeing it at program startup, in an attempt to reserve system |
897 | memory, doesn't have the intended effect under automatic trimming, |
898 | since that memory will immediately be returned to the system. |
899 | */ |
900 | |
901 | #define M_TRIM_THRESHOLD -1 |
902 | |
903 | #ifndef DEFAULT_TRIM_THRESHOLD |
904 | #define DEFAULT_TRIM_THRESHOLD (128 * 1024) |
905 | #endif |
906 | |
907 | /* |
908 | M_TOP_PAD is the amount of extra `padding' space to allocate or |
909 | retain whenever sbrk is called. It is used in two ways internally: |
910 | |
911 | * When sbrk is called to extend the top of the arena to satisfy |
912 | a new malloc request, this much padding is added to the sbrk |
913 | request. |
914 | |
915 | * When malloc_trim is called automatically from free(), |
916 | it is used as the `pad' argument. |
917 | |
918 | In both cases, the actual amount of padding is rounded |
919 | so that the end of the arena is always a system page boundary. |
920 | |
921 | The main reason for using padding is to avoid calling sbrk so |
922 | often. Having even a small pad greatly reduces the likelihood |
923 | that nearly every malloc request during program start-up (or |
924 | after trimming) will invoke sbrk, which needlessly wastes |
925 | time. |
926 | |
927 | Automatic rounding-up to page-size units is normally sufficient |
928 | to avoid measurable overhead, so the default is 0. However, in |
929 | systems where sbrk is relatively slow, it can pay to increase |
930 | this value, at the expense of carrying around more memory than |
931 | the program needs. |
932 | */ |
933 | |
934 | #define M_TOP_PAD -2 |
935 | |
936 | #ifndef DEFAULT_TOP_PAD |
937 | #define DEFAULT_TOP_PAD (0) |
938 | #endif |
939 | |
940 | /* |
941 | MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically |
942 | adjusted MMAP_THRESHOLD. |
943 | */ |
944 | |
945 | #ifndef DEFAULT_MMAP_THRESHOLD_MIN |
946 | #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024) |
947 | #endif |
948 | |
949 | #ifndef DEFAULT_MMAP_THRESHOLD_MAX |
950 | /* For 32-bit platforms we cannot increase the maximum mmap |
951 | threshold much because it is also the minimum value for the |
952 | maximum heap size and its alignment. Going above 512k (i.e., 1M |
953 | for new heaps) wastes too much address space. */ |
954 | # if __WORDSIZE == 32 |
955 | # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024) |
956 | # else |
957 | # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long)) |
958 | # endif |
959 | #endif |
960 | |
961 | /* |
962 | M_MMAP_THRESHOLD is the request size threshold for using mmap() |
963 | to service a request. Requests of at least this size that cannot |
964 | be allocated using already-existing space will be serviced via mmap. |
965 | (If enough normal freed space already exists it is used instead.) |
966 | |
967 | Using mmap segregates relatively large chunks of memory so that |
968 | they can be individually obtained and released from the host |
969 | system. A request serviced through mmap is never reused by any |
970 | other request (at least not directly; the system may just so |
971 | happen to remap successive requests to the same locations). |
972 | |
973 | Segregating space in this way has the benefits that: |
974 | |
975 | 1. Mmapped space can ALWAYS be individually released back |
976 | to the system, which helps keep the system level memory |
977 | demands of a long-lived program low. |
978 | 2. Mapped memory can never become `locked' between |
979 | other chunks, as can happen with normally allocated chunks, which |
980 | means that even trimming via malloc_trim would not release them. |
981 | 3. On some systems with "holes" in address spaces, mmap can obtain |
982 | memory that sbrk cannot. |
983 | |
984 | However, it has the disadvantages that: |
985 | |
986 | 1. The space cannot be reclaimed, consolidated, and then |
987 | used to service later requests, as happens with normal chunks. |
988 | 2. It can lead to more wastage because of mmap page alignment |
989 | requirements |
990 | 3. It causes malloc performance to be more dependent on host |
991 | system memory management support routines which may vary in |
992 | implementation quality and may impose arbitrary |
993 | limitations. Generally, servicing a request via normal |
994 | malloc steps is faster than going through a system's mmap. |
995 | |
996 | The advantages of mmap nearly always outweigh disadvantages for |
997 | "large" chunks, but the value of "large" varies across systems. The |
998 | default is an empirically derived value that works well in most |
999 | systems. |
1000 | |
1001 | |
1002 | Update in 2006: |
1003 | The above was written in 2001. Since then the world has changed a lot. |
1004 | Memory got bigger. Applications got bigger. The virtual address space |
1005 | layout in 32 bit linux changed. |
1006 | |
1007 | In the new situation, brk() and mmap space is shared and there are no |
1008 | artificial limits on brk size imposed by the kernel. What is more, |
applications have started using transient allocations larger than the
128Kb that was imagined in 2001.
1011 | |
1012 | The price for mmap is also high now; each time glibc mmaps from the |
1013 | kernel, the kernel is forced to zero out the memory it gives to the |
1014 | application. Zeroing memory is expensive and eats a lot of cache and |
memory bandwidth. This has nothing to do with the efficiency of the
virtual memory system; when handing out mmapped memory the kernel
simply has no choice but to zero it.
1018 | |
1019 | In 2001, the kernel had a maximum size for brk() which was about 800 |
megabytes on 32 bit x86; at that point brk() would hit the first
mmapped shared libraries and couldn't expand anymore. With current 2.6
1022 | kernels, the VA space layout is different and brk() and mmap |
1023 | both can span the entire heap at will. |
1024 | |
1025 | Rather than using a static threshold for the brk/mmap tradeoff, |
1026 | we are now using a simple dynamic one. The goal is still to avoid |
1027 | fragmentation. The old goals we kept are |
1028 | 1) try to get the long lived large allocations to use mmap() |
1029 | 2) really large allocations should always use mmap() |
1030 | and we're adding now: |
3) transient allocations should use brk() to avoid forcing the kernel
   to zero memory over and over again
1033 | |
1034 | The implementation works with a sliding threshold, which is by default |
limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1036 | out at 128Kb as per the 2001 default. |
1037 | |
1038 | This allows us to satisfy requirement 1) under the assumption that long |
1039 | lived allocations are made early in the process' lifespan, before it has |
1040 | started doing dynamic allocations of the same size (which will |
1041 | increase the threshold). |
1042 | |
The upper bound on the threshold satisfies requirement 2).
1044 | |
1045 | The threshold goes up in value when the application frees memory that was |
1046 | allocated with the mmap allocator. The idea is that once the application |
1047 | starts freeing memory of a certain size, it's highly probable that this is |
1048 | a size the application uses for transient allocations. This estimator |
1049 | is there to satisfy the new third requirement. |
1050 | |
1051 | */ |
1052 | |
1053 | #define M_MMAP_THRESHOLD -3 |
1054 | |
1055 | #ifndef DEFAULT_MMAP_THRESHOLD |
1056 | #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN |
1057 | #endif |
1058 | |
1059 | /* |
1060 | M_MMAP_MAX is the maximum number of requests to simultaneously |
1061 | service using mmap. This parameter exists because |
1062 | some systems have a limited number of internal tables for |
1063 | use by mmap, and using more than a few of them may degrade |
1064 | performance. |
1065 | |
1066 | The default is set to a value that serves only as a safeguard. |
1067 | Setting to 0 disables use of mmap for servicing large requests. |
1068 | */ |
1069 | |
1070 | #define M_MMAP_MAX -4 |
1071 | |
1072 | #ifndef DEFAULT_MMAP_MAX |
1073 | #define DEFAULT_MMAP_MAX (65536) |
1074 | #endif |
1075 | |
1076 | #include <malloc.h> |
1077 | |
1078 | #ifndef RETURN_ADDRESS |
1079 | #define RETURN_ADDRESS(X_) (NULL) |
1080 | #endif |
1081 | |
1082 | /* Forward declarations. */ |
1083 | struct malloc_chunk; |
1084 | typedef struct malloc_chunk* mchunkptr; |
1085 | |
1086 | /* Internal routines. */ |
1087 | |
1088 | static void* _int_malloc(mstate, size_t); |
1089 | static void _int_free(mstate, mchunkptr, int); |
1090 | static void _int_free_merge_chunk (mstate, mchunkptr, INTERNAL_SIZE_T); |
1091 | static INTERNAL_SIZE_T _int_free_create_chunk (mstate, |
1092 | mchunkptr, INTERNAL_SIZE_T, |
1093 | mchunkptr, INTERNAL_SIZE_T); |
1094 | static void _int_free_maybe_consolidate (mstate, INTERNAL_SIZE_T); |
1095 | static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T, |
1096 | INTERNAL_SIZE_T); |
1097 | static void* _int_memalign(mstate, size_t, size_t); |
1098 | #if IS_IN (libc) |
1099 | static void* _mid_memalign(size_t, size_t, void *); |
1100 | #endif |
1101 | |
1102 | static void malloc_printerr(const char *str) __attribute__ ((noreturn)); |
1103 | |
1104 | static void munmap_chunk(mchunkptr p); |
1105 | #if HAVE_MREMAP |
1106 | static mchunkptr mremap_chunk(mchunkptr p, size_t new_size); |
1107 | #endif |
1108 | |
1109 | static size_t musable (void *mem); |
1110 | |
1111 | /* ------------------ MMAP support ------------------ */ |
1112 | |
1113 | |
1114 | #include <fcntl.h> |
1115 | #include <sys/mman.h> |
1116 | |
1117 | #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
1118 | # define MAP_ANONYMOUS MAP_ANON |
1119 | #endif |
1120 | |
1121 | #define MMAP(addr, size, prot, flags) \ |
1122 | __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0) |
1123 | |
1124 | |
1125 | /* |
1126 | ----------------------- Chunk representations ----------------------- |
1127 | */ |
1128 | |
1129 | |
1130 | /* |
1131 | This struct declaration is misleading (but accurate and necessary). |
1132 | It declares a "view" into memory allowing access to necessary |
1133 | fields at known offsets from a given base. See explanation below. |
1134 | */ |
1135 | |
1136 | struct malloc_chunk { |
1137 | |
1138 | INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */ |
1139 | INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */ |
1140 | |
1141 | struct malloc_chunk* fd; /* double links -- used only if free. */ |
1142 | struct malloc_chunk* bk; |
1143 | |
1144 | /* Only used for large blocks: pointer to next larger size. */ |
1145 | struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */ |
1146 | struct malloc_chunk* bk_nextsize; |
1147 | }; |
1148 | |
1149 | |
1150 | /* |
1151 | malloc_chunk details: |
1152 | |
1153 | (The following includes lightly edited explanations by Colin Plumb.) |
1154 | |
1155 | Chunks of memory are maintained using a `boundary tag' method as |
1156 | described in e.g., Knuth or Standish. (See the paper by Paul |
1157 | Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a |
1158 | survey of such techniques.) Sizes of free chunks are stored both |
1159 | in the front of each chunk and at the end. This makes |
1160 | consolidating fragmented chunks into bigger chunks very fast. The |
1161 | size fields also hold bits representing whether chunks are free or |
1162 | in use. |
1163 | |
1164 | An allocated chunk looks like this: |
1165 | |
1166 | |
1167 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1168 | | Size of previous chunk, if unallocated (P clear) | |
1169 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1170 | | Size of chunk, in bytes |A|M|P| |
1171 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1172 | | User data starts here... . |
1173 | . . |
1174 | . (malloc_usable_size() bytes) . |
1175 | . | |
1176 | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1177 | | (size of chunk, but used for application data) | |
1178 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1179 | | Size of next chunk, in bytes |A|0|1| |
1180 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1181 | |
1182 | Where "chunk" is the front of the chunk for the purpose of most of |
1183 | the malloc code, but "mem" is the pointer that is returned to the |
1184 | user. "Nextchunk" is the beginning of the next contiguous chunk. |
1185 | |
1186 | Chunks always begin on even word boundaries, so the mem portion |
1187 | (which is returned to the user) is also on an even word boundary, and |
1188 | thus at least double-word aligned. |
1189 | |
1190 | Free chunks are stored in circular doubly-linked lists, and look like this: |
1191 | |
1192 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1193 | | Size of previous chunk, if unallocated (P clear) | |
1194 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1195 | `head:' | Size of chunk, in bytes |A|0|P| |
1196 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1197 | | Forward pointer to next chunk in list | |
1198 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1199 | | Back pointer to previous chunk in list | |
1200 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1201 | | Unused space (may be 0 bytes long) . |
1202 | . . |
1203 | . | |
1204 | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1205 | `foot:' | Size of chunk, in bytes | |
1206 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1207 | | Size of next chunk, in bytes |A|0|0| |
1208 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
1209 | |
1210 | The P (PREV_INUSE) bit, stored in the unused low-order bit of the |
1211 | chunk size (which is always a multiple of two words), is an in-use |
1212 | bit for the *previous* chunk. If that bit is *clear*, then the |
1213 | word before the current chunk size contains the previous chunk |
1214 | size, and can be used to find the front of the previous chunk. |
1215 | The very first chunk allocated always has this bit set, |
1216 | preventing access to non-existent (or non-owned) memory. If |
1217 | prev_inuse is set for any given chunk, then you CANNOT determine |
1218 | the size of the previous chunk, and might even get a memory |
1219 | addressing fault when trying to do so. |
1220 | |
1221 | The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial, |
1222 | main arena, described by the main_arena variable. When additional |
1223 | threads are spawned, each thread receives its own arena (up to a |
1224 | configurable limit, after which arenas are reused for multiple |
1225 | threads), and the chunks in these arenas have the A bit set. To |
1226 | find the arena for a chunk on such a non-main arena, heap_for_ptr |
1227 | performs a bit mask operation and indirection through the ar_ptr |
1228 | member of the per-heap header heap_info (see arena.c). |
1229 | |
1230 | Note that the `foot' of the current chunk is actually represented |
1231 | as the prev_size of the NEXT chunk. This makes it easier to |
1232 | deal with alignments etc but can be very confusing when trying |
1233 | to extend or adapt this code. |
1234 | |
1235 | The three exceptions to all this are: |
1236 | |
1237 | 1. The special chunk `top' doesn't bother using the |
1238 | trailing size field since there is no next contiguous chunk |
1239 | that would have to index off it. After initialization, `top' |
1240 | is forced to always exist. If it would become less than |
1241 | MINSIZE bytes long, it is replenished. |
1242 | |
1243 | 2. Chunks allocated via mmap, which have the second-lowest-order |
1244 | bit M (IS_MMAPPED) set in their size fields. Because they are |
1245 | allocated one-by-one, each must contain its own trailing size |
1246 | field. If the M bit is set, the other bits are ignored |
1247 | (because mmapped chunks are neither in an arena, nor adjacent |
1248 | to a freed chunk). The M bit is also used for chunks which |
1249 | originally came from a dumped heap via malloc_set_state in |
1250 | hooks.c. |
1251 | |
1252 | 3. Chunks in fastbins are treated as allocated chunks from the |
1253 | point of view of the chunk allocator. They are consolidated |
1254 | with their neighbors only in bulk, in malloc_consolidate. |
1255 | */ |
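
/* Illustrative sketch, not part of the implementation: with the layout
   above, chunk navigation is plain pointer arithmetic on the size field
   once the three low-order flag bits are masked off (which is what the
   chunksize()/next_chunk() macros defined later do):

     INTERNAL_SIZE_T sz = p->mchunk_size & ~(INTERNAL_SIZE_T) 0x7;
     mchunkptr next = (mchunkptr) ((char *) p + sz);
     if (!(next->mchunk_size & PREV_INUSE))
       // p is free, and next->mchunk_prev_size equals sz (the `foot')

   No such walk is meaningful for mmapped chunks (M bit set), since they
   are not adjacent to other chunks.  */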
1256 | |
1257 | /* |
1258 | ---------- Size and alignment checks and conversions ---------- |
1259 | */ |
1260 | |
1261 | /* Conversion from malloc headers to user pointers, and back. When |
1262 | using memory tagging the user data and the malloc data structure |
1263 | headers have distinct tags. Converting fully from one to the other |
1264 | involves extracting the tag at the other address and creating a |
1265 | suitable pointer using it. That can be quite expensive. There are |
1266 | cases when the pointers are not dereferenced (for example only used |
1267 | for alignment check) so the tags are not relevant, and there are |
1268 | cases when user data is not tagged distinctly from malloc headers |
1269 | (user data is untagged because tagging is done late in malloc and |
1270 | early in free). User memory tagging across internal interfaces: |
1271 | |
1272 | sysmalloc: Returns untagged memory. |
1273 | _int_malloc: Returns untagged memory. |
1274 | _int_free: Takes untagged memory. |
1275 | _int_memalign: Returns untagged memory. |
1277 | _mid_memalign: Returns tagged memory. |
1278 | _int_realloc: Takes and returns tagged memory. |
1279 | */ |
1280 | |
1281 | /* The chunk header is two SIZE_SZ elements, but this is used widely, so |
1282 | we define it here for clarity later. */ |
1283 | #define CHUNK_HDR_SZ (2 * SIZE_SZ) |
1284 | |
1285 | /* Convert a chunk address to a user mem pointer without correcting |
1286 | the tag. */ |
1287 | #define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ)) |
1288 | |
1289 | /* Convert a chunk address to a user mem pointer and extract the right tag. */ |
1290 | #define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ)) |
1291 | |
1292 | /* Convert a user mem pointer to a chunk address and extract the right tag. */ |
1293 | #define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ))) |
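
/* Illustrative sketch: with memory tagging disabled the two conversions
   are exact inverses of each other:

     mchunkptr p = ...;               internal chunk pointer
     void *mem = chunk2mem (p);       the address handed to the user
     assert (mem2chunk (mem) == p);   holds when !mtag_enabled

   With tagging enabled, the header and the user data carry different
   tags, which is why chunk2mem_tag and mem2chunk re-extract the tag at
   the target address before the pointer may be dereferenced.  */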
1294 | |
1295 | /* The smallest possible chunk */ |
1296 | #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize)) |
1297 | |
1298 | /* The smallest size we can malloc is an aligned minimal chunk */ |
1299 | |
1300 | #define MINSIZE \ |
1301 | (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)) |
1302 | |
1303 | /* Check if m has acceptable alignment */ |
1304 | |
1305 | #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0) |
1306 | |
1307 | #define misaligned_chunk(p) \ |
1308 | ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \ |
1309 | & MALLOC_ALIGN_MASK) |
1310 | |
1311 | /* pad request bytes into a usable size -- internal version */ |
1312 | /* Note: This must be a macro that evaluates to a compile time constant |
1313 | if passed a literal constant. */ |
1314 | #define request2size(req) \ |
1315 | (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \ |
1316 | MINSIZE : \ |
1317 | ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK) |
1318 | |
/* Check that REQ does not overflow when padded and aligned and that it
   does not exceed PTRDIFF_MAX.  Returns the padded request size, or
   MINSIZE in case that value would be less than MINSIZE, or 0 if any of
   the previous checks fail.  */
1323 | static inline size_t |
1324 | checked_request2size (size_t req) __nonnull (1) |
1325 | { |
1326 | if (__glibc_unlikely (req > PTRDIFF_MAX)) |
1327 | return 0; |
1328 | |
1329 | /* When using tagged memory, we cannot share the end of the user |
1330 | block with the header for the next chunk, so ensure that we |
1331 | allocate blocks that are rounded up to the granule size. Take |
1332 | care not to overflow from close to MAX_SIZE_T to a small |
1333 | number. Ideally, this would be part of request2size(), but that |
1334 | must be a macro that produces a compile time constant if passed |
1335 | a constant literal. */ |
1336 | if (__glibc_unlikely (mtag_enabled)) |
1337 | { |
1338 | /* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */ |
      asm ("");
1340 | |
1341 | req = (req + (__MTAG_GRANULE_SIZE - 1)) & |
1342 | ~(size_t)(__MTAG_GRANULE_SIZE - 1); |
1343 | } |
1344 | |
1345 | return request2size (req); |
1346 | } |
1347 | |
1348 | /* |
1349 | --------------- Physical chunk operations --------------- |
1350 | */ |
1351 | |
1352 | |
1353 | /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */ |
1354 | #define PREV_INUSE 0x1 |
1355 | |
1356 | /* extract inuse bit of previous chunk */ |
1357 | #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE) |
1358 | |
1359 | |
1360 | /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */ |
1361 | #define IS_MMAPPED 0x2 |
1362 | |
1363 | /* check for mmap()'ed chunk */ |
1364 | #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED) |
1365 | |
1366 | |
1367 | /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained |
1368 | from a non-main arena. This is only set immediately before handing |
1369 | the chunk to the user, if necessary. */ |
1370 | #define NON_MAIN_ARENA 0x4 |
1371 | |
1372 | /* Check for chunk from main arena. */ |
1373 | #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0) |
1374 | |
1375 | /* Mark a chunk as not being on the main arena. */ |
1376 | #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA) |
1377 | |
1378 | |
1379 | /* |
1380 | Bits to mask off when extracting size |
1381 | |
1382 | Note: IS_MMAPPED is intentionally not masked off from size field in |
1383 | macros for which mmapped chunks should never be seen. This should |
1384 | cause helpful core dumps to occur if it is tried by accident by |
1385 | people extending or adapting this malloc. |
1386 | */ |
1387 | #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA) |
1388 | |
1389 | /* Get size, ignoring use bits */ |
1390 | #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS)) |
1391 | |
1392 | /* Like chunksize, but do not mask SIZE_BITS. */ |
1393 | #define chunksize_nomask(p) ((p)->mchunk_size) |
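
/* Illustrative sketch: a raw size field of 0x91 describes a 0x90-byte
   (144-byte) chunk whose previous neighbour is in use:

     chunksize_nomask (p) == 0x91
     chunksize (p)        == 0x90
     prev_inuse (p)       == PREV_INUSE
     chunk_is_mmapped (p) == 0
     chunk_main_arena (p) != 0

   Chunk sizes are always multiples of MALLOC_ALIGNMENT, which is what
   frees the low bits of the size field for the three flags above.  */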
1394 | |
1395 | /* Ptr to next physical malloc_chunk. */ |
1396 | #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p))) |
1397 | |
1398 | /* Size of the chunk below P. Only valid if !prev_inuse (P). */ |
1399 | #define prev_size(p) ((p)->mchunk_prev_size) |
1400 | |
1401 | /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */ |
1402 | #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz)) |
1403 | |
1404 | /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */ |
1405 | #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p))) |
1406 | |
1407 | /* Treat space at ptr + offset as a chunk */ |
1408 | #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s))) |
1409 | |
1410 | /* extract p's inuse bit */ |
1411 | #define inuse(p) \ |
1412 | ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE) |
1413 | |
1414 | /* set/clear chunk as being inuse without otherwise disturbing */ |
1415 | #define set_inuse(p) \ |
1416 | ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE |
1417 | |
1418 | #define clear_inuse(p) \ |
1419 | ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE) |
1420 | |
1421 | |
1422 | /* check/set/clear inuse bits in known places */ |
1423 | #define inuse_bit_at_offset(p, s) \ |
1424 | (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE) |
1425 | |
1426 | #define set_inuse_bit_at_offset(p, s) \ |
1427 | (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE) |
1428 | |
1429 | #define clear_inuse_bit_at_offset(p, s) \ |
1430 | (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE)) |
1431 | |
1432 | |
1433 | /* Set size at head, without disturbing its use bit */ |
1434 | #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s))) |
1435 | |
1436 | /* Set size/use field */ |
1437 | #define set_head(p, s) ((p)->mchunk_size = (s)) |
1438 | |
1439 | /* Set size at footer (only when chunk is not in use) */ |
#define set_foot(p, s)  (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
1441 | |
1442 | #pragma GCC poison mchunk_size |
1443 | #pragma GCC poison mchunk_prev_size |
1444 | |
1445 | /* This is the size of the real usable data in the chunk. Not valid for |
1446 | dumped heap chunks. */ |
1447 | #define memsize(p) \ |
1448 | (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \ |
1449 | chunksize (p) - CHUNK_HDR_SZ : \ |
1450 | chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ)) |
1451 | |
/* If memory tagging is enabled the layout changes to accommodate the granule
   size; this is wasteful for small allocations, so it is not done by default.
   Both the chunk header and the user data have to be granule aligned.  */
1455 | _Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ, |
1456 | "memory tagging is not supported with large granule." ); |
1457 | |
1458 | static __always_inline void * |
1459 | tag_new_usable (void *ptr) |
1460 | { |
1461 | if (__glibc_unlikely (mtag_enabled) && ptr) |
1462 | { |
1463 | mchunkptr cp = mem2chunk(ptr); |
      ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
1465 | } |
1466 | return ptr; |
1467 | } |
1468 | |
1469 | /* |
1470 | -------------------- Internal data structures -------------------- |
1471 | |
1472 | All internal state is held in an instance of malloc_state defined |
1473 | below. There are no other static variables, except in two optional |
1474 | cases: |
1475 | * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above. |
1476 | * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor |
1477 | for mmap. |
1478 | |
1479 | Beware of lots of tricks that minimize the total bookkeeping space |
  requirements.  The result is a little over 1K bytes (for 4-byte
1481 | pointers and size_t.) |
1482 | */ |
1483 | |
1484 | /* |
1485 | Bins |
1486 | |
1487 | An array of bin headers for free chunks. Each bin is doubly |
1488 | linked. The bins are approximately proportionally (log) spaced. |
1489 | There are a lot of these bins (128). This may look excessive, but |
1490 | works very well in practice. Most bins hold sizes that are |
1491 | unusual as malloc request sizes, but are more usual for fragments |
1492 | and consolidated sets of chunks, which is what these bins hold, so |
1493 | they can be found quickly. All procedures maintain the invariant |
1494 | that no consolidated chunk physically borders another one, so each |
1495 | chunk in a list is known to be preceded and followed by either |
1496 | inuse chunks or the ends of memory. |
1497 | |
1498 | Chunks in bins are kept in size order, with ties going to the |
1499 | approximately least recently used chunk. Ordering isn't needed |
1500 | for the small bins, which all contain the same-sized chunks, but |
1501 | facilitates best-fit allocation for larger chunks. These lists |
1502 | are just sequential. Keeping them in order almost never requires |
1503 | enough traversal to warrant using fancier ordered data |
1504 | structures. |
1505 | |
1506 | Chunks of the same size are linked with the most |
1507 | recently freed at the front, and allocations are taken from the |
1508 | back. This results in LRU (FIFO) allocation order, which tends |
1509 | to give each chunk an equal opportunity to be consolidated with |
1510 | adjacent freed chunks, resulting in larger free chunks and less |
1511 | fragmentation. |
1512 | |
1513 | To simplify use in double-linked lists, each bin header acts |
1514 | as a malloc_chunk. This avoids special-casing for headers. |
1515 | But to conserve space and improve locality, we allocate |
1516 | only the fd/bk pointers of bins, and then use repositioning tricks |
1517 | to treat these as the fields of a malloc_chunk*. |
1518 | */ |
1519 | |
1520 | typedef struct malloc_chunk *mbinptr; |
1521 | |
1522 | /* addressing -- note that bin_at(0) does not exist */ |
1523 | #define bin_at(m, i) \ |
1524 | (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \ |
1525 | - offsetof (struct malloc_chunk, fd)) |
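
/* Illustrative sketch: only two pointers per bin are stored in bins[],
   and bin_at backs the address up by offsetof (struct malloc_chunk, fd)
   so that the fd/bk members of the fake chunk land exactly on those two
   slots:

     mbinptr b = bin_at (av, i);
     &b->fd == &av->bins[(i - 1) * 2]
     &b->bk == &av->bins[(i - 1) * 2 + 1]

   The prev_size/size fields of such a bin header overlap unrelated
   arena state (the previous bin's pointers) and must never be read.  */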
1526 | |
1527 | /* analog of ++bin */ |
1528 | #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1))) |
1529 | |
1530 | /* Reminders about list directionality within bins */ |
1531 | #define first(b) ((b)->fd) |
1532 | #define last(b) ((b)->bk) |
1533 | |
1534 | /* |
1535 | Indexing |
1536 | |
1537 | Bins for sizes < 512 bytes contain chunks of all the same size, spaced |
1538 | 8 bytes apart. Larger bins are approximately logarithmically spaced: |
1539 | |
1540 | 64 bins of size 8 |
1541 | 32 bins of size 64 |
1542 | 16 bins of size 512 |
1543 | 8 bins of size 4096 |
1544 | 4 bins of size 32768 |
1545 | 2 bins of size 262144 |
1546 | 1 bin of size what's left |
1547 | |
1548 | There is actually a little bit of slop in the numbers in bin_index |
1549 | for the sake of speed. This makes no difference elsewhere. |
1550 | |
1551 | The bins top out around 1MB because we expect to service large |
1552 | requests via mmap. |
1553 | |
1554 | Bin 0 does not exist. Bin 1 is the unordered list; if that would be |
1555 | a valid chunk size the small bins are bumped up one. |
1556 | */ |
1557 | |
1558 | #define NBINS 128 |
1559 | #define NSMALLBINS 64 |
1560 | #define SMALLBIN_WIDTH MALLOC_ALIGNMENT |
1561 | #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ) |
1562 | #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH) |
1563 | |
1564 | #define in_smallbin_range(sz) \ |
1565 | ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE) |
1566 | |
1567 | #define smallbin_index(sz) \ |
1568 | ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\ |
1569 | + SMALLBIN_CORRECTION) |
1570 | |
1571 | #define largebin_index_32(sz) \ |
1572 | (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\ |
1573 | ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ |
1574 | ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ |
1575 | ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ |
1576 | ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ |
1577 | 126) |
1578 | |
1579 | #define largebin_index_32_big(sz) \ |
1580 | (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\ |
1581 | ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ |
1582 | ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ |
1583 | ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ |
1584 | ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ |
1585 | 126) |
1586 | |
1587 | // XXX It remains to be seen whether it is good to keep the widths of |
1588 | // XXX the buckets the same or whether it should be scaled by a factor |
1589 | // XXX of two as well. |
1590 | #define largebin_index_64(sz) \ |
1591 | (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\ |
1592 | ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\ |
1593 | ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\ |
1594 | ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\ |
1595 | ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\ |
1596 | 126) |
1597 | |
1598 | #define largebin_index(sz) \ |
1599 | (SIZE_SZ == 8 ? largebin_index_64 (sz) \ |
1600 | : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \ |
1601 | : largebin_index_32 (sz)) |
1602 | |
1603 | #define bin_index(sz) \ |
1604 | ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz)) |
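
/* Worked example, assuming SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16
   (so SMALLBIN_WIDTH == 16, SMALLBIN_CORRECTION == 0 and
   MIN_LARGE_SIZE == 1024):

     bin_index (32)       ==   2    small bin, 32 >> 4
     bin_index (1008)     ==  63    last small bin
     bin_index (1024)     ==  64    first large bin, 48 + (1024 >> 6)
     bin_index (0x100000) == 126    the catch-all large bin

   32-bit configurations use the largebin_index_32* variants instead and
   shift the boundaries accordingly.  */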
1605 | |
1606 | /* Take a chunk off a bin list. */ |
1607 | static void |
1608 | unlink_chunk (mstate av, mchunkptr p) |
1609 | { |
1610 | if (chunksize (p) != prev_size (next_chunk (p))) |
    malloc_printerr ("corrupted size vs. prev_size");
1612 | |
1613 | mchunkptr fd = p->fd; |
1614 | mchunkptr bk = p->bk; |
1615 | |
1616 | if (__builtin_expect (fd->bk != p || bk->fd != p, 0)) |
    malloc_printerr ("corrupted double-linked list");
1618 | |
1619 | fd->bk = bk; |
1620 | bk->fd = fd; |
1621 | if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL) |
1622 | { |
1623 | if (p->fd_nextsize->bk_nextsize != p |
1624 | || p->bk_nextsize->fd_nextsize != p) |
	malloc_printerr ("corrupted double-linked list (not small)");
1626 | |
1627 | if (fd->fd_nextsize == NULL) |
1628 | { |
1629 | if (p->fd_nextsize == p) |
1630 | fd->fd_nextsize = fd->bk_nextsize = fd; |
1631 | else |
1632 | { |
1633 | fd->fd_nextsize = p->fd_nextsize; |
1634 | fd->bk_nextsize = p->bk_nextsize; |
1635 | p->fd_nextsize->bk_nextsize = fd; |
1636 | p->bk_nextsize->fd_nextsize = fd; |
1637 | } |
1638 | } |
1639 | else |
1640 | { |
1641 | p->fd_nextsize->bk_nextsize = p->bk_nextsize; |
1642 | p->bk_nextsize->fd_nextsize = p->fd_nextsize; |
1643 | } |
1644 | } |
1645 | } |
1646 | |
1647 | /* |
1648 | Unsorted chunks |
1649 | |
1650 | All remainders from chunk splits, as well as all returned chunks, |
1651 | are first placed in the "unsorted" bin. They are then placed |
1652 | in regular bins after malloc gives them ONE chance to be used before |
1653 | binning. So, basically, the unsorted_chunks list acts as a queue, |
1654 | with chunks being placed on it in free (and malloc_consolidate), |
1655 | and taken off (to be either used or placed in bins) in malloc. |
1656 | |
1657 | The NON_MAIN_ARENA flag is never set for unsorted chunks, so it |
1658 | does not have to be taken into account in size comparisons. |
1659 | */ |
1660 | |
1661 | /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */ |
1662 | #define unsorted_chunks(M) (bin_at (M, 1)) |
1663 | |
1664 | /* |
1665 | Top |
1666 | |
1667 | The top-most available chunk (i.e., the one bordering the end of |
1668 | available memory) is treated specially. It is never included in |
1669 | any bin, is used only if no other chunk is available, and is |
1670 | released back to the system if it is very large (see |
1671 | M_TRIM_THRESHOLD). Because top initially |
1672 | points to its own bin with initial zero size, thus forcing |
1673 | extension on the first malloc request, we avoid having any special |
1674 | code in malloc to check whether it even exists yet. But we still |
1675 | need to do so when getting memory from system, so we make |
1676 | initial_top treat the bin as a legal but unusable chunk during the |
1677 | interval between initialization and the first call to |
1678 | sysmalloc. (This is somewhat delicate, since it relies on |
1679 | the 2 preceding words to be zero during this interval as well.) |
1680 | */ |
1681 | |
1682 | /* Conveniently, the unsorted bin can be used as dummy top on first call */ |
1683 | #define initial_top(M) (unsorted_chunks (M)) |
1684 | |
1685 | /* |
1686 | Binmap |
1687 | |
1688 | To help compensate for the large number of bins, a one-level index |
1689 | structure is used for bin-by-bin searching. `binmap' is a |
1690 | bitvector recording whether bins are definitely empty so they can |
    be skipped over during traversals.  The bits are NOT always
1692 | cleared as soon as bins are empty, but instead only |
1693 | when they are noticed to be empty during traversal in malloc. |
1694 | */ |
1695 | |
1696 | /* Conservatively use 32 bits per map word, even if on 64bit system */ |
1697 | #define BINMAPSHIFT 5 |
1698 | #define BITSPERMAP (1U << BINMAPSHIFT) |
1699 | #define BINMAPSIZE (NBINS / BITSPERMAP) |
1700 | |
1701 | #define idx2block(i) ((i) >> BINMAPSHIFT) |
1702 | #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1)))) |
1703 | |
1704 | #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i)) |
1705 | #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i))) |
1706 | #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i)) |
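
/* Illustrative sketch: with BINMAPSHIFT == 5 each binmap word covers 32
   bins, so bin 70 is tracked by bit 6 of word 2:

     idx2block (70) == 2
     idx2bit (70)   == 1U << 6

     mark_bin (av, 70);             set when a chunk is placed in bin 70
     if (get_binmap (av, 70))       bin 70 might be non-empty
       ...
     unmark_bin (av, 70);           cleared lazily during traversal
*/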
1707 | |
1708 | /* |
1709 | Fastbins |
1710 | |
1711 | An array of lists holding recently freed small chunks. Fastbins |
1712 | are not doubly linked. It is faster to single-link them, and |
1713 | since chunks are never removed from the middles of these lists, |
1714 | double linking is not necessary. Also, unlike regular bins, they |
1715 | are not even processed in FIFO order (they use faster LIFO) since |
1716 | ordering doesn't much matter in the transient contexts in which |
1717 | fastbins are normally used. |
1718 | |
1719 | Chunks in fastbins keep their inuse bit set, so they cannot |
1720 | be consolidated with other free chunks. malloc_consolidate |
1721 | releases all chunks in fastbins and consolidates them with |
1722 | other free chunks. |
1723 | */ |
1724 | |
1725 | typedef struct malloc_chunk *mfastbinptr; |
1726 | #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx]) |
1727 | |
1728 | /* offset 2 to use otherwise unindexable first 2 bins */ |
1729 | #define fastbin_index(sz) \ |
1730 | ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) |
1731 | |
1732 | |
1733 | /* The maximum fastbin request size we support */ |
1734 | #define MAX_FAST_SIZE (80 * SIZE_SZ / 4) |
1735 | |
1736 | #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1) |
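
/* Worked example, assuming SIZE_SZ == 8 (chunk sizes are shifted by 4
   and MAX_FAST_SIZE == 160):

     fastbin_index (32)  == 0    the smallest possible chunk
     fastbin_index (48)  == 1
     fastbin_index (176) == 9    request2size (MAX_FAST_SIZE)

   which makes NFASTBINS == 10 for this configuration.  */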
1737 | |
1738 | /* |
1739 | FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free() |
1740 | that triggers automatic consolidation of possibly-surrounding |
1741 | fastbin chunks. This is a heuristic, so the exact value should not |
1742 | matter too much. It is defined at half the default trim threshold as a |
1743 | compromise heuristic to only attempt consolidation if it is likely |
1744 | to lead to trimming. However, it is not dynamically tunable, since |
1745 | consolidation reduces fragmentation surrounding large chunks even |
1746 | if trimming is not used. |
1747 | */ |
1748 | |
1749 | #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL) |
1750 | |
1751 | /* |
1752 | NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous |
1753 | regions. Otherwise, contiguity is exploited in merging together, |
1754 | when possible, results from consecutive MORECORE calls. |
1755 | |
1756 | The initial value comes from MORECORE_CONTIGUOUS, but is |
1757 | changed dynamically if mmap is ever used as an sbrk substitute. |
1758 | */ |
1759 | |
1760 | #define NONCONTIGUOUS_BIT (2U) |
1761 | |
1762 | #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0) |
1763 | #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0) |
1764 | #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT) |
1765 | #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT) |
1766 | |
1767 | /* Maximum size of memory handled in fastbins. */ |
1768 | static uint8_t global_max_fast; |
1769 | |
1770 | /* |
1771 | Set value of max_fast. |
1772 | Use impossibly small value if 0. |
1773 | Precondition: there are no existing fastbin chunks in the main arena. |
1774 | Since do_check_malloc_state () checks this, we call malloc_consolidate () |
1775 | before changing max_fast. Note other arenas will leak their fast bin |
1776 | entries if max_fast is reduced. |
1777 | */ |
1778 | |
1779 | #define set_max_fast(s) \ |
1780 | global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \ |
1781 | ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK)) |
1782 | |
1783 | static inline INTERNAL_SIZE_T |
1784 | get_max_fast (void) |
1785 | { |
1786 | /* Tell the GCC optimizers that global_max_fast is never larger |
1787 | than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in |
1788 | _int_malloc after constant propagation of the size parameter. |
1789 | (The code never executes because malloc preserves the |
1790 | global_max_fast invariant, but the optimizers may not recognize |
1791 | this.) */ |
1792 | if (global_max_fast > MAX_FAST_SIZE) |
1793 | __builtin_unreachable (); |
1794 | return global_max_fast; |
1795 | } |
1796 | |
1797 | /* |
1798 | ----------- Internal state representation and initialization ----------- |
1799 | */ |
1800 | |
1801 | /* |
1802 | have_fastchunks indicates that there are probably some fastbin chunks. |
1803 | It is set true on entering a chunk into any fastbin, and cleared early in |
1804 | malloc_consolidate. The value is approximate since it may be set when there |
1805 | are no fastbin chunks, or it may be clear even if there are fastbin chunks |
   available.  Given that its sole purpose is to reduce the number of redundant
   calls to malloc_consolidate, it does not affect correctness.  As a result we
   can safely use relaxed atomic accesses.
1809 | */ |
1810 | |
1811 | |
1812 | struct malloc_state |
1813 | { |
1814 | /* Serialize access. */ |
1815 | __libc_lock_define (, mutex); |
1816 | |
1817 | /* Flags (formerly in max_fast). */ |
1818 | int flags; |
1819 | |
1820 | /* Set if the fastbin chunks contain recently inserted free blocks. */ |
1821 | /* Note this is a bool but not all targets support atomics on booleans. */ |
1822 | int have_fastchunks; |
1823 | |
1824 | /* Fastbins */ |
1825 | mfastbinptr fastbinsY[NFASTBINS]; |
1826 | |
1827 | /* Base of the topmost chunk -- not otherwise kept in a bin */ |
1828 | mchunkptr top; |
1829 | |
1830 | /* The remainder from the most recent split of a small request */ |
1831 | mchunkptr last_remainder; |
1832 | |
1833 | /* Normal bins packed as described above */ |
1834 | mchunkptr bins[NBINS * 2 - 2]; |
1835 | |
1836 | /* Bitmap of bins */ |
1837 | unsigned int binmap[BINMAPSIZE]; |
1838 | |
1839 | /* Linked list */ |
1840 | struct malloc_state *next; |
1841 | |
1842 | /* Linked list for free arenas. Access to this field is serialized |
1843 | by free_list_lock in arena.c. */ |
1844 | struct malloc_state *next_free; |
1845 | |
1846 | /* Number of threads attached to this arena. 0 if the arena is on |
1847 | the free list. Access to this field is serialized by |
1848 | free_list_lock in arena.c. */ |
1849 | INTERNAL_SIZE_T attached_threads; |
1850 | |
1851 | /* Memory allocated from the system in this arena. */ |
1852 | INTERNAL_SIZE_T system_mem; |
1853 | INTERNAL_SIZE_T max_system_mem; |
1854 | }; |
1855 | |
1856 | struct malloc_par |
1857 | { |
1858 | /* Tunable parameters */ |
1859 | unsigned long trim_threshold; |
1860 | INTERNAL_SIZE_T top_pad; |
1861 | INTERNAL_SIZE_T mmap_threshold; |
1862 | INTERNAL_SIZE_T arena_test; |
1863 | INTERNAL_SIZE_T arena_max; |
1864 | |
1865 | /* Transparent Large Page support. */ |
1866 | INTERNAL_SIZE_T thp_pagesize; |
  /* A value different from 0 means to align mmap allocations to hp_pagesize
     and to add hp_flags to the mmap flags.  */
1869 | INTERNAL_SIZE_T hp_pagesize; |
1870 | int hp_flags; |
1871 | |
1872 | /* Memory map support */ |
1873 | int n_mmaps; |
1874 | int n_mmaps_max; |
1875 | int max_n_mmaps; |
1876 | /* the mmap_threshold is dynamic, until the user sets |
1877 | it manually, at which point we need to disable any |
1878 | dynamic behavior. */ |
1879 | int no_dyn_threshold; |
1880 | |
1881 | /* Statistics */ |
1882 | INTERNAL_SIZE_T mmapped_mem; |
1883 | INTERNAL_SIZE_T max_mmapped_mem; |
1884 | |
1885 | /* First address handed out by MORECORE/sbrk. */ |
1886 | char *sbrk_base; |
1887 | |
1888 | #if USE_TCACHE |
1889 | /* Maximum number of buckets to use. */ |
1890 | size_t tcache_bins; |
1891 | size_t tcache_max_bytes; |
1892 | /* Maximum number of chunks in each bucket. */ |
1893 | size_t tcache_count; |
1894 | /* Maximum number of chunks to remove from the unsorted list, which |
1895 | aren't used to prefill the cache. */ |
1896 | size_t tcache_unsorted_limit; |
1897 | #endif |
1898 | }; |
1899 | |
1900 | /* There are several instances of this struct ("arenas") in this |
1901 | malloc. If you are adapting this malloc in a way that does NOT use |
1902 | a static or mmapped malloc_state, you MUST explicitly zero-fill it |
1903 | before using. This malloc relies on the property that malloc_state |
1904 | is initialized to all zeroes (as is true of C statics). */ |
1905 | |
1906 | static struct malloc_state main_arena = |
1907 | { |
1908 | .mutex = _LIBC_LOCK_INITIALIZER, |
1909 | .next = &main_arena, |
1910 | .attached_threads = 1 |
1911 | }; |
1912 | |
1913 | /* There is only one instance of the malloc parameters. */ |
1914 | |
1915 | static struct malloc_par mp_ = |
1916 | { |
1917 | .top_pad = DEFAULT_TOP_PAD, |
1918 | .n_mmaps_max = DEFAULT_MMAP_MAX, |
1919 | .mmap_threshold = DEFAULT_MMAP_THRESHOLD, |
1920 | .trim_threshold = DEFAULT_TRIM_THRESHOLD, |
1921 | #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8)) |
1922 | .arena_test = NARENAS_FROM_NCORES (1) |
1923 | #if USE_TCACHE |
1924 | , |
1925 | .tcache_count = TCACHE_FILL_COUNT, |
1926 | .tcache_bins = TCACHE_MAX_BINS, |
1927 | .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1), |
1928 | .tcache_unsorted_limit = 0 /* No limit. */ |
1929 | #endif |
1930 | }; |
1931 | |
1932 | /* |
1933 | Initialize a malloc_state struct. |
1934 | |
1935 | This is called from ptmalloc_init () or from _int_new_arena () |
1936 | when creating a new arena. |
1937 | */ |
1938 | |
1939 | static void |
1940 | malloc_init_state (mstate av) |
1941 | { |
1942 | int i; |
1943 | mbinptr bin; |
1944 | |
1945 | /* Establish circular links for normal bins */ |
1946 | for (i = 1; i < NBINS; ++i) |
1947 | { |
1948 | bin = bin_at (av, i); |
1949 | bin->fd = bin->bk = bin; |
1950 | } |
1951 | |
1952 | #if MORECORE_CONTIGUOUS |
1953 | if (av != &main_arena) |
1954 | #endif |
1955 | set_noncontiguous (av); |
1956 | if (av == &main_arena) |
1957 | set_max_fast (DEFAULT_MXFAST); |
1958 | atomic_store_relaxed (&av->have_fastchunks, false); |
1959 | |
1960 | av->top = initial_top (av); |
1961 | } |
1962 | |
1963 | /* |
1964 | Other internal utilities operating on mstates |
1965 | */ |
1966 | |
1967 | static void *sysmalloc (INTERNAL_SIZE_T, mstate); |
1968 | static int systrim (size_t, mstate); |
1969 | static void malloc_consolidate (mstate); |
1970 | |
1971 | |
1972 | /* -------------- Early definitions for debugging hooks ---------------- */ |
1973 | |
1974 | /* This function is called from the arena shutdown hook, to free the |
1975 | thread cache (if it exists). */ |
1976 | static void tcache_thread_shutdown (void); |
1977 | |
1978 | /* ------------------ Testing support ----------------------------------*/ |
1979 | |
1980 | static int perturb_byte; |
1981 | |
1982 | static void |
1983 | alloc_perturb (char *p, size_t n) |
1984 | { |
1985 | if (__glibc_unlikely (perturb_byte)) |
    memset (p, perturb_byte ^ 0xff, n);
1987 | } |
1988 | |
1989 | static void |
1990 | free_perturb (char *p, size_t n) |
1991 | { |
1992 | if (__glibc_unlikely (perturb_byte)) |
    memset (p, perturb_byte, n);
1994 | } |
1995 | |
1996 | |
1997 | |
1998 | #include <stap-probe.h> |
1999 | |
2000 | /* ----------- Routines dealing with transparent huge pages ----------- */ |
2001 | |
2002 | static inline void |
2003 | madvise_thp (void *p, INTERNAL_SIZE_T size) |
2004 | { |
2005 | #ifdef MADV_HUGEPAGE |
2006 | /* Do not consider areas smaller than a huge page or if the tunable is |
2007 | not active. */ |
2008 | if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize) |
2009 | return; |
2010 | |
2011 | /* Linux requires the input address to be page-aligned, and unaligned |
     inputs happen only for the initial data segment.  */
2013 | if (__glibc_unlikely (!PTR_IS_ALIGNED (p, GLRO (dl_pagesize)))) |
2014 | { |
2015 | void *q = PTR_ALIGN_DOWN (p, GLRO (dl_pagesize)); |
2016 | size += PTR_DIFF (p, q); |
2017 | p = q; |
2018 | } |
2019 | |
  __madvise (p, size, MADV_HUGEPAGE);
2021 | #endif |
2022 | } |
2023 | |
2024 | /* ------------------- Support for multiple arenas -------------------- */ |
2025 | #include "arena.c" |
2026 | |
2027 | /* |
2028 | Debugging support |
2029 | |
2030 | These routines make a number of assertions about the states |
2031 | of data structures that should be true at all times. If any |
2032 | are not true, it's very likely that a user program has somehow |
2033 | trashed memory. (It's also possible that there is a coding error |
2034 | in malloc. In which case, please report it!) |
2035 | */ |
2036 | |
2037 | #if !MALLOC_DEBUG |
2038 | |
2039 | # define check_chunk(A, P) |
2040 | # define check_free_chunk(A, P) |
2041 | # define check_inuse_chunk(A, P) |
2042 | # define check_remalloced_chunk(A, P, N) |
2043 | # define check_malloced_chunk(A, P, N) |
2044 | # define check_malloc_state(A) |
2045 | |
2046 | #else |
2047 | |
2048 | # define check_chunk(A, P) do_check_chunk (A, P) |
2049 | # define check_free_chunk(A, P) do_check_free_chunk (A, P) |
2050 | # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P) |
2051 | # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N) |
2052 | # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N) |
2053 | # define check_malloc_state(A) do_check_malloc_state (A) |
2054 | |
2055 | /* |
2056 | Properties of all chunks |
2057 | */ |
2058 | |
2059 | static void |
2060 | do_check_chunk (mstate av, mchunkptr p) |
2061 | { |
2062 | unsigned long sz = chunksize (p); |
2063 | /* min and max possible addresses assuming contiguous allocation */ |
2064 | char *max_address = (char *) (av->top) + chunksize (av->top); |
2065 | char *min_address = max_address - av->system_mem; |
2066 | |
2067 | if (!chunk_is_mmapped (p)) |
2068 | { |
2069 | /* Has legal address ... */ |
2070 | if (p != av->top) |
2071 | { |
2072 | if (contiguous (av)) |
2073 | { |
2074 | assert (((char *) p) >= min_address); |
2075 | assert (((char *) p + sz) <= ((char *) (av->top))); |
2076 | } |
2077 | } |
2078 | else |
2079 | { |
2080 | /* top size is always at least MINSIZE */ |
2081 | assert ((unsigned long) (sz) >= MINSIZE); |
2082 | /* top predecessor always marked inuse */ |
2083 | assert (prev_inuse (p)); |
2084 | } |
2085 | } |
2086 | else |
2087 | { |
2088 | /* address is outside main heap */ |
2089 | if (contiguous (av) && av->top != initial_top (av)) |
2090 | { |
2091 | assert (((char *) p) < min_address || ((char *) p) >= max_address); |
2092 | } |
2093 | /* chunk is page-aligned */ |
2094 | assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0); |
2095 | /* mem is aligned */ |
2096 | assert (aligned_OK (chunk2mem (p))); |
2097 | } |
2098 | } |
2099 | |
2100 | /* |
2101 | Properties of free chunks |
2102 | */ |
2103 | |
2104 | static void |
2105 | do_check_free_chunk (mstate av, mchunkptr p) |
2106 | { |
2107 | INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA); |
2108 | mchunkptr next = chunk_at_offset (p, sz); |
2109 | |
2110 | do_check_chunk (av, p); |
2111 | |
2112 | /* Chunk must claim to be free ... */ |
2113 | assert (!inuse (p)); |
2114 | assert (!chunk_is_mmapped (p)); |
2115 | |
2116 | /* Unless a special marker, must have OK fields */ |
2117 | if ((unsigned long) (sz) >= MINSIZE) |
2118 | { |
2119 | assert ((sz & MALLOC_ALIGN_MASK) == 0); |
2120 | assert (aligned_OK (chunk2mem (p))); |
2121 | /* ... matching footer field */ |
2122 | assert (prev_size (next_chunk (p)) == sz); |
2123 | /* ... and is fully consolidated */ |
2124 | assert (prev_inuse (p)); |
2125 | assert (next == av->top || inuse (next)); |
2126 | |
2127 | /* ... and has minimally sane links */ |
2128 | assert (p->fd->bk == p); |
2129 | assert (p->bk->fd == p); |
2130 | } |
2131 | else /* markers are always of size SIZE_SZ */ |
2132 | assert (sz == SIZE_SZ); |
2133 | } |
2134 | |
2135 | /* |
2136 | Properties of inuse chunks |
2137 | */ |
2138 | |
2139 | static void |
2140 | do_check_inuse_chunk (mstate av, mchunkptr p) |
2141 | { |
2142 | mchunkptr next; |
2143 | |
2144 | do_check_chunk (av, p); |
2145 | |
2146 | if (chunk_is_mmapped (p)) |
2147 | return; /* mmapped chunks have no next/prev */ |
2148 | |
2149 | /* Check whether it claims to be in use ... */ |
2150 | assert (inuse (p)); |
2151 | |
2152 | next = next_chunk (p); |
2153 | |
2154 | /* ... and is surrounded by OK chunks. |
2155 | Since more things can be checked with free chunks than inuse ones, |
2156 | if an inuse chunk borders them and debug is on, it's worth doing them. |
2157 | */ |
2158 | if (!prev_inuse (p)) |
2159 | { |
2160 | /* Note that we cannot even look at prev unless it is not inuse */ |
2161 | mchunkptr prv = prev_chunk (p); |
2162 | assert (next_chunk (prv) == p); |
2163 | do_check_free_chunk (av, prv); |
2164 | } |
2165 | |
2166 | if (next == av->top) |
2167 | { |
2168 | assert (prev_inuse (next)); |
2169 | assert (chunksize (next) >= MINSIZE); |
2170 | } |
2171 | else if (!inuse (next)) |
2172 | do_check_free_chunk (av, next); |
2173 | } |
2174 | |
2175 | /* |
2176 | Properties of chunks recycled from fastbins |
2177 | */ |
2178 | |
2179 | static void |
2180 | do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s) |
2181 | { |
2182 | INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA); |
2183 | |
2184 | if (!chunk_is_mmapped (p)) |
2185 | { |
2186 | assert (av == arena_for_chunk (p)); |
2187 | if (chunk_main_arena (p)) |
2188 | assert (av == &main_arena); |
2189 | else |
2190 | assert (av != &main_arena); |
2191 | } |
2192 | |
2193 | do_check_inuse_chunk (av, p); |
2194 | |
2195 | /* Legal size ... */ |
2196 | assert ((sz & MALLOC_ALIGN_MASK) == 0); |
2197 | assert ((unsigned long) (sz) >= MINSIZE); |
2198 | /* ... and alignment */ |
2199 | assert (aligned_OK (chunk2mem (p))); |
2200 | /* chunk is less than MINSIZE more than request */ |
2201 | assert ((long) (sz) - (long) (s) >= 0); |
2202 | assert ((long) (sz) - (long) (s + MINSIZE) < 0); |
2203 | } |
2204 | |
2205 | /* |
2206 | Properties of nonrecycled chunks at the point they are malloced |
2207 | */ |
2208 | |
2209 | static void |
2210 | do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s) |
2211 | { |
2212 | /* same as recycled case ... */ |
2213 | do_check_remalloced_chunk (av, p, s); |
2214 | |
2215 | /* |
2216 | ... plus, must obey implementation invariant that prev_inuse is |
2217 | always true of any allocated chunk; i.e., that each allocated |
2218 | chunk borders either a previously allocated and still in-use |
2219 | chunk, or the base of its memory arena. This is ensured |
2220 | by making all allocations from the `lowest' part of any found |
2221 | chunk. This does not necessarily hold however for chunks |
2222 | recycled via fastbins. |
2223 | */ |
2224 | |
2225 | assert (prev_inuse (p)); |
2226 | } |
2227 | |
2228 | |
2229 | /* |
2230 | Properties of malloc_state. |
2231 | |
2232 | This may be useful for debugging malloc, as well as detecting user |
2233 | programmer errors that somehow write into malloc_state. |
2234 | |
2235 | If you are extending or experimenting with this malloc, you can |
2236 | probably figure out how to hack this routine to print out or |
2237 | display chunk addresses, sizes, bins, and other instrumentation. |
2238 | */ |
2239 | |
2240 | static void |
2241 | do_check_malloc_state (mstate av) |
2242 | { |
2243 | int i; |
2244 | mchunkptr p; |
2245 | mchunkptr q; |
2246 | mbinptr b; |
2247 | unsigned int idx; |
2248 | INTERNAL_SIZE_T size; |
2249 | unsigned long total = 0; |
2250 | int max_fast_bin; |
2251 | |
2252 | /* internal size_t must be no wider than pointer type */ |
2253 | assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *)); |
2254 | |
2255 | /* alignment is a power of 2 */ |
2256 | assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0); |
2257 | |
2258 | /* Check the arena is initialized. */ |
2259 | assert (av->top != 0); |
2260 | |
2261 | /* No memory has been allocated yet, so doing more tests is not possible. */ |
2262 | if (av->top == initial_top (av)) |
2263 | return; |
2264 | |
2265 | /* pagesize is a power of 2 */ |
2266 | assert (powerof2(GLRO (dl_pagesize))); |
2267 | |
2268 | /* A contiguous main_arena is consistent with sbrk_base. */ |
2269 | if (av == &main_arena && contiguous (av)) |
2270 | assert ((char *) mp_.sbrk_base + av->system_mem == |
2271 | (char *) av->top + chunksize (av->top)); |
2272 | |
2273 | /* properties of fastbins */ |
2274 | |
2275 | /* max_fast is in allowed range */ |
2276 | assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE)); |
2277 | |
2278 | max_fast_bin = fastbin_index (get_max_fast ()); |
2279 | |
2280 | for (i = 0; i < NFASTBINS; ++i) |
2281 | { |
2282 | p = fastbin (av, i); |
2283 | |
2284 | /* The following test can only be performed for the main arena. |
2285 | While mallopt calls malloc_consolidate to get rid of all fast |
2286 | bins (especially those larger than the new maximum) this does |
2287 | only happen for the main arena. Trying to do this for any |
2288 | other arena would mean those arenas have to be locked and |
2289 | malloc_consolidate be called for them. This is excessive. And |
2290 | even if this is acceptable to somebody it still cannot solve |
2291 | the problem completely since if the arena is locked a |
2292 | concurrent malloc call might create a new arena which then |
2293 | could use the newly invalid fast bins. */ |
2294 | |
2295 | /* all bins past max_fast are empty */ |
2296 | if (av == &main_arena && i > max_fast_bin) |
2297 | assert (p == 0); |
2298 | |
2299 | while (p != 0) |
2300 | { |
2301 | if (__glibc_unlikely (misaligned_chunk (p))) |
2302 | malloc_printerr ("do_check_malloc_state(): " |
2303 | "unaligned fastbin chunk detected" ); |
2304 | /* each chunk claims to be inuse */ |
2305 | do_check_inuse_chunk (av, p); |
2306 | total += chunksize (p); |
2307 | /* chunk belongs in this bin */ |
2308 | assert (fastbin_index (chunksize (p)) == i); |
2309 | p = REVEAL_PTR (p->fd); |
2310 | } |
2311 | } |
2312 | |
2313 | /* check normal bins */ |
2314 | for (i = 1; i < NBINS; ++i) |
2315 | { |
2316 | b = bin_at (av, i); |
2317 | |
2318 | /* binmap is accurate (except for bin 1 == unsorted_chunks) */ |
2319 | if (i >= 2) |
2320 | { |
2321 | unsigned int binbit = get_binmap (av, i); |
2322 | int empty = last (b) == b; |
2323 | if (!binbit) |
2324 | assert (empty); |
2325 | else if (!empty) |
2326 | assert (binbit); |
2327 | } |
2328 | |
2329 | for (p = last (b); p != b; p = p->bk) |
2330 | { |
2331 | /* each chunk claims to be free */ |
2332 | do_check_free_chunk (av, p); |
2333 | size = chunksize (p); |
2334 | total += size; |
2335 | if (i >= 2) |
2336 | { |
2337 | /* chunk belongs in bin */ |
2338 | idx = bin_index (size); |
2339 | assert (idx == i); |
2340 | /* lists are sorted */ |
2341 | assert (p->bk == b || |
2342 | (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p)); |
2343 | |
2344 | if (!in_smallbin_range (size)) |
2345 | { |
2346 | if (p->fd_nextsize != NULL) |
2347 | { |
2348 | if (p->fd_nextsize == p) |
2349 | assert (p->bk_nextsize == p); |
2350 | else |
2351 | { |
2352 | if (p->fd_nextsize == first (b)) |
2353 | assert (chunksize (p) < chunksize (p->fd_nextsize)); |
2354 | else |
2355 | assert (chunksize (p) > chunksize (p->fd_nextsize)); |
2356 | |
2357 | if (p == first (b)) |
2358 | assert (chunksize (p) > chunksize (p->bk_nextsize)); |
2359 | else |
2360 | assert (chunksize (p) < chunksize (p->bk_nextsize)); |
2361 | } |
2362 | } |
2363 | else |
2364 | assert (p->bk_nextsize == NULL); |
2365 | } |
2366 | } |
2367 | else if (!in_smallbin_range (size)) |
2368 | assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL); |
2369 | /* chunk is followed by a legal chain of inuse chunks */ |
2370 | for (q = next_chunk (p); |
2371 | (q != av->top && inuse (q) && |
2372 | (unsigned long) (chunksize (q)) >= MINSIZE); |
2373 | q = next_chunk (q)) |
2374 | do_check_inuse_chunk (av, q); |
2375 | } |
2376 | } |
2377 | |
2378 | /* top chunk is OK */ |
2379 | check_chunk (av, av->top); |
2380 | } |
2381 | #endif |
2382 | |
2383 | |
2384 | /* ----------------- Support for debugging hooks -------------------- */ |
2385 | #if IS_IN (libc) |
2386 | #include "hooks.c" |
2387 | #endif |
2388 | |
2389 | |
2390 | /* ----------- Routines dealing with system allocation -------------- */ |
2391 | |
2392 | /* |
2393 | sysmalloc handles malloc cases requiring more memory from the system. |
2394 | On entry, it is assumed that av->top does not have enough |
2395 | space to service request for nb bytes, thus requiring that av->top |
2396 | be extended or replaced. |
2397 | */ |
2398 | |
2399 | static void * |
sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
2401 | { |
2402 | long int size; |
2403 | |
2404 | /* |
2405 | Round up size to nearest page. For mmapped chunks, the overhead is one |
2406 | SIZE_SZ unit larger than for normal chunks, because there is no |
2407 | following chunk whose prev_size field could be used. |
2408 | |
2409 | See the front_misalign handling below, for glibc there is no need for |
     further alignments unless we have high alignment.
2411 | */ |
2412 | if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ) |
2413 | size = ALIGN_UP (nb + SIZE_SZ, pagesize); |
2414 | else |
2415 | size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize); |
2416 | |
2417 | /* Don't try if size wraps around 0. */ |
2418 | if ((unsigned long) (size) <= (unsigned long) (nb)) |
2419 | return MAP_FAILED; |
2420 | |
2421 | char *mm = (char *) MMAP (0, size, |
2422 | mtag_mmap_flags | PROT_READ | PROT_WRITE, |
2423 | extra_flags); |
2424 | if (mm == MAP_FAILED) |
2425 | return mm; |
2426 | |
2427 | #ifdef MAP_HUGETLB |
2428 | if (!(extra_flags & MAP_HUGETLB)) |
    madvise_thp (mm, size);
2430 | #endif |
2431 | |
  __set_vma_name (mm, size, " glibc: malloc");
2433 | |
2434 | /* |
2435 | The offset to the start of the mmapped region is stored in the prev_size |
2436 | field of the chunk. This allows us to adjust returned start address to |
2437 | meet alignment requirements here and in memalign(), and still be able to |
2438 | compute proper address argument for later munmap in free() and realloc(). |
2439 | */ |
2440 | |
2441 | INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ |
2442 | |
2443 | if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ) |
2444 | { |
2445 | /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and |
2446 | MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page |
2447 | aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */ |
2448 | assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0); |
2449 | front_misalign = 0; |
2450 | } |
2451 | else |
2452 | front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK; |
2453 | |
2454 | mchunkptr p; /* the allocated/returned chunk */ |
2455 | |
2456 | if (front_misalign > 0) |
2457 | { |
2458 | ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign; |
2459 | p = (mchunkptr) (mm + correction); |
2460 | set_prev_size (p, correction); |
2461 | set_head (p, (size - correction) | IS_MMAPPED); |
2462 | } |
2463 | else |
2464 | { |
2465 | p = (mchunkptr) mm; |
2466 | set_prev_size (p, 0); |
2467 | set_head (p, size | IS_MMAPPED); |
2468 | } |
2469 | |
2470 | /* update statistics */ |
2471 | int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1; |
2472 | atomic_max (&mp_.max_n_mmaps, new); |
2473 | |
2474 | unsigned long sum; |
2475 | sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size; |
2476 | atomic_max (&mp_.max_mmapped_mem, sum); |
2477 | |
2478 | check_chunk (av, p); |
2479 | |
2480 | return chunk2mem (p); |
2481 | } |
2482 | |
2483 | /* |
2484 | Allocate memory using mmap() based on S and NB requested size, aligning to |
2485 | PAGESIZE if required. The EXTRA_FLAGS is used on mmap() call. If the call |
2486 | succeeds S is updated with the allocated size. This is used as a fallback |
2487 | if MORECORE fails. |
2488 | */ |
2489 | static void * |
2490 | sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb, |
2491 | INTERNAL_SIZE_T old_size, size_t minsize, |
			 size_t pagesize, int extra_flags, mstate av)
2493 | { |
2494 | long int size = *s; |
2495 | |
2496 | /* Cannot merge with old top, so add its size back in */ |
2497 | if (contiguous (av)) |
2498 | size = ALIGN_UP (size + old_size, pagesize); |
2499 | |
2500 | /* If we are relying on mmap as backup, then use larger units */ |
2501 | if ((unsigned long) (size) < minsize) |
2502 | size = minsize; |
2503 | |
2504 | /* Don't try if size wraps around 0 */ |
2505 | if ((unsigned long) (size) <= (unsigned long) (nb)) |
2506 | return MORECORE_FAILURE; |
2507 | |
2508 | char *mbrk = (char *) (MMAP (0, size, |
2509 | mtag_mmap_flags | PROT_READ | PROT_WRITE, |
2510 | extra_flags)); |
2511 | if (mbrk == MAP_FAILED) |
2512 | return MAP_FAILED; |
2513 | |
2514 | #ifdef MAP_HUGETLB |
2515 | if (!(extra_flags & MAP_HUGETLB)) |
    madvise_thp (mbrk, size);
2517 | #endif |
2518 | |
  __set_vma_name (mbrk, size, " glibc: malloc");
2520 | |
2521 | /* Record that we no longer have a contiguous sbrk region. After the first |
2522 | time mmap is used as backup, we do not ever rely on contiguous space |
2523 | since this could incorrectly bridge regions. */ |
2524 | set_noncontiguous (av); |
2525 | |
2526 | *s = size; |
2527 | return mbrk; |
2528 | } |
2529 | |
2530 | static void * |
2531 | sysmalloc (INTERNAL_SIZE_T nb, mstate av) |
2532 | { |
2533 | mchunkptr old_top; /* incoming value of av->top */ |
2534 | INTERNAL_SIZE_T old_size; /* its size */ |
2535 | char *old_end; /* its end address */ |
2536 | |
2537 | long size; /* arg to first MORECORE or mmap call */ |
2538 | char *brk; /* return value from MORECORE */ |
2539 | |
2540 | long correction; /* arg to 2nd MORECORE call */ |
2541 | char *snd_brk; /* 2nd return val */ |
2542 | |
2543 | INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */ |
2544 | INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */ |
2545 | char *aligned_brk; /* aligned offset into brk */ |
2546 | |
2547 | mchunkptr p; /* the allocated/returned chunk */ |
2548 | mchunkptr remainder; /* remainder from allocation */ |
2549 | unsigned long remainder_size; /* its size */ |
2550 | |
2551 | |
2552 | size_t pagesize = GLRO (dl_pagesize); |
2553 | bool tried_mmap = false; |
2554 | |
2555 | |
2556 | /* |
2557 | If have mmap, and the request size meets the mmap threshold, and |
2558 | the system supports mmap, and there are few enough currently |
2559 | allocated mmapped regions, try to directly map this request |
2560 | rather than expanding top. |
2561 | */ |
2562 | |
2563 | if (av == NULL |
2564 | || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold) |
2565 | && (mp_.n_mmaps < mp_.n_mmaps_max))) |
2566 | { |
2567 | char *mm; |
2568 | if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize) |
2569 | { |
2570 | /* There is no need to issue the THP madvise call if Huge Pages are |
2571 | used directly. */ |
	  mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
2573 | if (mm != MAP_FAILED) |
2574 | return mm; |
2575 | } |
      mm = sysmalloc_mmap (nb, pagesize, 0, av);
2577 | if (mm != MAP_FAILED) |
2578 | return mm; |
2579 | tried_mmap = true; |
2580 | } |
2581 | |
2582 | /* There are no usable arenas and mmap also failed. */ |
2583 | if (av == NULL) |
2584 | return 0; |
2585 | |
2586 | /* Record incoming configuration of top */ |
2587 | |
2588 | old_top = av->top; |
2589 | old_size = chunksize (old_top); |
2590 | old_end = (char *) (chunk_at_offset (old_top, old_size)); |
2591 | |
2592 | brk = snd_brk = (char *) (MORECORE_FAILURE); |
2593 | |
2594 | /* |
2595 | If not the first time through, we require old_size to be |
2596 | at least MINSIZE and to have prev_inuse set. |
2597 | */ |
2598 | |
2599 | assert ((old_top == initial_top (av) && old_size == 0) || |
2600 | ((unsigned long) (old_size) >= MINSIZE && |
2601 | prev_inuse (old_top) && |
2602 | ((unsigned long) old_end & (pagesize - 1)) == 0)); |
2603 | |
2604 | /* Precondition: not enough current space to satisfy nb request */ |
2605 | assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE)); |
2606 | |
2607 | |
2608 | if (av != &main_arena) |
2609 | { |
2610 | heap_info *old_heap, *heap; |
2611 | size_t old_heap_size; |
2612 | |
2613 | /* First try to extend the current heap. */ |
      old_heap = heap_for_ptr (old_top);
2615 | old_heap_size = old_heap->size; |
2616 | if ((long) (MINSIZE + nb - old_size) > 0 |
          && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2618 | { |
2619 | av->system_mem += old_heap->size - old_heap_size; |
2620 | set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top) |
2621 | | PREV_INUSE); |
2622 | } |
      else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2624 | { |
2625 | /* Use a newly allocated heap. */ |
2626 | heap->ar_ptr = av; |
2627 | heap->prev = old_heap; |
2628 | av->system_mem += heap->size; |
2629 | /* Set up the new top. */ |
2630 | top (av) = chunk_at_offset (heap, sizeof (*heap)); |
2631 | set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE); |
2632 | |
2633 | /* Setup fencepost and free the old top chunk with a multiple of |
2634 | MALLOC_ALIGNMENT in size. */ |
2635 | /* The fencepost takes at least MINSIZE bytes, because it might |
2636 | become the top chunk again later. Note that a footer is set |
2637 | up, too, although the chunk is marked in use. */ |
2638 | old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK; |
2639 | set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ), |
2640 | 0 | PREV_INUSE); |
2641 | if (old_size >= MINSIZE) |
2642 | { |
2643 | set_head (chunk_at_offset (old_top, old_size), |
2644 | CHUNK_HDR_SZ | PREV_INUSE); |
2645 | set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ); |
2646 | set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA); |
2647 | _int_free (av, old_top, 1); |
2648 | } |
2649 | else |
2650 | { |
2651 | set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE); |
2652 | set_foot (old_top, (old_size + CHUNK_HDR_SZ)); |
2653 | } |
2654 | } |
2655 | else if (!tried_mmap) |
2656 | { |
          /* We can at least try to use mmap memory.  If new_heap fails
2658 | it is unlikely that trying to allocate huge pages will |
2659 | succeed. */ |
          char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
2661 | if (mm != MAP_FAILED) |
2662 | return mm; |
2663 | } |
2664 | } |
2665 | else /* av == main_arena */ |
2666 | |
2667 | |
2668 | { /* Request enough space for nb + pad + overhead */ |
2669 | size = nb + mp_.top_pad + MINSIZE; |
2670 | |
2671 | /* |
2672 | If contiguous, we can subtract out existing space that we hope to |
2673 | combine with new space. We add it back later only if |
2674 | we don't actually get contiguous space. |
2675 | */ |
2676 | |
2677 | if (contiguous (av)) |
2678 | size -= old_size; |
2679 | |
2680 | /* |
2681 | Round to a multiple of page size or huge page size. |
2682 | If MORECORE is not contiguous, this ensures that we only call it |
2683 | with whole-page arguments. And if MORECORE is contiguous and |
2684 | this is not first time through, this preserves page-alignment of |
2685 | previous calls. Otherwise, we correct to page-align below. |
2686 | */ |
2687 | |
2688 | #ifdef MADV_HUGEPAGE |
2689 | /* Defined in brk.c. */ |
2690 | extern void *__curbrk; |
2691 | if (__glibc_unlikely (mp_.thp_pagesize != 0)) |
2692 | { |
2693 | uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size, |
2694 | mp_.thp_pagesize); |
2695 | size = top - (uintptr_t) __curbrk; |
2696 | } |
2697 | else |
2698 | #endif |
2699 | size = ALIGN_UP (size, GLRO(dl_pagesize)); |
2700 | |
2701 | /* |
2702 | Don't try to call MORECORE if argument is so big as to appear |
2703 | negative. Note that since mmap takes size_t arg, it may succeed |
2704 | below even if we cannot call MORECORE. |
2705 | */ |
2706 | |
2707 | if (size > 0) |
2708 | { |
          brk = (char *) (MORECORE (size));
2710 | if (brk != (char *) (MORECORE_FAILURE)) |
            madvise_thp (brk, size);
2712 | LIBC_PROBE (memory_sbrk_more, 2, brk, size); |
2713 | } |
2714 | |
2715 | if (brk == (char *) (MORECORE_FAILURE)) |
2716 | { |
2717 | /* |
2718 | If have mmap, try using it as a backup when MORECORE fails or |
2719 | cannot be used. This is worth doing on systems that have "holes" in |
2720 | address space, so sbrk cannot extend to give contiguous space, but |
2721 | space is available elsewhere. Note that we ignore mmap max count |
2722 | and threshold limits, since the space will not be used as a |
2723 | segregated mmap region. |
2724 | */ |
2725 | |
2726 | char *mbrk = MAP_FAILED; |
2727 | if (mp_.hp_pagesize > 0) |
            mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
                                            mp_.hp_pagesize, mp_.hp_pagesize,
                                            mp_.hp_flags, av);
2731 | if (mbrk == MAP_FAILED) |
            mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
                                            MMAP_AS_MORECORE_SIZE, pagesize, 0, av);
2734 | if (mbrk != MAP_FAILED) |
2735 | { |
2736 | /* We do not need, and cannot use, another sbrk call to find end */ |
2737 | brk = mbrk; |
2738 | snd_brk = brk + size; |
2739 | } |
2740 | } |
2741 | |
2742 | if (brk != (char *) (MORECORE_FAILURE)) |
2743 | { |
2744 | if (mp_.sbrk_base == 0) |
2745 | mp_.sbrk_base = brk; |
2746 | av->system_mem += size; |
2747 | |
2748 | /* |
2749 | If MORECORE extends previous space, we can likewise extend top size. |
2750 | */ |
2751 | |
2752 | if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE)) |
2753 | set_head (old_top, (size + old_size) | PREV_INUSE); |
2754 | |
2755 | else if (contiguous (av) && old_size && brk < old_end) |
2756 | /* Oops! Someone else killed our space.. Can't touch anything. */ |
            malloc_printerr ("break adjusted to free malloc space");
2758 | |
2759 | /* |
2760 | Otherwise, make adjustments: |
2761 | |
2762 | * If the first time through or noncontiguous, we need to call sbrk |
2763 | just to find out where the end of memory lies. |
2764 | |
2765 | * We need to ensure that all returned chunks from malloc will meet |
2766 | MALLOC_ALIGNMENT |
2767 | |
2768 | * If there was an intervening foreign sbrk, we need to adjust sbrk |
2769 | request size to account for fact that we will not be able to |
2770 | combine new space with existing space in old_top. |
2771 | |
2772 | * Almost all systems internally allocate whole pages at a time, in |
2773 | which case we might as well use the whole last page of request. |
2774 | So we allocate enough more memory to hit a page boundary now, |
2775 | which in turn causes future contiguous calls to page-align. |
2776 | */ |
2777 | |
2778 | else |
2779 | { |
2780 | front_misalign = 0; |
2781 | end_misalign = 0; |
2782 | correction = 0; |
2783 | aligned_brk = brk; |
2784 | |
2785 | /* handle contiguous cases */ |
2786 | if (contiguous (av)) |
2787 | { |
2788 | /* Count foreign sbrk as system_mem. */ |
2789 | if (old_size) |
2790 | av->system_mem += brk - old_end; |
2791 | |
2792 | /* Guarantee alignment of first new chunk made from this space */ |
2793 | |
2794 | front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK; |
2795 | if (front_misalign > 0) |
2796 | { |
2797 | /* |
2798 | Skip over some bytes to arrive at an aligned position. |
2799 | We don't need to specially mark these wasted front bytes. |
2800 | They will never be accessed anyway because |
2801 | prev_inuse of av->top (and any chunk created from its start) |
2802 | is always true after initialization. |
2803 | */ |
2804 | |
2805 | correction = MALLOC_ALIGNMENT - front_misalign; |
2806 | aligned_brk += correction; |
2807 | } |
2808 | |
2809 | /* |
2810 | If this isn't adjacent to existing space, then we will not |
2811 | be able to merge with old_top space, so must add to 2nd request. |
2812 | */ |
2813 | |
2814 | correction += old_size; |
2815 | |
2816 | /* Extend the end address to hit a page boundary */ |
2817 | end_misalign = (INTERNAL_SIZE_T) (brk + size + correction); |
2818 | correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign; |
2819 | |
2820 | assert (correction >= 0); |
              snd_brk = (char *) (MORECORE (correction));
2822 | |
2823 | /* |
2824 | If can't allocate correction, try to at least find out current |
2825 | brk. It might be enough to proceed without failing. |
2826 | |
2827 | Note that if second sbrk did NOT fail, we assume that space |
2828 | is contiguous with first sbrk. This is a safe assumption unless |
2829 | program is multithreaded but doesn't use locks and a foreign sbrk |
2830 | occurred between our first and second calls. |
2831 | */ |
2832 | |
2833 | if (snd_brk == (char *) (MORECORE_FAILURE)) |
2834 | { |
2835 | correction = 0; |
                  snd_brk = (char *) (MORECORE (0));
2837 | } |
2838 | else |
                madvise_thp (snd_brk, correction);
2840 | } |
2841 | |
2842 | /* handle non-contiguous cases */ |
2843 | else |
2844 | { |
2845 | if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ) |
2846 | /* MORECORE/mmap must correctly align */ |
2847 | assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0); |
2848 | else |
2849 | { |
2850 | front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK; |
2851 | if (front_misalign > 0) |
2852 | { |
2853 | /* |
2854 | Skip over some bytes to arrive at an aligned position. |
2855 | We don't need to specially mark these wasted front bytes. |
2856 | They will never be accessed anyway because |
2857 | prev_inuse of av->top (and any chunk created from its start) |
2858 | is always true after initialization. |
2859 | */ |
2860 | |
2861 | aligned_brk += MALLOC_ALIGNMENT - front_misalign; |
2862 | } |
2863 | } |
2864 | |
2865 | /* Find out current end of memory */ |
2866 | if (snd_brk == (char *) (MORECORE_FAILURE)) |
2867 | { |
                  snd_brk = (char *) (MORECORE (0));
2869 | } |
2870 | } |
2871 | |
2872 | /* Adjust top based on results of second sbrk */ |
2873 | if (snd_brk != (char *) (MORECORE_FAILURE)) |
2874 | { |
2875 | av->top = (mchunkptr) aligned_brk; |
2876 | set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE); |
2877 | av->system_mem += correction; |
2878 | |
2879 | /* |
2880 | If not the first time through, we either have a |
2881 | gap due to foreign sbrk or a non-contiguous region. Insert a |
2882 | double fencepost at old_top to prevent consolidation with space |
2883 | we don't own. These fenceposts are artificial chunks that are |
2884 | marked as inuse and are in any case too small to use. We need |
2885 | two to make sizes and alignments work out. |
2886 | */ |
2887 | |
2888 | if (old_size != 0) |
2889 | { |
2890 | /* |
2891 | Shrink old_top to insert fenceposts, keeping size a |
2892 | multiple of MALLOC_ALIGNMENT. We know there is at least |
2893 | enough space in old_top to do this. |
2894 | */ |
2895 | old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK; |
2896 | set_head (old_top, old_size | PREV_INUSE); |
2897 | |
2898 | /* |
2899 | Note that the following assignments completely overwrite |
2900 | old_top when old_size was previously MINSIZE. This is |
2901 | intentional. We need the fencepost, even if old_top otherwise gets |
2902 | lost. |
2903 | */ |
2904 | set_head (chunk_at_offset (old_top, old_size), |
2905 | CHUNK_HDR_SZ | PREV_INUSE); |
2906 | set_head (chunk_at_offset (old_top, |
2907 | old_size + CHUNK_HDR_SZ), |
2908 | CHUNK_HDR_SZ | PREV_INUSE); |
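                  /* The two calls above leave, roughly, this layout:
                     old_top .. old_top + old_size is the trimmed old top
                     chunk, followed by two CHUNK_HDR_SZ fencepost chunks,
                     both with PREV_INUSE set.  Being too small to allocate
                     and never freed, they stop consolidation from walking
                     into address space the arena does not own.  */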
2909 | |
2910 | /* If possible, release the rest. */ |
2911 | if (old_size >= MINSIZE) |
2912 | { |
2913 | _int_free (av, old_top, 1); |
2914 | } |
2915 | } |
2916 | } |
2917 | } |
2918 | } |
2919 | } /* if (av != &main_arena) */ |
2920 | |
2921 | if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem)) |
2922 | av->max_system_mem = av->system_mem; |
2923 | check_malloc_state (av); |
2924 | |
2925 | /* finally, do the allocation */ |
2926 | p = av->top; |
2927 | size = chunksize (p); |
2928 | |
2929 | /* check that one of the above allocation paths succeeded */ |
2930 | if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE)) |
2931 | { |
2932 | remainder_size = size - nb; |
2933 | remainder = chunk_at_offset (p, nb); |
2934 | av->top = remainder; |
2935 | set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
2936 | set_head (remainder, remainder_size | PREV_INUSE); |
2937 | check_malloced_chunk (av, p, nb); |
2938 | return chunk2mem (p); |
2939 | } |
2940 | |
2941 | /* catch all failure paths */ |
2942 | __set_errno (ENOMEM); |
2943 | return 0; |
2944 | } |
2945 | |
2946 | |
2947 | /* |
2948 | systrim is an inverse of sorts to sysmalloc. It gives memory back |
2949 | to the system (via negative arguments to sbrk) if there is unused |
2950 | memory at the `high' end of the malloc pool. It is called |
2951 | automatically by free() when top space exceeds the trim |
2952 | threshold. It is also called by the public malloc_trim routine. It |
2953 | returns 1 if it actually released any memory, else 0. |
2954 | */ |
2955 | |
2956 | static int |
2957 | systrim (size_t pad, mstate av) |
2958 | { |
2959 | long top_size; /* Amount of top-most memory */ |
  long extra;            /* Amount to release */
2961 | long released; /* Amount actually released */ |
2962 | char *current_brk; /* address returned by pre-check sbrk call */ |
2963 | char *new_brk; /* address returned by post-check sbrk call */ |
2964 | long top_area; |
2965 | |
2966 | top_size = chunksize (av->top); |
2967 | |
2968 | top_area = top_size - MINSIZE - 1; |
2969 | if (top_area <= pad) |
2970 | return 0; |
2971 | |
2972 | /* Release in pagesize units and round down to the nearest page. */ |
2973 | #ifdef MADV_HUGEPAGE |
2974 | if (__glibc_unlikely (mp_.thp_pagesize != 0)) |
2975 | extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize); |
2976 | else |
2977 | #endif |
2978 | extra = ALIGN_DOWN (top_area - pad, GLRO(dl_pagesize)); |
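  /* For example, with 4 KiB pages and top_area - pad == 10000, extra is
     8192: only whole pages can be handed back, and the sub-page
     remainder stays in av->top.  */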
2979 | |
2980 | if (extra == 0) |
2981 | return 0; |
2982 | |
2983 | /* |
2984 | Only proceed if end of memory is where we last set it. |
2985 | This avoids problems if there were foreign sbrk calls. |
2986 | */ |
  current_brk = (char *) (MORECORE (0));
2988 | if (current_brk == (char *) (av->top) + top_size) |
2989 | { |
2990 | /* |
2991 | Attempt to release memory. We ignore MORECORE return value, |
2992 | and instead call again to find out where new end of memory is. |
2993 | This avoids problems if first call releases less than we asked, |
         or if failure somehow altered brk value.  (We could still
2995 | encounter problems if it altered brk in some very bad way, |
2996 | but the only thing we can do is adjust anyway, which will cause |
2997 | some downstream failure.) |
2998 | */ |
2999 | |
      MORECORE (-extra);
      new_brk = (char *) (MORECORE (0));
3002 | |
3003 | LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra); |
3004 | |
3005 | if (new_brk != (char *) MORECORE_FAILURE) |
3006 | { |
3007 | released = (long) (current_brk - new_brk); |
3008 | |
3009 | if (released != 0) |
3010 | { |
3011 | /* Success. Adjust top. */ |
3012 | av->system_mem -= released; |
3013 | set_head (av->top, (top_size - released) | PREV_INUSE); |
3014 | check_malloc_state (av); |
3015 | return 1; |
3016 | } |
3017 | } |
3018 | } |
3019 | return 0; |
3020 | } |
3021 | |
3022 | static void |
3023 | munmap_chunk (mchunkptr p) |
3024 | { |
3025 | size_t pagesize = GLRO (dl_pagesize); |
3026 | INTERNAL_SIZE_T size = chunksize (p); |
3027 | |
3028 | assert (chunk_is_mmapped (p)); |
3029 | |
3030 | uintptr_t mem = (uintptr_t) chunk2mem (p); |
3031 | uintptr_t block = (uintptr_t) p - prev_size (p); |
3032 | size_t total_size = prev_size (p) + size; |
  /* Unfortunately we have to do the compiler's job by hand here.  Normally
     we would test BLOCK and TOTAL-SIZE separately for compliance with the
     page size.  But gcc does not recognize the optimization possibility
     (at the moment at least) so we combine the two values into one before
     the bit test.  */
3038 | if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0 |
3039 | || __glibc_unlikely (!powerof2 (mem & (pagesize - 1)))) |
    malloc_printerr ("munmap_chunk(): invalid pointer");
3041 | |
3042 | atomic_fetch_add_relaxed (&mp_.n_mmaps, -1); |
3043 | atomic_fetch_add_relaxed (&mp_.mmapped_mem, -total_size); |
3044 | |
3045 | /* If munmap failed the process virtual memory address space is in a |
3046 | bad shape. Just leave the block hanging around, the process will |
3047 | terminate shortly anyway since not much can be done. */ |
  __munmap ((char *) block, total_size);
3049 | } |
3050 | |
3051 | #if HAVE_MREMAP |
3052 | |
3053 | static mchunkptr |
3054 | mremap_chunk (mchunkptr p, size_t new_size) |
3055 | { |
3056 | size_t pagesize = GLRO (dl_pagesize); |
3057 | INTERNAL_SIZE_T offset = prev_size (p); |
3058 | INTERNAL_SIZE_T size = chunksize (p); |
3059 | char *cp; |
3060 | |
3061 | assert (chunk_is_mmapped (p)); |
3062 | |
3063 | uintptr_t block = (uintptr_t) p - offset; |
3064 | uintptr_t mem = (uintptr_t) chunk2mem(p); |
3065 | size_t total_size = offset + size; |
3066 | if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0 |
3067 | || __glibc_unlikely (!powerof2 (mem & (pagesize - 1)))) |
    malloc_printerr ("mremap_chunk(): invalid pointer");
3069 | |
3070 | /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */ |
3071 | new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize); |
3072 | |
3073 | /* No need to remap if the number of pages does not change. */ |
3074 | if (total_size == new_size) |
3075 | return p; |
3076 | |
  cp = (char *) __mremap ((char *) block, total_size, new_size,
3078 | MREMAP_MAYMOVE); |
3079 | |
3080 | if (cp == MAP_FAILED) |
3081 | return 0; |
3082 | |
  madvise_thp (cp, new_size);
3084 | |
3085 | p = (mchunkptr) (cp + offset); |
3086 | |
3087 | assert (aligned_OK (chunk2mem (p))); |
3088 | |
3089 | assert (prev_size (p) == offset); |
3090 | set_head (p, (new_size - offset) | IS_MMAPPED); |
3091 | |
3092 | INTERNAL_SIZE_T new; |
3093 | new = atomic_fetch_add_relaxed (&mp_.mmapped_mem, new_size - size - offset) |
3094 | + new_size - size - offset; |
3095 | atomic_max (&mp_.max_mmapped_mem, new); |
3096 | return p; |
3097 | } |
3098 | #endif /* HAVE_MREMAP */ |
3099 | |
3100 | /*------------------------ Public wrappers. --------------------------------*/ |
3101 | |
3102 | #if USE_TCACHE |
3103 | |
3104 | /* We overlay this structure on the user-data portion of a chunk when |
3105 | the chunk is stored in the per-thread cache. */ |
3106 | typedef struct tcache_entry |
3107 | { |
3108 | struct tcache_entry *next; |
3109 | /* This field exists to detect double frees. */ |
3110 | uintptr_t key; |
3111 | } tcache_entry; |
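/* NEXT and KEY overlay the first two words of the chunk's user data;
   every allocatable chunk is at least MINSIZE bytes, so there is always
   room for both fields.  */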
3112 | |
3113 | /* There is one of these for each thread, which contains the |
3114 | per-thread cache (hence "tcache_perthread_struct"). Keeping |
3115 | overall size low is mildly important. Note that COUNTS and ENTRIES |
3116 | are redundant (we could have just counted the linked list each |
   time); keeping explicit counts is purely a performance optimization.  */
3118 | typedef struct tcache_perthread_struct |
3119 | { |
3120 | uint16_t counts[TCACHE_MAX_BINS]; |
3121 | tcache_entry *entries[TCACHE_MAX_BINS]; |
3122 | } tcache_perthread_struct; |
3123 | |
3124 | static __thread bool tcache_shutting_down = false; |
3125 | static __thread tcache_perthread_struct *tcache = NULL; |
3126 | |
3127 | /* Process-wide key to try and catch a double-free in the same thread. */ |
3128 | static uintptr_t tcache_key; |
3129 | |
3130 | /* The value of tcache_key does not really have to be a cryptographically |
3131 | secure random number. It only needs to be arbitrary enough so that it does |
3132 | not collide with values present in applications. If a collision does happen |
3133 | consistently enough, it could cause a degradation in performance since the |
3134 | entire list is checked to check if the block indeed has been freed the |
3135 | second time. The odds of this happening are exceedingly low though, about 1 |
3136 | in 2^wordsize. There is probably a higher chance of the performance |
3137 | degradation being due to a double free where the first free happened in a |
3138 | different thread; that's a case this check does not cover. */ |
3139 | static void |
3140 | tcache_key_initialize (void) |
3141 | { |
3142 | /* We need to use the _nostatus version here, see BZ 29624. */ |
3143 | if (__getrandom_nocancel_nostatus (&tcache_key, sizeof(tcache_key), |
3144 | GRND_NONBLOCK) |
3145 | != sizeof (tcache_key)) |
3146 | { |
3147 | tcache_key = random_bits (); |
3148 | #if __WORDSIZE == 64 |
3149 | tcache_key = (tcache_key << 32) | random_bits (); |
3150 | #endif |
3151 | } |
3152 | } |
3153 | |
3154 | /* Caller must ensure that we know tc_idx is valid and there's room |
3155 | for more chunks. */ |
3156 | static __always_inline void |
3157 | tcache_put (mchunkptr chunk, size_t tc_idx) |
3158 | { |
3159 | tcache_entry *e = (tcache_entry *) chunk2mem (chunk); |
3160 | |
3161 | /* Mark this chunk as "in the tcache" so the test in _int_free will |
3162 | detect a double free. */ |
3163 | e->key = tcache_key; |
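  /* The NEXT link stored below is obfuscated ("safe-linking"): the raw
     pointer is combined via PROTECT_PTR with bits of the address of the
     link field itself, so a heap overwrite cannot forge a usable list
     pointer without also knowing where the list lives.  */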
3164 | |
3165 | e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]); |
3166 | tcache->entries[tc_idx] = e; |
3167 | ++(tcache->counts[tc_idx]); |
3168 | } |
3169 | |
3170 | /* Caller must ensure that we know tc_idx is valid and there's |
3171 | available chunks to remove. Removes chunk from the middle of the |
3172 | list. */ |
3173 | static __always_inline void * |
3174 | tcache_get_n (size_t tc_idx, tcache_entry **ep) |
3175 | { |
3176 | tcache_entry *e; |
3177 | if (ep == &(tcache->entries[tc_idx])) |
3178 | e = *ep; |
3179 | else |
3180 | e = REVEAL_PTR (*ep); |
3181 | |
3182 | if (__glibc_unlikely (!aligned_OK (e))) |
3183 | malloc_printerr ("malloc(): unaligned tcache chunk detected" ); |
3184 | |
3185 | if (ep == &(tcache->entries[tc_idx])) |
3186 | *ep = REVEAL_PTR (e->next); |
3187 | else |
3188 | *ep = PROTECT_PTR (ep, REVEAL_PTR (e->next)); |
3189 | |
3190 | --(tcache->counts[tc_idx]); |
3191 | e->key = 0; |
3192 | return (void *) e; |
3193 | } |
3194 | |
3195 | /* Like the above, but removes from the head of the list. */ |
3196 | static __always_inline void * |
3197 | tcache_get (size_t tc_idx) |
3198 | { |
3199 | return tcache_get_n (tc_idx, & tcache->entries[tc_idx]); |
3200 | } |
3201 | |
3202 | /* Iterates through the tcache linked list. */ |
3203 | static __always_inline tcache_entry * |
3204 | tcache_next (tcache_entry *e) |
3205 | { |
3206 | return (tcache_entry *) REVEAL_PTR (e->next); |
3207 | } |
3208 | |
3209 | static void |
3210 | tcache_thread_shutdown (void) |
3211 | { |
3212 | int i; |
3213 | tcache_perthread_struct *tcache_tmp = tcache; |
3214 | |
3215 | tcache_shutting_down = true; |
3216 | |
3217 | if (!tcache) |
3218 | return; |
3219 | |
3220 | /* Disable the tcache and prevent it from being reinitialized. */ |
3221 | tcache = NULL; |
3222 | |
3223 | /* Free all of the entries and the tcache itself back to the arena |
3224 | heap for coalescing. */ |
3225 | for (i = 0; i < TCACHE_MAX_BINS; ++i) |
3226 | { |
3227 | while (tcache_tmp->entries[i]) |
3228 | { |
3229 | tcache_entry *e = tcache_tmp->entries[i]; |
3230 | if (__glibc_unlikely (!aligned_OK (e))) |
3231 | malloc_printerr ("tcache_thread_shutdown(): " |
3232 | "unaligned tcache chunk detected" ); |
3233 | tcache_tmp->entries[i] = REVEAL_PTR (e->next); |
3234 | __libc_free (e); |
3235 | } |
3236 | } |
3237 | |
3238 | __libc_free (tcache_tmp); |
3239 | } |
3240 | |
3241 | static void |
3242 | tcache_init(void) |
3243 | { |
3244 | mstate ar_ptr; |
3245 | void *victim = 0; |
3246 | const size_t bytes = sizeof (tcache_perthread_struct); |
3247 | |
3248 | if (tcache_shutting_down) |
3249 | return; |
3250 | |
3251 | arena_get (ar_ptr, bytes); |
3252 | victim = _int_malloc (ar_ptr, bytes); |
3253 | if (!victim && ar_ptr != NULL) |
3254 | { |
3255 | ar_ptr = arena_get_retry (ar_ptr, bytes); |
3256 | victim = _int_malloc (ar_ptr, bytes); |
3257 | } |
3258 | |
3259 | |
3260 | if (ar_ptr != NULL) |
3261 | __libc_lock_unlock (ar_ptr->mutex); |
3262 | |
3263 | /* In a low memory situation, we may not be able to allocate memory |
3264 | - in which case, we just keep trying later. However, we |
3265 | typically do this very early, so either there is sufficient |
3266 | memory, or there isn't enough memory to do non-trivial |
3267 | allocations anyway. */ |
3268 | if (victim) |
3269 | { |
3270 | tcache = (tcache_perthread_struct *) victim; |
3271 | memset (tcache, 0, sizeof (tcache_perthread_struct)); |
3272 | } |
3273 | |
3274 | } |
3275 | |
3276 | # define MAYBE_INIT_TCACHE() \ |
3277 | if (__glibc_unlikely (tcache == NULL)) \ |
3278 | tcache_init(); |
3279 | |
3280 | #else /* !USE_TCACHE */ |
3281 | # define MAYBE_INIT_TCACHE() |
3282 | |
3283 | static void |
3284 | tcache_thread_shutdown (void) |
3285 | { |
3286 | /* Nothing to do if there is no thread cache. */ |
3287 | } |
3288 | |
3289 | #endif /* !USE_TCACHE */ |
3290 | |
3291 | #if IS_IN (libc) |
3292 | void * |
3293 | __libc_malloc (size_t bytes) |
3294 | { |
3295 | mstate ar_ptr; |
3296 | void *victim; |
3297 | |
3298 | _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2, |
3299 | "PTRDIFF_MAX is not more than half of SIZE_MAX" ); |
3300 | |
3301 | if (!__malloc_initialized) |
3302 | ptmalloc_init (); |
3303 | #if USE_TCACHE |
3304 | /* int_free also calls request2size, be careful to not pad twice. */ |
3305 | size_t tbytes = checked_request2size (bytes); |
3306 | if (tbytes == 0) |
3307 | { |
3308 | __set_errno (ENOMEM); |
3309 | return NULL; |
3310 | } |
3311 | size_t tc_idx = csize2tidx (tbytes); |
3312 | |
3313 | MAYBE_INIT_TCACHE (); |
3314 | |
3315 | DIAG_PUSH_NEEDS_COMMENT; |
3316 | if (tc_idx < mp_.tcache_bins |
3317 | && tcache != NULL |
3318 | && tcache->counts[tc_idx] > 0) |
3319 | { |
3320 | victim = tcache_get (tc_idx); |
3321 | return tag_new_usable (victim); |
3322 | } |
3323 | DIAG_POP_NEEDS_COMMENT; |
3324 | #endif |
3325 | |
3326 | if (SINGLE_THREAD_P) |
3327 | { |
3328 | victim = tag_new_usable (_int_malloc (&main_arena, bytes)); |
3329 | assert (!victim || chunk_is_mmapped (mem2chunk (victim)) || |
3330 | &main_arena == arena_for_chunk (mem2chunk (victim))); |
3331 | return victim; |
3332 | } |
3333 | |
3334 | arena_get (ar_ptr, bytes); |
3335 | |
3336 | victim = _int_malloc (ar_ptr, bytes); |
3337 | /* Retry with another arena only if we were able to find a usable arena |
3338 | before. */ |
3339 | if (!victim && ar_ptr != NULL) |
3340 | { |
3341 | LIBC_PROBE (memory_malloc_retry, 1, bytes); |
3342 | ar_ptr = arena_get_retry (ar_ptr, bytes); |
3343 | victim = _int_malloc (ar_ptr, bytes); |
3344 | } |
3345 | |
3346 | if (ar_ptr != NULL) |
3347 | __libc_lock_unlock (ar_ptr->mutex); |
3348 | |
3349 | victim = tag_new_usable (victim); |
3350 | |
3351 | assert (!victim || chunk_is_mmapped (mem2chunk (victim)) || |
3352 | ar_ptr == arena_for_chunk (mem2chunk (victim))); |
3353 | return victim; |
3354 | } |
3355 | libc_hidden_def (__libc_malloc) |
3356 | |
3357 | void |
3358 | __libc_free (void *mem) |
3359 | { |
3360 | mstate ar_ptr; |
3361 | mchunkptr p; /* chunk corresponding to mem */ |
3362 | |
3363 | if (mem == 0) /* free(0) has no effect */ |
3364 | return; |
3365 | |
3366 | /* Quickly check that the freed pointer matches the tag for the memory. |
3367 | This gives a useful double-free detection. */ |
3368 | if (__glibc_unlikely (mtag_enabled)) |
3369 | *(volatile char *)mem; |
3370 | |
3371 | int err = errno; |
3372 | |
3373 | p = mem2chunk (mem); |
3374 | |
3375 | if (chunk_is_mmapped (p)) /* release mmapped memory. */ |
3376 | { |
3377 | /* See if the dynamic brk/mmap threshold needs adjusting. |
3378 | Dumped fake mmapped chunks do not affect the threshold. */ |
3379 | if (!mp_.no_dyn_threshold |
3380 | && chunksize_nomask (p) > mp_.mmap_threshold |
3381 | && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX) |
3382 | { |
3383 | mp_.mmap_threshold = chunksize (p); |
3384 | mp_.trim_threshold = 2 * mp_.mmap_threshold; |
3385 | LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2, |
3386 | mp_.mmap_threshold, mp_.trim_threshold); |
3387 | } |
3388 | munmap_chunk (p); |
3389 | } |
3390 | else |
3391 | { |
3392 | MAYBE_INIT_TCACHE (); |
3393 | |
3394 | /* Mark the chunk as belonging to the library again. */ |
3395 | (void)tag_region (chunk2mem (p), memsize (p)); |
3396 | |
3397 | ar_ptr = arena_for_chunk (p); |
3398 | _int_free (ar_ptr, p, 0); |
3399 | } |
3400 | |
3401 | __set_errno (err); |
3402 | } |
3403 | libc_hidden_def (__libc_free) |
3404 | |
3405 | void * |
3406 | __libc_realloc (void *oldmem, size_t bytes) |
3407 | { |
3408 | mstate ar_ptr; |
3409 | INTERNAL_SIZE_T nb; /* padded request size */ |
3410 | |
3411 | void *newp; /* chunk to return */ |
3412 | |
3413 | if (!__malloc_initialized) |
3414 | ptmalloc_init (); |
3415 | |
3416 | #if REALLOC_ZERO_BYTES_FREES |
3417 | if (bytes == 0 && oldmem != NULL) |
3418 | { |
3419 | __libc_free (oldmem); return 0; |
3420 | } |
3421 | #endif |
3422 | |
3423 | /* realloc of null is supposed to be same as malloc */ |
3424 | if (oldmem == 0) |
3425 | return __libc_malloc (bytes); |
3426 | |
3427 | /* Perform a quick check to ensure that the pointer's tag matches the |
3428 | memory's tag. */ |
3429 | if (__glibc_unlikely (mtag_enabled)) |
3430 | *(volatile char*) oldmem; |
3431 | |
3432 | /* chunk corresponding to oldmem */ |
3433 | const mchunkptr oldp = mem2chunk (oldmem); |
3434 | |
3435 | /* Return the chunk as is if the request grows within usable bytes, typically |
3436 | into the alignment padding. We want to avoid reusing the block for |
3437 | shrinkages because it ends up unnecessarily fragmenting the address space. |
3438 | This is also why the heuristic misses alignment padding for THP for |
3439 | now. */ |
3440 | size_t usable = musable (oldmem); |
3441 | if (bytes <= usable) |
3442 | { |
3443 | size_t difference = usable - bytes; |
3444 | if ((unsigned long) difference < 2 * sizeof (INTERNAL_SIZE_T) |
3445 | || (chunk_is_mmapped (oldp) && difference <= GLRO (dl_pagesize))) |
3446 | return oldmem; |
3447 | } |
3448 | |
3449 | /* its size */ |
3450 | const INTERNAL_SIZE_T oldsize = chunksize (oldp); |
3451 | |
3452 | if (chunk_is_mmapped (oldp)) |
3453 | ar_ptr = NULL; |
3454 | else |
3455 | { |
3456 | MAYBE_INIT_TCACHE (); |
3457 | ar_ptr = arena_for_chunk (oldp); |
3458 | } |
3459 | |
3460 | /* Little security check which won't hurt performance: the allocator |
3461 | never wraps around at the end of the address space. Therefore |
3462 | we can exclude some size values which might appear here by |
3463 | accident or by "design" from some intruder. */ |
3464 | if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0) |
3465 | || __builtin_expect (misaligned_chunk (oldp), 0))) |
3466 | malloc_printerr ("realloc(): invalid pointer" ); |
3467 | |
3468 | nb = checked_request2size (bytes); |
3469 | if (nb == 0) |
3470 | { |
3471 | __set_errno (ENOMEM); |
3472 | return NULL; |
3473 | } |
3474 | |
3475 | if (chunk_is_mmapped (oldp)) |
3476 | { |
3477 | void *newmem; |
3478 | |
3479 | #if HAVE_MREMAP |
3480 | newp = mremap_chunk (oldp, nb); |
3481 | if (newp) |
3482 | { |
3483 | void *newmem = chunk2mem_tag (newp); |
3484 | /* Give the new block a different tag. This helps to ensure |
3485 | that stale handles to the previous mapping are not |
3486 | reused. There's a performance hit for both us and the |
3487 | caller for doing this, so we might want to |
3488 | reconsider. */ |
3489 | return tag_new_usable (newmem); |
3490 | } |
3491 | #endif |
3492 | /* Note the extra SIZE_SZ overhead. */ |
3493 | if (oldsize - SIZE_SZ >= nb) |
3494 | return oldmem; /* do nothing */ |
3495 | |
3496 | /* Must alloc, copy, free. */ |
3497 | newmem = __libc_malloc (bytes); |
3498 | if (newmem == 0) |
3499 | return 0; /* propagate failure */ |
3500 | |
3501 | memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ); |
3502 | munmap_chunk (oldp); |
3503 | return newmem; |
3504 | } |
3505 | |
3506 | if (SINGLE_THREAD_P) |
3507 | { |
3508 | newp = _int_realloc (ar_ptr, oldp, oldsize, nb); |
3509 | assert (!newp || chunk_is_mmapped (mem2chunk (newp)) || |
3510 | ar_ptr == arena_for_chunk (mem2chunk (newp))); |
3511 | |
3512 | return newp; |
3513 | } |
3514 | |
3515 | __libc_lock_lock (ar_ptr->mutex); |
3516 | |
3517 | newp = _int_realloc (ar_ptr, oldp, oldsize, nb); |
3518 | |
3519 | __libc_lock_unlock (ar_ptr->mutex); |
3520 | assert (!newp || chunk_is_mmapped (mem2chunk (newp)) || |
3521 | ar_ptr == arena_for_chunk (mem2chunk (newp))); |
3522 | |
3523 | if (newp == NULL) |
3524 | { |
3525 | /* Try harder to allocate memory in other arenas. */ |
3526 | LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem); |
3527 | newp = __libc_malloc (bytes); |
3528 | if (newp != NULL) |
3529 | { |
3530 | size_t sz = memsize (oldp); |
3531 | memcpy (newp, oldmem, sz); |
3532 | (void) tag_region (chunk2mem (oldp), sz); |
3533 | _int_free (ar_ptr, oldp, 0); |
3534 | } |
3535 | } |
3536 | |
3537 | return newp; |
3538 | } |
3539 | libc_hidden_def (__libc_realloc) |
3540 | |
3541 | void * |
3542 | __libc_memalign (size_t alignment, size_t bytes) |
3543 | { |
3544 | if (!__malloc_initialized) |
3545 | ptmalloc_init (); |
3546 | |
3547 | void *address = RETURN_ADDRESS (0); |
3548 | return _mid_memalign (alignment, bytes, address); |
3549 | } |
3550 | libc_hidden_def (__libc_memalign) |
3551 | |
3552 | /* For ISO C17. */ |
3553 | void * |
3554 | weak_function |
3555 | aligned_alloc (size_t alignment, size_t bytes) |
3556 | { |
3557 | if (!__malloc_initialized) |
3558 | ptmalloc_init (); |
3559 | |
3560 | /* Similar to memalign, but starting with ISO C17 the standard |
3561 | requires an error for alignments that are not supported by the |
3562 | implementation. Valid alignments for the current implementation |
3563 | are non-negative powers of two. */ |
3564 | if (!powerof2 (alignment) || alignment == 0) |
3565 | { |
3566 | __set_errno (EINVAL); |
3567 | return 0; |
3568 | } |
3569 | |
3570 | void *address = RETURN_ADDRESS (0); |
3571 | return _mid_memalign (alignment, bytes, address); |
3572 | } |
3573 | |
3574 | static void * |
3575 | _mid_memalign (size_t alignment, size_t bytes, void *address) |
3576 | { |
3577 | mstate ar_ptr; |
3578 | void *p; |
3579 | |
3580 | /* If we need less alignment than we give anyway, just relay to malloc. */ |
3581 | if (alignment <= MALLOC_ALIGNMENT) |
3582 | return __libc_malloc (bytes); |
3583 | |
3584 | /* Otherwise, ensure that it is at least a minimum chunk size */ |
3585 | if (alignment < MINSIZE) |
3586 | alignment = MINSIZE; |
3587 | |
3588 | /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a |
3589 | power of 2 and will cause overflow in the check below. */ |
3590 | if (alignment > SIZE_MAX / 2 + 1) |
3591 | { |
3592 | __set_errno (EINVAL); |
3593 | return 0; |
3594 | } |
3595 | |
3596 | |
3597 | /* Make sure alignment is power of 2. */ |
3598 | if (!powerof2 (alignment)) |
3599 | { |
3600 | size_t a = MALLOC_ALIGNMENT * 2; |
3601 | while (a < alignment) |
3602 | a <<= 1; |
3603 | alignment = a; |
3604 | } |
3605 | |
3606 | #if USE_TCACHE |
3607 | { |
3608 | size_t tbytes; |
3609 | tbytes = checked_request2size (bytes); |
3610 | if (tbytes == 0) |
3611 | { |
3612 | __set_errno (ENOMEM); |
3613 | return NULL; |
3614 | } |
3615 | size_t tc_idx = csize2tidx (tbytes); |
3616 | |
3617 | if (tc_idx < mp_.tcache_bins |
3618 | && tcache != NULL |
3619 | && tcache->counts[tc_idx] > 0) |
3620 | { |
3621 | /* The tcache itself isn't encoded, but the chain is. */ |
3622 | tcache_entry **tep = & tcache->entries[tc_idx]; |
3623 | tcache_entry *te = *tep; |
3624 | while (te != NULL && !PTR_IS_ALIGNED (te, alignment)) |
3625 | { |
3626 | tep = & (te->next); |
3627 | te = tcache_next (te); |
3628 | } |
3629 | if (te != NULL) |
3630 | { |
3631 | void *victim = tcache_get_n (tc_idx, tep); |
3632 | return tag_new_usable (victim); |
3633 | } |
3634 | } |
3635 | } |
3636 | #endif |
3637 | |
3638 | if (SINGLE_THREAD_P) |
3639 | { |
3640 | p = _int_memalign (&main_arena, alignment, bytes); |
3641 | assert (!p || chunk_is_mmapped (mem2chunk (p)) || |
3642 | &main_arena == arena_for_chunk (mem2chunk (p))); |
3643 | return tag_new_usable (p); |
3644 | } |
3645 | |
3646 | arena_get (ar_ptr, bytes + alignment + MINSIZE); |
3647 | |
3648 | p = _int_memalign (ar_ptr, alignment, bytes); |
3649 | if (!p && ar_ptr != NULL) |
3650 | { |
3651 | LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment); |
3652 | ar_ptr = arena_get_retry (ar_ptr, bytes); |
3653 | p = _int_memalign (ar_ptr, alignment, bytes); |
3654 | } |
3655 | |
3656 | if (ar_ptr != NULL) |
3657 | __libc_lock_unlock (ar_ptr->mutex); |
3658 | |
3659 | assert (!p || chunk_is_mmapped (mem2chunk (p)) || |
3660 | ar_ptr == arena_for_chunk (mem2chunk (p))); |
3661 | return tag_new_usable (p); |
3662 | } |
3663 | |
3664 | void * |
3665 | __libc_valloc (size_t bytes) |
3666 | { |
3667 | if (!__malloc_initialized) |
3668 | ptmalloc_init (); |
3669 | |
3670 | void *address = RETURN_ADDRESS (0); |
3671 | size_t pagesize = GLRO (dl_pagesize); |
3672 | return _mid_memalign (pagesize, bytes, address); |
3673 | } |
3674 | |
3675 | void * |
3676 | __libc_pvalloc (size_t bytes) |
3677 | { |
3678 | if (!__malloc_initialized) |
3679 | ptmalloc_init (); |
3680 | |
3681 | void *address = RETURN_ADDRESS (0); |
3682 | size_t pagesize = GLRO (dl_pagesize); |
3683 | size_t rounded_bytes; |
3684 | /* ALIGN_UP with overflow check. */ |
3685 | if (__glibc_unlikely (__builtin_add_overflow (bytes, |
3686 | pagesize - 1, |
3687 | &rounded_bytes))) |
3688 | { |
3689 | __set_errno (ENOMEM); |
3690 | return 0; |
3691 | } |
3692 | rounded_bytes = rounded_bytes & -(pagesize - 1); |
3693 | |
3694 | return _mid_memalign (pagesize, rounded_bytes, address); |
3695 | } |
3696 | |
3697 | void * |
3698 | __libc_calloc (size_t n, size_t elem_size) |
3699 | { |
3700 | mstate av; |
3701 | mchunkptr oldtop; |
3702 | INTERNAL_SIZE_T sz, oldtopsize; |
3703 | void *mem; |
3704 | unsigned long clearsize; |
3705 | unsigned long nclears; |
3706 | INTERNAL_SIZE_T *d; |
3707 | ptrdiff_t bytes; |
3708 | |
3709 | if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes))) |
3710 | { |
3711 | __set_errno (ENOMEM); |
3712 | return NULL; |
3713 | } |
3714 | |
3715 | sz = bytes; |
3716 | |
3717 | if (!__malloc_initialized) |
3718 | ptmalloc_init (); |
3719 | |
3720 | MAYBE_INIT_TCACHE (); |
3721 | |
3722 | if (SINGLE_THREAD_P) |
3723 | av = &main_arena; |
3724 | else |
3725 | arena_get (av, sz); |
3726 | |
3727 | if (av) |
3728 | { |
3729 | /* Check if we hand out the top chunk, in which case there may be no |
3730 | need to clear. */ |
3731 | #if MORECORE_CLEARS |
3732 | oldtop = top (av); |
3733 | oldtopsize = chunksize (top (av)); |
3734 | # if MORECORE_CLEARS < 2 |
3735 | /* Only newly allocated memory is guaranteed to be cleared. */ |
3736 | if (av == &main_arena && |
3737 | oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop) |
3738 | oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop); |
3739 | # endif |
3740 | if (av != &main_arena) |
3741 | { |
3742 | heap_info *heap = heap_for_ptr (oldtop); |
3743 | if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop) |
3744 | oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop; |
3745 | } |
3746 | #endif |
3747 | } |
3748 | else |
3749 | { |
3750 | /* No usable arenas. */ |
3751 | oldtop = 0; |
3752 | oldtopsize = 0; |
3753 | } |
3754 | mem = _int_malloc (av, sz); |
3755 | |
3756 | assert (!mem || chunk_is_mmapped (mem2chunk (mem)) || |
3757 | av == arena_for_chunk (mem2chunk (mem))); |
3758 | |
3759 | if (!SINGLE_THREAD_P) |
3760 | { |
3761 | if (mem == 0 && av != NULL) |
3762 | { |
3763 | LIBC_PROBE (memory_calloc_retry, 1, sz); |
3764 | av = arena_get_retry (av, sz); |
3765 | mem = _int_malloc (av, sz); |
3766 | } |
3767 | |
3768 | if (av != NULL) |
3769 | __libc_lock_unlock (av->mutex); |
3770 | } |
3771 | |
3772 | /* Allocation failed even after a retry. */ |
3773 | if (mem == 0) |
3774 | return 0; |
3775 | |
3776 | mchunkptr p = mem2chunk (mem); |
3777 | |
3778 | /* If we are using memory tagging, then we need to set the tags |
3779 | regardless of MORECORE_CLEARS, so we zero the whole block while |
3780 | doing so. */ |
3781 | if (__glibc_unlikely (mtag_enabled)) |
3782 | return tag_new_zero_region (mem, memsize (p)); |
3783 | |
3784 | INTERNAL_SIZE_T csz = chunksize (p); |
3785 | |
3786 | /* Two optional cases in which clearing not necessary */ |
3787 | if (chunk_is_mmapped (p)) |
3788 | { |
3789 | if (__builtin_expect (perturb_byte, 0)) |
3790 | return memset (mem, 0, sz); |
3791 | |
3792 | return mem; |
3793 | } |
3794 | |
3795 | #if MORECORE_CLEARS |
3796 | if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) |
3797 | { |
3798 | /* clear only the bytes from non-freshly-sbrked memory */ |
3799 | csz = oldtopsize; |
3800 | } |
3801 | #endif |
3802 | |
3803 | /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that |
3804 | contents have an odd number of INTERNAL_SIZE_T-sized words; |
3805 | minimally 3. */ |
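  /* (Odd because CSZ is a multiple of MALLOC_ALIGNMENT, i.e. of
     2 * SIZE_SZ, and one SIZE_SZ of header is excluded; at least 3
     because CSZ >= MINSIZE.)  */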
3806 | d = (INTERNAL_SIZE_T *) mem; |
3807 | clearsize = csz - SIZE_SZ; |
3808 | nclears = clearsize / sizeof (INTERNAL_SIZE_T); |
3809 | assert (nclears >= 3); |
3810 | |
3811 | if (nclears > 9) |
3812 | return memset (d, 0, clearsize); |
3813 | |
3814 | else |
3815 | { |
3816 | *(d + 0) = 0; |
3817 | *(d + 1) = 0; |
3818 | *(d + 2) = 0; |
3819 | if (nclears > 4) |
3820 | { |
3821 | *(d + 3) = 0; |
3822 | *(d + 4) = 0; |
3823 | if (nclears > 6) |
3824 | { |
3825 | *(d + 5) = 0; |
3826 | *(d + 6) = 0; |
3827 | if (nclears > 8) |
3828 | { |
3829 | *(d + 7) = 0; |
3830 | *(d + 8) = 0; |
3831 | } |
3832 | } |
3833 | } |
3834 | } |
3835 | |
3836 | return mem; |
3837 | } |
3838 | #endif /* IS_IN (libc) */ |
3839 | |
3840 | /* |
3841 | ------------------------------ malloc ------------------------------ |
3842 | */ |
3843 | |
3844 | static void * |
3845 | _int_malloc (mstate av, size_t bytes) |
3846 | { |
3847 | INTERNAL_SIZE_T nb; /* normalized request size */ |
3848 | unsigned int idx; /* associated bin index */ |
3849 | mbinptr bin; /* associated bin */ |
3850 | |
3851 | mchunkptr victim; /* inspected/selected chunk */ |
3852 | INTERNAL_SIZE_T size; /* its size */ |
3853 | int victim_index; /* its bin index */ |
3854 | |
3855 | mchunkptr remainder; /* remainder from a split */ |
3856 | unsigned long remainder_size; /* its size */ |
3857 | |
3858 | unsigned int block; /* bit map traverser */ |
3859 | unsigned int bit; /* bit map traverser */ |
3860 | unsigned int map; /* current word of binmap */ |
3861 | |
3862 | mchunkptr fwd; /* misc temp for linking */ |
3863 | mchunkptr bck; /* misc temp for linking */ |
3864 | |
3865 | #if USE_TCACHE |
3866 | size_t tcache_unsorted_count; /* count of unsorted chunks processed */ |
3867 | #endif |
3868 | |
3869 | /* |
3870 | Convert request size to internal form by adding SIZE_SZ bytes |
3871 | overhead plus possibly more to obtain necessary alignment and/or |
3872 | to obtain a size of at least MINSIZE, the smallest allocatable |
     size.  Also, checked_request2size returns zero for request sizes
3874 | that are so large that they wrap around zero when padded and |
3875 | aligned. |
3876 | */ |
3877 | |
  nb = checked_request2size (bytes);
3879 | if (nb == 0) |
3880 | { |
3881 | __set_errno (ENOMEM); |
3882 | return NULL; |
3883 | } |
3884 | |
3885 | /* There are no usable arenas. Fall back to sysmalloc to get a chunk from |
3886 | mmap. */ |
3887 | if (__glibc_unlikely (av == NULL)) |
3888 | { |
3889 | void *p = sysmalloc (nb, av); |
3890 | if (p != NULL) |
        alloc_perturb (p, bytes);
3892 | return p; |
3893 | } |
3894 | |
3895 | /* |
3896 | If the size qualifies as a fastbin, first check corresponding bin. |
3897 | This code is safe to execute even if av is not yet initialized, so we |
3898 | can try it without checking, which saves some time on this fast path. |
3899 | */ |
3900 | |
3901 | #define REMOVE_FB(fb, victim, pp) \ |
3902 | do \ |
3903 | { \ |
3904 | victim = pp; \ |
3905 | if (victim == NULL) \ |
3906 | break; \ |
3907 | pp = REVEAL_PTR (victim->fd); \ |
3908 | if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \ |
3909 | malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \ |
3910 | } \ |
3911 | while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \ |
3912 | != victim); \ |
3913 | |
3914 | if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ())) |
3915 | { |
3916 | idx = fastbin_index (nb); |
3917 | mfastbinptr *fb = &fastbin (av, idx); |
3918 | mchunkptr pp; |
3919 | victim = *fb; |
3920 | |
3921 | if (victim != NULL) |
3922 | { |
3923 | if (__glibc_unlikely (misaligned_chunk (victim))) |
            malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
3925 | |
3926 | if (SINGLE_THREAD_P) |
3927 | *fb = REVEAL_PTR (victim->fd); |
3928 | else |
3929 | REMOVE_FB (fb, pp, victim); |
3930 | if (__glibc_likely (victim != NULL)) |
3931 | { |
3932 | size_t victim_idx = fastbin_index (chunksize (victim)); |
3933 | if (__builtin_expect (victim_idx != idx, 0)) |
                malloc_printerr ("malloc(): memory corruption (fast)");
3935 | check_remalloced_chunk (av, victim, nb); |
3936 | #if USE_TCACHE |
3937 | /* While we're here, if we see other chunks of the same size, |
3938 | stash them in the tcache. */ |
3939 | size_t tc_idx = csize2tidx (nb); |
3940 | if (tcache != NULL && tc_idx < mp_.tcache_bins) |
3941 | { |
3942 | mchunkptr tc_victim; |
3943 | |
3944 | /* While bin not empty and tcache not full, copy chunks. */ |
3945 | while (tcache->counts[tc_idx] < mp_.tcache_count |
3946 | && (tc_victim = *fb) != NULL) |
3947 | { |
3948 | if (__glibc_unlikely (misaligned_chunk (tc_victim))) |
3949 | malloc_printerr ("malloc(): unaligned fastbin chunk detected 3" ); |
3950 | if (SINGLE_THREAD_P) |
3951 | *fb = REVEAL_PTR (tc_victim->fd); |
3952 | else |
3953 | { |
3954 | REMOVE_FB (fb, pp, tc_victim); |
3955 | if (__glibc_unlikely (tc_victim == NULL)) |
3956 | break; |
3957 | } |
3958 | tcache_put (tc_victim, tc_idx); |
3959 | } |
3960 | } |
3961 | #endif |
3962 | void *p = chunk2mem (victim); |
              alloc_perturb (p, bytes);
3964 | return p; |
3965 | } |
3966 | } |
3967 | } |
3968 | |
3969 | /* |
3970 | If a small request, check regular bin. Since these "smallbins" |
3971 | hold one size each, no searching within bins is necessary. |
3972 | (For a large request, we need to wait until unsorted chunks are |
3973 | processed to find best fit. But for small ones, fits are exact |
3974 | anyway, so we can check now, which is faster.) |
3975 | */ |
3976 | |
3977 | if (in_smallbin_range (nb)) |
3978 | { |
3979 | idx = smallbin_index (nb); |
3980 | bin = bin_at (av, idx); |
3981 | |
3982 | if ((victim = last (bin)) != bin) |
3983 | { |
3984 | bck = victim->bk; |
3985 | if (__glibc_unlikely (bck->fd != victim)) |
            malloc_printerr ("malloc(): smallbin double linked list corrupted");
3987 | set_inuse_bit_at_offset (victim, nb); |
3988 | bin->bk = bck; |
3989 | bck->fd = bin; |
3990 | |
3991 | if (av != &main_arena) |
3992 | set_non_main_arena (victim); |
3993 | check_malloced_chunk (av, victim, nb); |
3994 | #if USE_TCACHE |
3995 | /* While we're here, if we see other chunks of the same size, |
3996 | stash them in the tcache. */ |
3997 | size_t tc_idx = csize2tidx (nb); |
3998 | if (tcache != NULL && tc_idx < mp_.tcache_bins) |
3999 | { |
4000 | mchunkptr tc_victim; |
4001 | |
4002 | /* While bin not empty and tcache not full, copy chunks over. */ |
4003 | while (tcache->counts[tc_idx] < mp_.tcache_count |
4004 | && (tc_victim = last (bin)) != bin) |
4005 | { |
4006 | if (tc_victim != 0) |
4007 | { |
4008 | bck = tc_victim->bk; |
4009 | set_inuse_bit_at_offset (tc_victim, nb); |
4010 | if (av != &main_arena) |
4011 | set_non_main_arena (tc_victim); |
4012 | bin->bk = bck; |
4013 | bck->fd = bin; |
4014 | |
4015 | tcache_put (tc_victim, tc_idx); |
4016 | } |
4017 | } |
4018 | } |
4019 | #endif |
4020 | void *p = chunk2mem (victim); |
          alloc_perturb (p, bytes);
4022 | return p; |
4023 | } |
4024 | } |
4025 | |
4026 | /* |
4027 | If this is a large request, consolidate fastbins before continuing. |
4028 | While it might look excessive to kill all fastbins before |
4029 | even seeing if there is space available, this avoids |
4030 | fragmentation problems normally associated with fastbins. |
4031 | Also, in practice, programs tend to have runs of either small or |
4032 | large requests, but less often mixtures, so consolidation is not |
    invoked all that often in most programs.  And the programs in which
    it is called frequently tend to be the ones that fragment anyway.
4035 | */ |
4036 | |
4037 | else |
4038 | { |
4039 | idx = largebin_index (nb); |
4040 | if (atomic_load_relaxed (&av->have_fastchunks)) |
4041 | malloc_consolidate (av); |
4042 | } |
4043 | |
4044 | /* |
4045 | Process recently freed or remaindered chunks, taking one only if |
    it is an exact fit, or, if this is a small request, the chunk is
    the remainder from the most recent non-exact fit.  Place other
    traversed chunks in
4048 | bins. Note that this step is the only place in any routine where |
4049 | chunks are placed in bins. |
4050 | |
4051 | The outer loop here is needed because we might not realize until |
4052 | near the end of malloc that we should have consolidated, so must |
4053 | do so and retry. This happens at most once, and only when we would |
4054 | otherwise need to expand memory to service a "small" request. |
4055 | */ |
4056 | |
4057 | #if USE_TCACHE |
4058 | INTERNAL_SIZE_T tcache_nb = 0; |
4059 | size_t tc_idx = csize2tidx (nb); |
4060 | if (tcache != NULL && tc_idx < mp_.tcache_bins) |
4061 | tcache_nb = nb; |
4062 | int return_cached = 0; |
4063 | |
4064 | tcache_unsorted_count = 0; |
4065 | #endif |
4066 | |
4067 | for (;; ) |
4068 | { |
4069 | int iters = 0; |
4070 | while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av)) |
4071 | { |
4072 | bck = victim->bk; |
4073 | size = chunksize (victim); |
4074 | mchunkptr next = chunk_at_offset (victim, size); |
4075 | |
          if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
              || __glibc_unlikely (size > av->system_mem))
            malloc_printerr ("malloc(): invalid size (unsorted)");
          if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
              || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
            malloc_printerr ("malloc(): invalid next size (unsorted)");
          if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
            malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
          if (__glibc_unlikely (bck->fd != victim)
              || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
            malloc_printerr ("malloc(): unsorted double linked list corrupted");
          if (__glibc_unlikely (prev_inuse (next)))
            malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
4089 | |
4090 | /* |
4091 | If a small request, try to use last remainder if it is the |
4092 | only chunk in unsorted bin. This helps promote locality for |
4093 | runs of consecutive small requests. This is the only |
4094 | exception to best-fit, and applies only when there is |
4095 | no exact fit for a small chunk. |
4096 | */ |
4097 | |
4098 | if (in_smallbin_range (nb) && |
4099 | bck == unsorted_chunks (av) && |
4100 | victim == av->last_remainder && |
4101 | (unsigned long) (size) > (unsigned long) (nb + MINSIZE)) |
4102 | { |
4103 | /* split and reattach remainder */ |
4104 | remainder_size = size - nb; |
4105 | remainder = chunk_at_offset (victim, nb); |
4106 | unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder; |
4107 | av->last_remainder = remainder; |
4108 | remainder->bk = remainder->fd = unsorted_chunks (av); |
4109 | if (!in_smallbin_range (remainder_size)) |
4110 | { |
4111 | remainder->fd_nextsize = NULL; |
4112 | remainder->bk_nextsize = NULL; |
4113 | } |
4114 | |
4115 | set_head (victim, nb | PREV_INUSE | |
4116 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
4117 | set_head (remainder, remainder_size | PREV_INUSE); |
4118 | set_foot (remainder, remainder_size); |
4119 | |
4120 | check_malloced_chunk (av, victim, nb); |
4121 | void *p = chunk2mem (victim); |
              alloc_perturb (p, bytes);
4123 | return p; |
4124 | } |
4125 | |
4126 | /* remove from unsorted list */ |
4127 | unsorted_chunks (av)->bk = bck; |
4128 | bck->fd = unsorted_chunks (av); |
4129 | |
4130 | /* Take now instead of binning if exact fit */ |
4131 | |
4132 | if (size == nb) |
4133 | { |
4134 | set_inuse_bit_at_offset (victim, size); |
4135 | if (av != &main_arena) |
4136 | set_non_main_arena (victim); |
4137 | #if USE_TCACHE |
4138 | /* Fill cache first, return to user only if cache fills. |
4139 | We may return one of these chunks later. */ |
4140 | if (tcache_nb > 0 |
4141 | && tcache->counts[tc_idx] < mp_.tcache_count) |
4142 | { |
4143 | tcache_put (victim, tc_idx); |
4144 | return_cached = 1; |
4145 | continue; |
4146 | } |
4147 | else |
4148 | { |
4149 | #endif |
4150 | check_malloced_chunk (av, victim, nb); |
4151 | void *p = chunk2mem (victim); |
              alloc_perturb (p, bytes);
4153 | return p; |
4154 | #if USE_TCACHE |
4155 | } |
4156 | #endif |
4157 | } |
4158 | |
4159 | /* place chunk in bin */ |
4160 | |
4161 | if (in_smallbin_range (size)) |
4162 | { |
4163 | victim_index = smallbin_index (size); |
4164 | bck = bin_at (av, victim_index); |
4165 | fwd = bck->fd; |
4166 | } |
4167 | else |
4168 | { |
4169 | victim_index = largebin_index (size); |
4170 | bck = bin_at (av, victim_index); |
4171 | fwd = bck->fd; |
4172 | |
4173 | /* maintain large bins in sorted order */ |
4174 | if (fwd != bck) |
4175 | { |
4176 | /* Or with inuse bit to speed comparisons */ |
4177 | size |= PREV_INUSE; |
4178 | /* if smaller than smallest, bypass loop below */ |
4179 | assert (chunk_main_arena (bck->bk)); |
4180 | if ((unsigned long) (size) |
4181 | < (unsigned long) chunksize_nomask (bck->bk)) |
4182 | { |
4183 | fwd = bck; |
4184 | bck = bck->bk; |
4185 | |
4186 | victim->fd_nextsize = fwd->fd; |
4187 | victim->bk_nextsize = fwd->fd->bk_nextsize; |
4188 | fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim; |
4189 | } |
4190 | else |
4191 | { |
4192 | assert (chunk_main_arena (fwd)); |
4193 | while ((unsigned long) size < chunksize_nomask (fwd)) |
4194 | { |
4195 | fwd = fwd->fd_nextsize; |
4196 | assert (chunk_main_arena (fwd)); |
4197 | } |
4198 | |
4199 | if ((unsigned long) size |
4200 | == (unsigned long) chunksize_nomask (fwd)) |
4201 | /* Always insert in the second position. */ |
4202 | fwd = fwd->fd; |
4203 | else |
4204 | { |
4205 | victim->fd_nextsize = fwd; |
4206 | victim->bk_nextsize = fwd->bk_nextsize; |
4207 | if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd)) |
                        malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
4209 | fwd->bk_nextsize = victim; |
4210 | victim->bk_nextsize->fd_nextsize = victim; |
4211 | } |
4212 | bck = fwd->bk; |
4213 | if (bck->fd != fwd) |
                    malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
4215 | } |
4216 | } |
4217 | else |
4218 | victim->fd_nextsize = victim->bk_nextsize = victim; |
4219 | } |
4220 | |
4221 | mark_bin (av, victim_index); |
4222 | victim->bk = bck; |
4223 | victim->fd = fwd; |
4224 | fwd->bk = victim; |
4225 | bck->fd = victim; |
4226 | |
4227 | #if USE_TCACHE |
4228 | /* If we've processed as many chunks as we're allowed while |
4229 | filling the cache, return one of the cached ones. */ |
4230 | ++tcache_unsorted_count; |
4231 | if (return_cached |
4232 | && mp_.tcache_unsorted_limit > 0 |
4233 | && tcache_unsorted_count > mp_.tcache_unsorted_limit) |
4234 | { |
4235 | return tcache_get (tc_idx); |
4236 | } |
4237 | #endif |
4238 | |
4239 | #define MAX_ITERS 10000 |
4240 | if (++iters >= MAX_ITERS) |
4241 | break; |
4242 | } |
4243 | |
4244 | #if USE_TCACHE |
4245 | /* If all the small chunks we found ended up cached, return one now. */ |
4246 | if (return_cached) |
4247 | { |
4248 | return tcache_get (tc_idx); |
4249 | } |
4250 | #endif |
4251 | |
4252 | /* |
4253 | If a large request, scan through the chunks of current bin in |
4254 | sorted order to find smallest that fits. Use the skip list for this. |
4255 | */ |
4256 | |
4257 | if (!in_smallbin_range (nb)) |
4258 | { |
4259 | bin = bin_at (av, idx); |
4260 | |
4261 | /* skip scan if empty or largest chunk is too small */ |
4262 | if ((victim = first (bin)) != bin |
4263 | && (unsigned long) chunksize_nomask (victim) |
4264 | >= (unsigned long) (nb)) |
4265 | { |
4266 | victim = victim->bk_nextsize; |
4267 | while (((unsigned long) (size = chunksize (victim)) < |
4268 | (unsigned long) (nb))) |
4269 | victim = victim->bk_nextsize; |
4270 | |
4271 | /* Avoid removing the first entry for a size so that the skip |
4272 | list does not have to be rerouted. */ |
4273 | if (victim != last (bin) |
4274 | && chunksize_nomask (victim) |
4275 | == chunksize_nomask (victim->fd)) |
4276 | victim = victim->fd; |
4277 | |
4278 | remainder_size = size - nb; |
          unlink_chunk (av, victim);
4280 | |
4281 | /* Exhaust */ |
4282 | if (remainder_size < MINSIZE) |
4283 | { |
4284 | set_inuse_bit_at_offset (victim, size); |
4285 | if (av != &main_arena) |
4286 | set_non_main_arena (victim); |
4287 | } |
4288 | /* Split */ |
4289 | else |
4290 | { |
4291 | remainder = chunk_at_offset (victim, nb); |
4292 | /* We cannot assume the unsorted list is empty and therefore |
4293 | have to perform a complete insert here. */ |
4294 | bck = unsorted_chunks (av); |
4295 | fwd = bck->fd; |
4296 | if (__glibc_unlikely (fwd->bk != bck)) |
                malloc_printerr ("malloc(): corrupted unsorted chunks");
4298 | remainder->bk = bck; |
4299 | remainder->fd = fwd; |
4300 | bck->fd = remainder; |
4301 | fwd->bk = remainder; |
4302 | if (!in_smallbin_range (remainder_size)) |
4303 | { |
4304 | remainder->fd_nextsize = NULL; |
4305 | remainder->bk_nextsize = NULL; |
4306 | } |
4307 | set_head (victim, nb | PREV_INUSE | |
4308 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
4309 | set_head (remainder, remainder_size | PREV_INUSE); |
4310 | set_foot (remainder, remainder_size); |
4311 | } |
4312 | check_malloced_chunk (av, victim, nb); |
4313 | void *p = chunk2mem (victim); |
          alloc_perturb (p, bytes);
4315 | return p; |
4316 | } |
4317 | } |
4318 | |
4319 | /* |
4320 | Search for a chunk by scanning bins, starting with next largest |
4321 | bin. This search is strictly by best-fit; i.e., the smallest |
4322 | (with ties going to approximately the least recently used) chunk |
4323 | that fits is selected. |
4324 | |
4325 | The bitmap avoids needing to check that most blocks are nonempty. |
4326 | The particular case of skipping all bins during warm-up phases |
4327 | when no chunks have been returned yet is faster than it might look. |
4328 | */ |
4329 | |
4330 | ++idx; |
4331 | bin = bin_at (av, idx); |
4332 | block = idx2block (idx); |
4333 | map = av->binmap[block]; |
4334 | bit = idx2bit (idx); |
4335 | |
4336 | for (;; ) |
4337 | { |
4338 | /* Skip rest of block if there are no more set bits in this block. */ |
4339 | if (bit > map || bit == 0) |
4340 | { |
4341 | do |
4342 | { |
4343 | if (++block >= BINMAPSIZE) /* out of bins */ |
4344 | goto use_top; |
4345 | } |
4346 | while ((map = av->binmap[block]) == 0); |
4347 | |
4348 | bin = bin_at (av, (block << BINMAPSHIFT)); |
4349 | bit = 1; |
4350 | } |
4351 | |
4352 | /* Advance to bin with set bit. There must be one. */ |
4353 | while ((bit & map) == 0) |
4354 | { |
4355 | bin = next_bin (bin); |
4356 | bit <<= 1; |
4357 | assert (bit != 0); |
4358 | } |
4359 | |
4360 | /* Inspect the bin. It is likely to be non-empty */ |
4361 | victim = last (bin); |
4362 | |
4363 | /* If a false alarm (empty bin), clear the bit. */ |
4364 | if (victim == bin) |
4365 | { |
4366 | av->binmap[block] = map &= ~bit; /* Write through */ |
4367 | bin = next_bin (bin); |
4368 | bit <<= 1; |
4369 | } |
4370 | |
4371 | else |
4372 | { |
4373 | size = chunksize (victim); |
4374 | |
4375 | /* We know the first chunk in this bin is big enough to use. */ |
4376 | assert ((unsigned long) (size) >= (unsigned long) (nb)); |
4377 | |
4378 | remainder_size = size - nb; |
4379 | |
4380 | /* unlink */ |
          unlink_chunk (av, victim);
4382 | |
4383 | /* Exhaust */ |
4384 | if (remainder_size < MINSIZE) |
4385 | { |
4386 | set_inuse_bit_at_offset (victim, size); |
4387 | if (av != &main_arena) |
4388 | set_non_main_arena (victim); |
4389 | } |
4390 | |
4391 | /* Split */ |
4392 | else |
4393 | { |
4394 | remainder = chunk_at_offset (victim, nb); |
4395 | |
4396 | /* We cannot assume the unsorted list is empty and therefore |
4397 | have to perform a complete insert here. */ |
4398 | bck = unsorted_chunks (av); |
4399 | fwd = bck->fd; |
4400 | if (__glibc_unlikely (fwd->bk != bck)) |
                malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4402 | remainder->bk = bck; |
4403 | remainder->fd = fwd; |
4404 | bck->fd = remainder; |
4405 | fwd->bk = remainder; |
4406 | |
4407 | /* advertise as last remainder */ |
4408 | if (in_smallbin_range (nb)) |
4409 | av->last_remainder = remainder; |
4410 | if (!in_smallbin_range (remainder_size)) |
4411 | { |
4412 | remainder->fd_nextsize = NULL; |
4413 | remainder->bk_nextsize = NULL; |
4414 | } |
4415 | set_head (victim, nb | PREV_INUSE | |
4416 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
4417 | set_head (remainder, remainder_size | PREV_INUSE); |
4418 | set_foot (remainder, remainder_size); |
4419 | } |
4420 | check_malloced_chunk (av, victim, nb); |
4421 | void *p = chunk2mem (victim); |
          alloc_perturb (p, bytes);
4423 | return p; |
4424 | } |
4425 | } |
4426 | |
4427 | use_top: |
4428 | /* |
4429 | If large enough, split off the chunk bordering the end of memory |
4430 | (held in av->top). Note that this is in accord with the best-fit |
4431 | search rule. In effect, av->top is treated as larger (and thus |
4432 | less well fitting) than any other available chunk since it can |
4433 | be extended to be as large as necessary (up to system |
4434 | limitations). |
4435 | |
4436 | We require that av->top always exists (i.e., has size >= |
4437 | MINSIZE) after initialization, so if it would otherwise be |
4438 | exhausted by current request, it is replenished. (The main |
4439 | reason for ensuring it exists is that we may need MINSIZE space |
4440 | to put in fenceposts in sysmalloc.) |
4441 | */ |
4442 | |
4443 | victim = av->top; |
4444 | size = chunksize (victim); |
4445 | |
4446 | if (__glibc_unlikely (size > av->system_mem)) |
malloc_printerr ("malloc(): corrupted top size");
4448 | |
4449 | if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE)) |
4450 | { |
4451 | remainder_size = size - nb; |
4452 | remainder = chunk_at_offset (victim, nb); |
4453 | av->top = remainder; |
4454 | set_head (victim, nb | PREV_INUSE | |
4455 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
4456 | set_head (remainder, remainder_size | PREV_INUSE); |
4457 | |
4458 | check_malloced_chunk (av, victim, nb); |
4459 | void *p = chunk2mem (victim); |
alloc_perturb (p, bytes);
4461 | return p; |
4462 | } |
4463 | |
4464 | /* When we are using atomic ops to free fast chunks we can get |
4465 | here for all block sizes. */ |
4466 | else if (atomic_load_relaxed (&av->have_fastchunks)) |
4467 | { |
4468 | malloc_consolidate (av); |
4469 | /* restore original bin index */ |
4470 | if (in_smallbin_range (nb)) |
4471 | idx = smallbin_index (nb); |
4472 | else |
4473 | idx = largebin_index (nb); |
4474 | } |
4475 | |
4476 | /* |
4477 | Otherwise, relay to handle system-dependent cases |
4478 | */ |
4479 | else |
4480 | { |
4481 | void *p = sysmalloc (nb, av); |
4482 | if (p != NULL) |
alloc_perturb (p, bytes);
4484 | return p; |
4485 | } |
4486 | } |
4487 | } |
4488 | |
4489 | /* |
4490 | ------------------------------ free ------------------------------ |
4491 | */ |
4492 | |
4493 | static void |
4494 | _int_free (mstate av, mchunkptr p, int have_lock) |
4495 | { |
4496 | INTERNAL_SIZE_T size; /* its size */ |
4497 | mfastbinptr *fb; /* associated fastbin */ |
4498 | |
4499 | size = chunksize (p); |
4500 | |
4501 | /* Little security check which won't hurt performance: the |
4502 | allocator never wraps around at the end of the address space. |
4503 | Therefore we can exclude some size values which might appear |
4504 | here by accident or by "design" from some intruder. */ |
4505 | if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0) |
4506 | || __builtin_expect (misaligned_chunk (p), 0)) |
malloc_printerr ("free(): invalid pointer");
/* We know that each chunk is at least MINSIZE bytes in size and a
   multiple of MALLOC_ALIGNMENT.  */
4510 | if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size))) |
malloc_printerr ("free(): invalid size");
4512 | |
4513 | check_inuse_chunk(av, p); |
4514 | |
4515 | #if USE_TCACHE |
4516 | { |
4517 | size_t tc_idx = csize2tidx (size); |
4518 | if (tcache != NULL && tc_idx < mp_.tcache_bins) |
4519 | { |
4520 | /* Check to see if it's already in the tcache. */ |
4521 | tcache_entry *e = (tcache_entry *) chunk2mem (p); |
4522 | |
/* This test succeeds on double free.  However, we don't 100%
   trust it (it also matches random payload data at a 1 in
   2^(bits in size_t) chance), so verify it's not an unlikely
   coincidence before aborting.  */
4527 | if (__glibc_unlikely (e->key == tcache_key)) |
4528 | { |
4529 | tcache_entry *tmp; |
4530 | size_t cnt = 0; |
4531 | LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx); |
4532 | for (tmp = tcache->entries[tc_idx]; |
4533 | tmp; |
4534 | tmp = REVEAL_PTR (tmp->next), ++cnt) |
4535 | { |
4536 | if (cnt >= mp_.tcache_count) |
malloc_printerr ("free(): too many chunks detected in tcache");
if (__glibc_unlikely (!aligned_OK (tmp)))
malloc_printerr ("free(): unaligned chunk detected in tcache 2");
if (tmp == e)
malloc_printerr ("free(): double free detected in tcache 2");
4542 | /* If we get here, it was a coincidence. We've wasted a |
4543 | few cycles, but don't abort. */ |
4544 | } |
4545 | } |
4546 | |
4547 | if (tcache->counts[tc_idx] < mp_.tcache_count) |
4548 | { |
4549 | tcache_put (p, tc_idx); |
4550 | return; |
4551 | } |
4552 | } |
4553 | } |
4554 | #endif |
4555 | |
4556 | /* |
4557 | If eligible, place chunk on a fastbin so it can be found |
4558 | and used quickly in malloc. |
4559 | */ |
4560 | |
4561 | if ((unsigned long)(size) <= (unsigned long)(get_max_fast ()) |
4562 | |
4563 | #if TRIM_FASTBINS |
4564 | /* |
4565 | If TRIM_FASTBINS set, don't place chunks |
4566 | bordering top into fastbins |
4567 | */ |
4568 | && (chunk_at_offset(p, size) != av->top) |
4569 | #endif |
4570 | ) { |
4571 | |
4572 | if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size)) |
4573 | <= CHUNK_HDR_SZ, 0) |
4574 | || __builtin_expect (chunksize (chunk_at_offset (p, size)) |
4575 | >= av->system_mem, 0)) |
4576 | { |
4577 | bool fail = true; |
4578 | /* We might not have a lock at this point and concurrent modifications |
4579 | of system_mem might result in a false positive. Redo the test after |
4580 | getting the lock. */ |
4581 | if (!have_lock) |
4582 | { |
4583 | __libc_lock_lock (av->mutex); |
4584 | fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ |
4585 | || chunksize (chunk_at_offset (p, size)) >= av->system_mem); |
4586 | __libc_lock_unlock (av->mutex); |
4587 | } |
4588 | |
4589 | if (fail) |
malloc_printerr ("free(): invalid next size (fast)");
4591 | } |
4592 | |
free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4594 | |
4595 | atomic_store_relaxed (&av->have_fastchunks, true); |
4596 | unsigned int idx = fastbin_index(size); |
4597 | fb = &fastbin (av, idx); |
4598 | |
4599 | /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */ |
4600 | mchunkptr old = *fb, old2; |
4601 | |
4602 | if (SINGLE_THREAD_P) |
4603 | { |
4604 | /* Check that the top of the bin is not the record we are going to |
4605 | add (i.e., double free). */ |
4606 | if (__builtin_expect (old == p, 0)) |
malloc_printerr ("double free or corruption (fasttop)");
4608 | p->fd = PROTECT_PTR (&p->fd, old); |
4609 | *fb = p; |
4610 | } |
4611 | else |
4612 | do |
4613 | { |
4614 | /* Check that the top of the bin is not the record we are going to |
4615 | add (i.e., double free). */ |
4616 | if (__builtin_expect (old == p, 0)) |
malloc_printerr ("double free or corruption (fasttop)");
4618 | old2 = old; |
4619 | p->fd = PROTECT_PTR (&p->fd, old); |
4620 | } |
4621 | while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) |
4622 | != old2); |
4623 | |
4624 | /* Check that size of fastbin chunk at the top is the same as |
4625 | size of the chunk that we are adding. We can dereference OLD |
4626 | only if we have the lock, otherwise it might have already been |
4627 | allocated again. */ |
4628 | if (have_lock && old != NULL |
4629 | && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0)) |
malloc_printerr ("invalid fastbin entry (free)");
4631 | } |
4632 | |
4633 | /* |
4634 | Consolidate other non-mmapped chunks as they arrive. |
4635 | */ |
4636 | |
4637 | else if (!chunk_is_mmapped(p)) { |
4638 | |
4639 | /* If we're single-threaded, don't lock the arena. */ |
4640 | if (SINGLE_THREAD_P) |
4641 | have_lock = true; |
4642 | |
4643 | if (!have_lock) |
4644 | __libc_lock_lock (av->mutex); |
4645 | |
4646 | _int_free_merge_chunk (av, p, size); |
4647 | |
4648 | if (!have_lock) |
4649 | __libc_lock_unlock (av->mutex); |
4650 | } |
4651 | /* |
4652 | If the chunk was allocated via mmap, release via munmap(). |
4653 | */ |
4654 | |
4655 | else { |
4656 | munmap_chunk (p); |
4657 | } |
4658 | } |
4659 | |
4660 | /* Try to merge chunk P of SIZE bytes with its neighbors. Put the |
4661 | resulting chunk on the appropriate bin list. P must not be on a |
4662 | bin list yet, and it can be in use. */ |
4663 | static void |
4664 | _int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size) |
4665 | { |
4666 | mchunkptr nextchunk = chunk_at_offset(p, size); |
4667 | |
4668 | /* Lightweight tests: check whether the block is already the |
4669 | top block. */ |
4670 | if (__glibc_unlikely (p == av->top)) |
malloc_printerr ("double free or corruption (top)");
4672 | /* Or whether the next chunk is beyond the boundaries of the arena. */ |
4673 | if (__builtin_expect (contiguous (av) |
4674 | && (char *) nextchunk |
4675 | >= ((char *) av->top + chunksize(av->top)), 0)) |
malloc_printerr ("double free or corruption (out)");
4677 | /* Or whether the block is actually not marked used. */ |
4678 | if (__glibc_unlikely (!prev_inuse(nextchunk))) |
malloc_printerr ("double free or corruption (!prev)");
4680 | |
4681 | INTERNAL_SIZE_T nextsize = chunksize(nextchunk); |
4682 | if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0) |
4683 | || __builtin_expect (nextsize >= av->system_mem, 0)) |
malloc_printerr ("free(): invalid next size (normal)");
4685 | |
free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4687 | |
4688 | /* Consolidate backward. */ |
4689 | if (!prev_inuse(p)) |
4690 | { |
4691 | INTERNAL_SIZE_T prevsize = prev_size (p); |
4692 | size += prevsize; |
4693 | p = chunk_at_offset(p, -((long) prevsize)); |
4694 | if (__glibc_unlikely (chunksize(p) != prevsize)) |
malloc_printerr ("corrupted size vs. prev_size while consolidating");
4696 | unlink_chunk (av, p); |
4697 | } |
4698 | |
4699 | /* Write the chunk header, maybe after merging with the following chunk. */ |
4700 | size = _int_free_create_chunk (av, p, size, nextchunk, nextsize); |
4701 | _int_free_maybe_consolidate (av, size); |
4702 | } |
4703 | |
4704 | /* Create a chunk at P of SIZE bytes, with SIZE potentially increased |
4705 | to cover the immediately following chunk NEXTCHUNK of NEXTSIZE |
4706 | bytes (if NEXTCHUNK is unused). The chunk at P is not actually |
4707 | read and does not have to be initialized. After creation, it is |
4708 | placed on the appropriate bin list. The function returns the size |
4709 | of the new chunk. */ |
4710 | static INTERNAL_SIZE_T |
4711 | _int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, |
4712 | mchunkptr nextchunk, INTERNAL_SIZE_T nextsize) |
4713 | { |
4714 | if (nextchunk != av->top) |
4715 | { |
4716 | /* get and clear inuse bit */ |
4717 | bool nextinuse = inuse_bit_at_offset (nextchunk, nextsize); |
4718 | |
4719 | /* consolidate forward */ |
4720 | if (!nextinuse) { |
unlink_chunk (av, nextchunk);
4722 | size += nextsize; |
4723 | } else |
4724 | clear_inuse_bit_at_offset(nextchunk, 0); |
4725 | |
4726 | /* |
4727 | Place the chunk in unsorted chunk list. Chunks are |
4728 | not placed into regular bins until after they have |
4729 | been given one chance to be used in malloc. |
4730 | */ |
4731 | |
4732 | mchunkptr bck = unsorted_chunks (av); |
4733 | mchunkptr fwd = bck->fd; |
4734 | if (__glibc_unlikely (fwd->bk != bck)) |
malloc_printerr ("free(): corrupted unsorted chunks");
4736 | p->fd = fwd; |
4737 | p->bk = bck; |
4738 | if (!in_smallbin_range(size)) |
4739 | { |
4740 | p->fd_nextsize = NULL; |
4741 | p->bk_nextsize = NULL; |
4742 | } |
4743 | bck->fd = p; |
4744 | fwd->bk = p; |
4745 | |
4746 | set_head(p, size | PREV_INUSE); |
4747 | set_foot(p, size); |
4748 | |
4749 | check_free_chunk(av, p); |
4750 | } |
4751 | |
4752 | else |
4753 | { |
4754 | /* If the chunk borders the current high end of memory, |
4755 | consolidate into top. */ |
4756 | size += nextsize; |
4757 | set_head(p, size | PREV_INUSE); |
4758 | av->top = p; |
4759 | check_chunk(av, p); |
4760 | } |
4761 | |
4762 | return size; |
4763 | } |
4764 | |
4765 | /* If freeing a large space, consolidate possibly-surrounding |
4766 | chunks. Then, if the total unused topmost memory exceeds trim |
4767 | threshold, ask malloc_trim to reduce top. */ |
4768 | static void |
4769 | _int_free_maybe_consolidate (mstate av, INTERNAL_SIZE_T size) |
4770 | { |
4771 | /* Unless max_fast is 0, we don't know if there are fastbins |
4772 | bordering top, so we cannot tell for sure whether threshold has |
4773 | been reached unless fastbins are consolidated. But we don't want |
4774 | to consolidate on each free. As a compromise, consolidation is |
4775 | performed if FASTBIN_CONSOLIDATION_THRESHOLD is reached. */ |
4776 | if (size >= FASTBIN_CONSOLIDATION_THRESHOLD) |
4777 | { |
4778 | if (atomic_load_relaxed (&av->have_fastchunks)) |
4779 | malloc_consolidate(av); |
4780 | |
4781 | if (av == &main_arena) |
4782 | { |
4783 | #ifndef MORECORE_CANNOT_TRIM |
4784 | if (chunksize (av->top) >= mp_.trim_threshold) |
systrim (mp_.top_pad, av);
4786 | #endif |
4787 | } |
4788 | else |
4789 | { |
4790 | /* Always try heap_trim, even if the top chunk is not large, |
4791 | because the corresponding heap might go away. */ |
4792 | heap_info *heap = heap_for_ptr (top (av)); |
4793 | |
4794 | assert (heap->ar_ptr == av); |
heap_trim (heap, mp_.top_pad);
4796 | } |
4797 | } |
4798 | } |
4799 | |
4800 | /* |
4801 | ------------------------- malloc_consolidate ------------------------- |
4802 | |
4803 | malloc_consolidate is a specialized version of free() that tears |
4804 | down chunks held in fastbins. Free itself cannot be used for this |
4805 | purpose since, among other things, it might place chunks back onto |
4806 | fastbins. So, instead, we need to use a minor variant of the same |
4807 | code. |
4808 | */ |
4809 | |
4810 | static void malloc_consolidate(mstate av) |
4811 | { |
4812 | mfastbinptr* fb; /* current fastbin being consolidated */ |
4813 | mfastbinptr* maxfb; /* last fastbin (for loop control) */ |
4814 | mchunkptr p; /* current chunk being consolidated */ |
4815 | mchunkptr nextp; /* next chunk to consolidate */ |
4816 | mchunkptr unsorted_bin; /* bin header */ |
4817 | mchunkptr first_unsorted; /* chunk to link to */ |
4818 | |
4819 | /* These have same use as in free() */ |
4820 | mchunkptr nextchunk; |
4821 | INTERNAL_SIZE_T size; |
4822 | INTERNAL_SIZE_T nextsize; |
4823 | INTERNAL_SIZE_T prevsize; |
4824 | int nextinuse; |
4825 | |
4826 | atomic_store_relaxed (&av->have_fastchunks, false); |
4827 | |
4828 | unsorted_bin = unsorted_chunks(av); |
4829 | |
4830 | /* |
4831 | Remove each chunk from fast bin and consolidate it, placing it |
4832 | then in unsorted bin. Among other reasons for doing this, |
4833 | placing in unsorted bin avoids needing to calculate actual bins |
4834 | until malloc is sure that chunks aren't immediately going to be |
4835 | reused anyway. |
4836 | */ |
4837 | |
4838 | maxfb = &fastbin (av, NFASTBINS - 1); |
4839 | fb = &fastbin (av, 0); |
4840 | do { |
4841 | p = atomic_exchange_acquire (fb, NULL); |
4842 | if (p != 0) { |
4843 | do { |
4844 | { |
4845 | if (__glibc_unlikely (misaligned_chunk (p))) |
malloc_printerr ("malloc_consolidate(): "
"unaligned fastbin chunk detected");
4848 | |
4849 | unsigned int idx = fastbin_index (chunksize (p)); |
4850 | if ((&fastbin (av, idx)) != fb) |
malloc_printerr ("malloc_consolidate(): invalid chunk size");
4852 | } |
4853 | |
4854 | check_inuse_chunk(av, p); |
4855 | nextp = REVEAL_PTR (p->fd); |
4856 | |
4857 | /* Slightly streamlined version of consolidation code in free() */ |
4858 | size = chunksize (p); |
4859 | nextchunk = chunk_at_offset(p, size); |
4860 | nextsize = chunksize(nextchunk); |
4861 | |
4862 | if (!prev_inuse(p)) { |
4863 | prevsize = prev_size (p); |
4864 | size += prevsize; |
4865 | p = chunk_at_offset(p, -((long) prevsize)); |
4866 | if (__glibc_unlikely (chunksize(p) != prevsize)) |
malloc_printerr ("corrupted size vs. prev_size in fastbins");
4868 | unlink_chunk (av, p); |
4869 | } |
4870 | |
4871 | if (nextchunk != av->top) { |
4872 | nextinuse = inuse_bit_at_offset(nextchunk, nextsize); |
4873 | |
4874 | if (!nextinuse) { |
4875 | size += nextsize; |
unlink_chunk (av, nextchunk);
4877 | } else |
4878 | clear_inuse_bit_at_offset(nextchunk, 0); |
4879 | |
4880 | first_unsorted = unsorted_bin->fd; |
4881 | unsorted_bin->fd = p; |
4882 | first_unsorted->bk = p; |
4883 | |
4884 | if (!in_smallbin_range (size)) { |
4885 | p->fd_nextsize = NULL; |
4886 | p->bk_nextsize = NULL; |
4887 | } |
4888 | |
4889 | set_head(p, size | PREV_INUSE); |
4890 | p->bk = unsorted_bin; |
4891 | p->fd = first_unsorted; |
4892 | set_foot(p, size); |
4893 | } |
4894 | |
4895 | else { |
4896 | size += nextsize; |
4897 | set_head(p, size | PREV_INUSE); |
4898 | av->top = p; |
4899 | } |
4900 | |
4901 | } while ( (p = nextp) != 0); |
4902 | |
4903 | } |
4904 | } while (fb++ != maxfb); |
4905 | } |
4906 | |
4907 | /* |
4908 | ------------------------------ realloc ------------------------------ |
4909 | */ |
4910 | |
4911 | static void * |
4912 | _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize, |
4913 | INTERNAL_SIZE_T nb) |
4914 | { |
4915 | mchunkptr newp; /* chunk to return */ |
4916 | INTERNAL_SIZE_T newsize; /* its size */ |
4917 | void* newmem; /* corresponding user mem */ |
4918 | |
4919 | mchunkptr next; /* next contiguous chunk after oldp */ |
4920 | |
4921 | mchunkptr remainder; /* extra space at end of newp */ |
4922 | unsigned long remainder_size; /* its size */ |
4923 | |
4924 | /* oldmem size */ |
4925 | if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0) |
4926 | || __builtin_expect (oldsize >= av->system_mem, 0) |
4927 | || __builtin_expect (oldsize != chunksize (oldp), 0)) |
malloc_printerr ("realloc(): invalid old size");
4929 | |
4930 | check_inuse_chunk (av, oldp); |
4931 | |
4932 | /* All callers already filter out mmap'ed chunks. */ |
4933 | assert (!chunk_is_mmapped (oldp)); |
4934 | |
4935 | next = chunk_at_offset (oldp, oldsize); |
4936 | INTERNAL_SIZE_T nextsize = chunksize (next); |
4937 | if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0) |
4938 | || __builtin_expect (nextsize >= av->system_mem, 0)) |
malloc_printerr ("realloc(): invalid next size");
4940 | |
4941 | if ((unsigned long) (oldsize) >= (unsigned long) (nb)) |
4942 | { |
4943 | /* already big enough; split below */ |
4944 | newp = oldp; |
4945 | newsize = oldsize; |
4946 | } |
4947 | |
4948 | else |
4949 | { |
4950 | /* Try to expand forward into top */ |
4951 | if (next == av->top && |
4952 | (unsigned long) (newsize = oldsize + nextsize) >= |
4953 | (unsigned long) (nb + MINSIZE)) |
4954 | { |
4955 | set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
4956 | av->top = chunk_at_offset (oldp, nb); |
4957 | set_head (av->top, (newsize - nb) | PREV_INUSE); |
4958 | check_inuse_chunk (av, oldp); |
4959 | return tag_new_usable (chunk2mem (oldp)); |
4960 | } |
4961 | |
4962 | /* Try to expand forward into next chunk; split off remainder below */ |
4963 | else if (next != av->top && |
4964 | !inuse (next) && |
4965 | (unsigned long) (newsize = oldsize + nextsize) >= |
4966 | (unsigned long) (nb)) |
4967 | { |
4968 | newp = oldp; |
unlink_chunk (av, next);
4970 | } |
4971 | |
4972 | /* allocate, copy, free */ |
4973 | else |
4974 | { |
newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4976 | if (newmem == 0) |
4977 | return 0; /* propagate failure */ |
4978 | |
4979 | newp = mem2chunk (newmem); |
4980 | newsize = chunksize (newp); |
4981 | |
4982 | /* |
4983 | Avoid copy if newp is next chunk after oldp. |
4984 | */ |
4985 | if (newp == next) |
4986 | { |
4987 | newsize += oldsize; |
4988 | newp = oldp; |
4989 | } |
4990 | else |
4991 | { |
4992 | void *oldmem = chunk2mem (oldp); |
4993 | size_t sz = memsize (oldp); |
(void) tag_region (oldmem, sz);
newmem = tag_new_usable (newmem);
memcpy (newmem, oldmem, sz);
_int_free (av, oldp, 1);
4998 | check_inuse_chunk (av, newp); |
4999 | return newmem; |
5000 | } |
5001 | } |
5002 | } |
5003 | |
5004 | /* If possible, free extra space in old or extended chunk */ |
5005 | |
5006 | assert ((unsigned long) (newsize) >= (unsigned long) (nb)); |
5007 | |
5008 | remainder_size = newsize - nb; |
5009 | |
5010 | if (remainder_size < MINSIZE) /* not enough extra to split off */ |
5011 | { |
5012 | set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
5013 | set_inuse_bit_at_offset (newp, newsize); |
5014 | } |
5015 | else /* split remainder */ |
5016 | { |
5017 | remainder = chunk_at_offset (newp, nb); |
5018 | /* Clear any user-space tags before writing the header. */ |
remainder = tag_region (remainder, remainder_size);
5020 | set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
5021 | set_head (remainder, remainder_size | PREV_INUSE | |
5022 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
5023 | /* Mark remainder as inuse so free() won't complain */ |
5024 | set_inuse_bit_at_offset (remainder, remainder_size); |
_int_free (av, remainder, 1);
5026 | } |
5027 | |
5028 | check_inuse_chunk (av, newp); |
5029 | return tag_new_usable (chunk2mem (newp)); |
5030 | } |
5031 | |
5032 | /* |
5033 | ------------------------------ memalign ------------------------------ |
5034 | */ |
5035 | |
5036 | /* BYTES is user requested bytes, not requested chunksize bytes. */ |
5037 | static void * |
5038 | _int_memalign (mstate av, size_t alignment, size_t bytes) |
5039 | { |
5040 | INTERNAL_SIZE_T nb; /* padded request size */ |
5041 | char *m; /* memory returned by malloc call */ |
5042 | mchunkptr p; /* corresponding chunk */ |
5043 | char *brk; /* alignment point within p */ |
5044 | mchunkptr newp; /* chunk to return */ |
5045 | INTERNAL_SIZE_T newsize; /* its size */ |
5046 | INTERNAL_SIZE_T leadsize; /* leading space before alignment point */ |
5047 | mchunkptr remainder; /* spare room at end to split off */ |
5048 | unsigned long remainder_size; /* its size */ |
5049 | INTERNAL_SIZE_T size; |
5050 | |
nb = checked_request2size (bytes);
5052 | if (nb == 0) |
5053 | { |
5054 | __set_errno (ENOMEM); |
5055 | return NULL; |
5056 | } |
5057 | |
5058 | /* We can't check tcache here because we hold the arena lock, which |
5059 | tcache doesn't expect. We expect it has been checked |
5060 | earlier. */ |
5061 | |
5062 | /* Strategy: search the bins looking for an existing block that |
5063 | meets our needs. We scan a range of bins from "exact size" to |
5064 | "just under 2x", spanning the small/large barrier if needed. If |
5065 | we don't find anything in those bins, the common malloc code will |
5066 | scan starting at 2x. */ |
5067 | |
5068 | /* Call malloc with worst case padding to hit alignment. */ |
m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
5070 | |
5071 | if (m == 0) |
5072 | return 0; /* propagate failure */ |
5073 | |
5074 | p = mem2chunk (m); |
5075 | |
5076 | if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */ |
5077 | { |
5078 | /* Find an aligned spot inside chunk. Since we need to give back |
5079 | leading space in a chunk of at least MINSIZE, if the first |
5080 | calculation places us at a spot with less than MINSIZE leader, |
5081 | we can move to the next aligned spot -- we've allocated enough |
5082 | total room so that this is always possible. */ |
5083 | brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) & |
5084 | - ((signed long) alignment)); |
5085 | if ((unsigned long) (brk - (char *) (p)) < MINSIZE) |
5086 | brk += alignment; |
5087 | |
5088 | newp = (mchunkptr) brk; |
5089 | leadsize = brk - (char *) (p); |
5090 | newsize = chunksize (p) - leadsize; |
5091 | |
5092 | /* For mmapped chunks, just adjust offset */ |
5093 | if (chunk_is_mmapped (p)) |
5094 | { |
5095 | set_prev_size (newp, prev_size (p) + leadsize); |
5096 | set_head (newp, newsize | IS_MMAPPED); |
5097 | return chunk2mem (newp); |
5098 | } |
5099 | |
5100 | /* Otherwise, give back leader, use the rest */ |
5101 | set_head (newp, newsize | PREV_INUSE | |
5102 | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
5103 | set_inuse_bit_at_offset (newp, newsize); |
5104 | set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0)); |
_int_free_merge_chunk (av, p, leadsize);
5106 | p = newp; |
5107 | |
5108 | assert (newsize >= nb && |
5109 | (((unsigned long) (chunk2mem (p))) % alignment) == 0); |
5110 | } |
5111 | |
5112 | /* Also give back spare room at the end */ |
5113 | if (!chunk_is_mmapped (p)) |
5114 | { |
5115 | size = chunksize (p); |
5116 | mchunkptr nextchunk = chunk_at_offset(p, size); |
5117 | INTERNAL_SIZE_T nextsize = chunksize(nextchunk); |
5118 | if (size > nb) |
5119 | { |
5120 | remainder_size = size - nb; |
5121 | if (remainder_size >= MINSIZE |
5122 | || nextchunk == av->top |
5123 | || !inuse_bit_at_offset (nextchunk, nextsize)) |
5124 | { |
5125 | /* We can only give back the tail if it is larger than |
5126 | MINSIZE, or if the following chunk is unused (top |
5127 | chunk or unused in-heap chunk). Otherwise we would |
5128 | create a chunk that is smaller than MINSIZE. */ |
5129 | remainder = chunk_at_offset (p, nb); |
5130 | set_head_size (p, nb); |
remainder_size = _int_free_create_chunk (av, remainder,
remainder_size,
nextchunk, nextsize);
_int_free_maybe_consolidate (av, remainder_size);
5135 | } |
5136 | } |
5137 | } |
5138 | |
5139 | check_inuse_chunk (av, p); |
5140 | return chunk2mem (p); |
5141 | } |
5142 | |
5143 | |
5144 | /* |
5145 | ------------------------------ malloc_trim ------------------------------ |
5146 | */ |
5147 | |
5148 | static int |
5149 | mtrim (mstate av, size_t pad) |
5150 | { |
5151 | /* Ensure all blocks are consolidated. */ |
5152 | malloc_consolidate (av); |
5153 | |
5154 | const size_t ps = GLRO (dl_pagesize); |
5155 | int psindex = bin_index (ps); |
5156 | const size_t psm1 = ps - 1; |
5157 | |
5158 | int result = 0; |
5159 | for (int i = 1; i < NBINS; ++i) |
5160 | if (i == 1 || i >= psindex) |
5161 | { |
5162 | mbinptr bin = bin_at (av, i); |
5163 | |
5164 | for (mchunkptr p = last (bin); p != bin; p = p->bk) |
5165 | { |
5166 | INTERNAL_SIZE_T size = chunksize (p); |
5167 | |
5168 | if (size > psm1 + sizeof (struct malloc_chunk)) |
5169 | { |
5170 | /* See whether the chunk contains at least one unused page. */ |
5171 | char *paligned_mem = (char *) (((uintptr_t) p |
5172 | + sizeof (struct malloc_chunk) |
5173 | + psm1) & ~psm1); |
5174 | |
5175 | assert ((char *) chunk2mem (p) + 2 * CHUNK_HDR_SZ |
5176 | <= paligned_mem); |
5177 | assert ((char *) p + size > paligned_mem); |
5178 | |
5179 | /* This is the size we could potentially free. */ |
5180 | size -= paligned_mem - (char *) p; |
5181 | |
5182 | if (size > psm1) |
5183 | { |
5184 | #if MALLOC_DEBUG |
5185 | /* When debugging we simulate destroying the memory |
5186 | content. */ |
5187 | memset (paligned_mem, 0x89, size & ~psm1); |
5188 | #endif |
__madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5190 | |
5191 | result = 1; |
5192 | } |
5193 | } |
5194 | } |
5195 | } |
5196 | |
5197 | #ifndef MORECORE_CANNOT_TRIM |
5198 | return result | (av == &main_arena ? systrim (pad, av) : 0); |
5199 | |
5200 | #else |
5201 | return result; |
5202 | #endif |
5203 | } |
5204 | |
5205 | |
5206 | int |
5207 | __malloc_trim (size_t s) |
5208 | { |
5209 | int result = 0; |
5210 | |
5211 | if (!__malloc_initialized) |
5212 | ptmalloc_init (); |
5213 | |
5214 | mstate ar_ptr = &main_arena; |
5215 | do |
5216 | { |
5217 | __libc_lock_lock (ar_ptr->mutex); |
result |= mtrim (ar_ptr, s);
5219 | __libc_lock_unlock (ar_ptr->mutex); |
5220 | |
5221 | ar_ptr = ar_ptr->next; |
5222 | } |
5223 | while (ar_ptr != &main_arena); |
5224 | |
5225 | return result; |
5226 | } |
5227 | |
5228 | |
5229 | /* |
5230 | ------------------------- malloc_usable_size ------------------------- |
5231 | */ |
5232 | |
5233 | static size_t |
5234 | musable (void *mem) |
5235 | { |
5236 | mchunkptr p = mem2chunk (mem); |
5237 | |
5238 | if (chunk_is_mmapped (p)) |
5239 | return chunksize (p) - CHUNK_HDR_SZ; |
5240 | else if (inuse (p)) |
5241 | return memsize (p); |
5242 | |
5243 | return 0; |
5244 | } |
5245 | |
5246 | #if IS_IN (libc) |
5247 | size_t |
5248 | __malloc_usable_size (void *m) |
5249 | { |
5250 | if (m == NULL) |
5251 | return 0; |
5252 | return musable (m); |
5253 | } |
5254 | #endif |
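
/* Usage sketch (illustrative only): the value returned is the number
   of bytes actually usable in the block, which may exceed the
   requested size because of chunk rounding.

   #include <malloc.h>
   #include <stdio.h>
   #include <stdlib.h>

   int main (void)
   {
     void *p = malloc (100);
     if (p != NULL)
       printf ("requested 100, usable %zu\n", malloc_usable_size (p));
     free (p);
     return 0;
   }
*/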
5255 | |
5256 | /* |
5257 | ------------------------------ mallinfo ------------------------------ |
5258 | Accumulate malloc statistics for arena AV into M. |
5259 | */ |
5260 | static void |
5261 | int_mallinfo (mstate av, struct mallinfo2 *m) |
5262 | { |
5263 | size_t i; |
5264 | mbinptr b; |
5265 | mchunkptr p; |
5266 | INTERNAL_SIZE_T avail; |
5267 | INTERNAL_SIZE_T fastavail; |
5268 | int nblocks; |
5269 | int nfastblocks; |
5270 | |
5271 | check_malloc_state (av); |
5272 | |
5273 | /* Account for top */ |
5274 | avail = chunksize (av->top); |
5275 | nblocks = 1; /* top always exists */ |
5276 | |
5277 | /* traverse fastbins */ |
5278 | nfastblocks = 0; |
5279 | fastavail = 0; |
5280 | |
5281 | for (i = 0; i < NFASTBINS; ++i) |
5282 | { |
5283 | for (p = fastbin (av, i); |
5284 | p != 0; |
5285 | p = REVEAL_PTR (p->fd)) |
5286 | { |
5287 | if (__glibc_unlikely (misaligned_chunk (p))) |
malloc_printerr ("int_mallinfo(): "
"unaligned fastbin chunk detected");
5290 | ++nfastblocks; |
5291 | fastavail += chunksize (p); |
5292 | } |
5293 | } |
5294 | |
5295 | avail += fastavail; |
5296 | |
5297 | /* traverse regular bins */ |
5298 | for (i = 1; i < NBINS; ++i) |
5299 | { |
5300 | b = bin_at (av, i); |
5301 | for (p = last (b); p != b; p = p->bk) |
5302 | { |
5303 | ++nblocks; |
5304 | avail += chunksize (p); |
5305 | } |
5306 | } |
5307 | |
5308 | m->smblks += nfastblocks; |
5309 | m->ordblks += nblocks; |
5310 | m->fordblks += avail; |
5311 | m->uordblks += av->system_mem - avail; |
5312 | m->arena += av->system_mem; |
5313 | m->fsmblks += fastavail; |
5314 | if (av == &main_arena) |
5315 | { |
5316 | m->hblks = mp_.n_mmaps; |
5317 | m->hblkhd = mp_.mmapped_mem; |
5318 | m->usmblks = 0; |
5319 | m->keepcost = chunksize (av->top); |
5320 | } |
5321 | } |
5322 | |
5323 | |
5324 | struct mallinfo2 |
5325 | __libc_mallinfo2 (void) |
5326 | { |
5327 | struct mallinfo2 m; |
5328 | mstate ar_ptr; |
5329 | |
5330 | if (!__malloc_initialized) |
5331 | ptmalloc_init (); |
5332 | |
memset (&m, 0, sizeof (m));
5334 | ar_ptr = &main_arena; |
5335 | do |
5336 | { |
5337 | __libc_lock_lock (ar_ptr->mutex); |
int_mallinfo (ar_ptr, &m);
5339 | __libc_lock_unlock (ar_ptr->mutex); |
5340 | |
5341 | ar_ptr = ar_ptr->next; |
5342 | } |
5343 | while (ar_ptr != &main_arena); |
5344 | |
5345 | return m; |
5346 | } |
5347 | libc_hidden_def (__libc_mallinfo2) |
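
/* Usage sketch (illustrative only): mallinfo2 is the size_t-based
   replacement for the legacy int-based mallinfo below.

   #include <malloc.h>
   #include <stdio.h>

   void print_heap_stats (void)
   {
     struct mallinfo2 mi = mallinfo2 ();
     printf ("total arena bytes: %zu\n", mi.arena);
     printf ("in-use bytes:      %zu\n", mi.uordblks);
     printf ("free bytes:        %zu\n", mi.fordblks);
     printf ("mmapped bytes:     %zu\n", mi.hblkhd);
   }
*/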
5348 | |
5349 | struct mallinfo |
5350 | __libc_mallinfo (void) |
5351 | { |
5352 | struct mallinfo m; |
5353 | struct mallinfo2 m2 = __libc_mallinfo2 (); |
5354 | |
5355 | m.arena = m2.arena; |
5356 | m.ordblks = m2.ordblks; |
5357 | m.smblks = m2.smblks; |
5358 | m.hblks = m2.hblks; |
5359 | m.hblkhd = m2.hblkhd; |
5360 | m.usmblks = m2.usmblks; |
5361 | m.fsmblks = m2.fsmblks; |
5362 | m.uordblks = m2.uordblks; |
5363 | m.fordblks = m2.fordblks; |
5364 | m.keepcost = m2.keepcost; |
5365 | |
5366 | return m; |
5367 | } |
5368 | |
5369 | |
5370 | /* |
5371 | ------------------------------ malloc_stats ------------------------------ |
5372 | */ |
5373 | |
5374 | void |
5375 | __malloc_stats (void) |
5376 | { |
5377 | int i; |
5378 | mstate ar_ptr; |
5379 | unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b; |
5380 | |
5381 | if (!__malloc_initialized) |
5382 | ptmalloc_init (); |
5383 | _IO_flockfile (stderr); |
5384 | int old_flags2 = stderr->_flags2; |
5385 | stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL; |
5386 | for (i = 0, ar_ptr = &main_arena;; i++) |
5387 | { |
5388 | struct mallinfo2 mi; |
5389 | |
memset (&mi, 0, sizeof (mi));
5391 | __libc_lock_lock (ar_ptr->mutex); |
int_mallinfo (ar_ptr, &mi);
fprintf (stderr, "Arena %d:\n", i);
fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
5396 | #if MALLOC_DEBUG > 1 |
5397 | if (i > 0) |
5398 | dump_heap (heap_for_ptr (top (ar_ptr))); |
5399 | #endif |
5400 | system_b += mi.arena; |
5401 | in_use_b += mi.uordblks; |
5402 | __libc_lock_unlock (ar_ptr->mutex); |
5403 | ar_ptr = ar_ptr->next; |
5404 | if (ar_ptr == &main_arena) |
5405 | break; |
5406 | } |
fprintf (stderr, "Total (incl. mmap):\n");
fprintf (stderr, "system bytes = %10u\n", system_b);
fprintf (stderr, "in use bytes = %10u\n", in_use_b);
fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
fprintf (stderr, "max mmap bytes = %10lu\n",
5412 | (unsigned long) mp_.max_mmapped_mem); |
5413 | stderr->_flags2 = old_flags2; |
5414 | _IO_funlockfile (stderr); |
5415 | } |
5416 | |
5417 | |
5418 | /* |
5419 | ------------------------------ mallopt ------------------------------ |
5420 | */ |
5421 | static __always_inline int |
5422 | do_set_trim_threshold (size_t value) |
5423 | { |
5424 | LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold, |
5425 | mp_.no_dyn_threshold); |
5426 | mp_.trim_threshold = value; |
5427 | mp_.no_dyn_threshold = 1; |
5428 | return 1; |
5429 | } |
5430 | |
5431 | static __always_inline int |
5432 | do_set_top_pad (size_t value) |
5433 | { |
5434 | LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad, |
5435 | mp_.no_dyn_threshold); |
5436 | mp_.top_pad = value; |
5437 | mp_.no_dyn_threshold = 1; |
5438 | return 1; |
5439 | } |
5440 | |
5441 | static __always_inline int |
5442 | do_set_mmap_threshold (size_t value) |
5443 | { |
5444 | LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold, |
5445 | mp_.no_dyn_threshold); |
5446 | mp_.mmap_threshold = value; |
5447 | mp_.no_dyn_threshold = 1; |
5448 | return 1; |
5449 | } |
5450 | |
5451 | static __always_inline int |
5452 | do_set_mmaps_max (int32_t value) |
5453 | { |
5454 | LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max, |
5455 | mp_.no_dyn_threshold); |
5456 | mp_.n_mmaps_max = value; |
5457 | mp_.no_dyn_threshold = 1; |
5458 | return 1; |
5459 | } |
5460 | |
5461 | static __always_inline int |
5462 | do_set_mallopt_check (int32_t value) |
5463 | { |
5464 | return 1; |
5465 | } |
5466 | |
5467 | static __always_inline int |
5468 | do_set_perturb_byte (int32_t value) |
5469 | { |
5470 | LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte); |
5471 | perturb_byte = value; |
5472 | return 1; |
5473 | } |
5474 | |
5475 | static __always_inline int |
5476 | do_set_arena_test (size_t value) |
5477 | { |
5478 | LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test); |
5479 | mp_.arena_test = value; |
5480 | return 1; |
5481 | } |
5482 | |
5483 | static __always_inline int |
5484 | do_set_arena_max (size_t value) |
5485 | { |
5486 | LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max); |
5487 | mp_.arena_max = value; |
5488 | return 1; |
5489 | } |
5490 | |
5491 | #if USE_TCACHE |
5492 | static __always_inline int |
5493 | do_set_tcache_max (size_t value) |
5494 | { |
5495 | if (value <= MAX_TCACHE_SIZE) |
5496 | { |
5497 | LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes); |
5498 | mp_.tcache_max_bytes = value; |
5499 | mp_.tcache_bins = csize2tidx (request2size(value)) + 1; |
5500 | return 1; |
5501 | } |
5502 | return 0; |
5503 | } |
5504 | |
5505 | static __always_inline int |
5506 | do_set_tcache_count (size_t value) |
5507 | { |
5508 | if (value <= MAX_TCACHE_COUNT) |
5509 | { |
5510 | LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count); |
5511 | mp_.tcache_count = value; |
5512 | return 1; |
5513 | } |
5514 | return 0; |
5515 | } |
5516 | |
5517 | static __always_inline int |
5518 | do_set_tcache_unsorted_limit (size_t value) |
5519 | { |
5520 | LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit); |
5521 | mp_.tcache_unsorted_limit = value; |
5522 | return 1; |
5523 | } |
5524 | #endif |
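
/* These per-thread cache limits are normally adjusted through the
   tunables framework rather than mallopt.  A sketch of the
   environment syntax (tunable names as used by this implementation;
   illustrative only):

   GLIBC_TUNABLES=glibc.malloc.tcache_count=4:glibc.malloc.tcache_max=1040 ./app

   caps each tcache bin at 4 cached chunks and caches requests of up
   to 1040 bytes.  */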
5525 | |
5526 | static __always_inline int |
5527 | do_set_mxfast (size_t value) |
5528 | { |
5529 | if (value <= MAX_FAST_SIZE) |
5530 | { |
5531 | LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ()); |
5532 | set_max_fast (value); |
5533 | return 1; |
5534 | } |
5535 | return 0; |
5536 | } |
5537 | |
5538 | static __always_inline int |
5539 | do_set_hugetlb (size_t value) |
5540 | { |
5541 | if (value == 1) |
5542 | { |
5543 | enum malloc_thp_mode_t thp_mode = __malloc_thp_mode (); |
5544 | /* |
Only enable THP madvise usage if the system supports it and is in
'madvise' mode; otherwise the madvise() call would be wasted.
5547 | */ |
5548 | if (thp_mode == malloc_thp_mode_madvise) |
5549 | mp_.thp_pagesize = __malloc_default_thp_pagesize (); |
5550 | } |
5551 | else if (value >= 2) |
__malloc_hugepage_config (value == 2 ? 0 : value, &mp_.hp_pagesize,
&mp_.hp_flags);
5554 | return 0; |
5555 | } |
5556 | |
5557 | int |
5558 | __libc_mallopt (int param_number, int value) |
5559 | { |
5560 | mstate av = &main_arena; |
5561 | int res = 1; |
5562 | |
5563 | if (!__malloc_initialized) |
5564 | ptmalloc_init (); |
5565 | __libc_lock_lock (av->mutex); |
5566 | |
5567 | LIBC_PROBE (memory_mallopt, 2, param_number, value); |
5568 | |
5569 | /* We must consolidate main arena before changing max_fast |
5570 | (see definition of set_max_fast). */ |
5571 | malloc_consolidate (av); |
5572 | |
5573 | /* Many of these helper functions take a size_t. We do not worry |
5574 | about overflow here, because negative int values will wrap to |
5575 | very large size_t values and the helpers have sufficient range |
5576 | checking for such conversions. Many of these helpers are also |
5577 | used by the tunables macros in arena.c. */ |
5578 | |
5579 | switch (param_number) |
5580 | { |
5581 | case M_MXFAST: |
5582 | res = do_set_mxfast (value); |
5583 | break; |
5584 | |
5585 | case M_TRIM_THRESHOLD: |
5586 | res = do_set_trim_threshold (value); |
5587 | break; |
5588 | |
5589 | case M_TOP_PAD: |
5590 | res = do_set_top_pad (value); |
5591 | break; |
5592 | |
5593 | case M_MMAP_THRESHOLD: |
5594 | res = do_set_mmap_threshold (value); |
5595 | break; |
5596 | |
5597 | case M_MMAP_MAX: |
5598 | res = do_set_mmaps_max (value); |
5599 | break; |
5600 | |
5601 | case M_CHECK_ACTION: |
5602 | res = do_set_mallopt_check (value); |
5603 | break; |
5604 | |
5605 | case M_PERTURB: |
5606 | res = do_set_perturb_byte (value); |
5607 | break; |
5608 | |
5609 | case M_ARENA_TEST: |
5610 | if (value > 0) |
5611 | res = do_set_arena_test (value); |
5612 | break; |
5613 | |
5614 | case M_ARENA_MAX: |
5615 | if (value > 0) |
5616 | res = do_set_arena_max (value); |
5617 | break; |
5618 | } |
5619 | __libc_lock_unlock (av->mutex); |
5620 | return res; |
5621 | } |
5622 | libc_hidden_def (__libc_mallopt) |
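
/* Usage sketch for the public entry point (illustrative only); the
   M_* parameter constants are declared in <malloc.h>:

   #include <malloc.h>

   void tune_malloc (void)
   {
     // Cap fastbin handling at 64-byte requests.
     mallopt (M_MXFAST, 64);
     // Fill newly allocated and freed memory with a recognizable
     // pattern derived from this byte (helps catch use of
     // uninitialized or freed memory).
     mallopt (M_PERTURB, 0x5a);
   }
*/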
5623 | |
5624 | |
5625 | /* |
5626 | -------------------- Alternative MORECORE functions -------------------- |
5627 | */ |
5628 | |
5629 | |
5630 | /* |
5631 | General Requirements for MORECORE. |
5632 | |
5633 | The MORECORE function must have the following properties: |
5634 | |
5635 | If MORECORE_CONTIGUOUS is false: |
5636 | |
5637 | * MORECORE must allocate in multiples of pagesize. It will |
5638 | only be called with arguments that are multiples of pagesize. |
5639 | |
5640 | * MORECORE(0) must return an address that is at least |
5641 | MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.) |
5642 | |
5643 | else (i.e. If MORECORE_CONTIGUOUS is true): |
5644 | |
5645 | * Consecutive calls to MORECORE with positive arguments |
5646 | return increasing addresses, indicating that space has been |
5647 | contiguously extended. |
5648 | |
5649 | * MORECORE need not allocate in multiples of pagesize. |
5650 | Calls to MORECORE need not have args of multiples of pagesize. |
5651 | |
5652 | * MORECORE need not page-align. |
5653 | |
5654 | In either case: |
5655 | |
5656 | * MORECORE may allocate more memory than requested. (Or even less, |
5657 | but this will generally result in a malloc failure.) |
5658 | |
5659 | * MORECORE must not allocate memory when given argument zero, but |
5660 | instead return one past the end address of memory from previous |
5661 | nonzero call. This malloc does NOT call MORECORE(0) |
5662 | until at least one call with positive arguments is made, so |
5663 | the initial value returned is not important. |
5664 | |
5665 | * Even though consecutive calls to MORECORE need not return contiguous |
5666 | addresses, it must be OK for malloc'ed chunks to span multiple |
5667 | regions in those cases where they do happen to be contiguous. |
5668 | |
5669 | * MORECORE need not handle negative arguments -- it may instead |
5670 | just return MORECORE_FAILURE when given negative arguments. |
5671 | Negative arguments are always multiples of pagesize. MORECORE |
5672 | must not misinterpret negative args as large positive unsigned |
5673 | args. You can suppress all such calls from even occurring by defining |
MORECORE_CANNOT_TRIM.
5675 | |
5676 | There is some variation across systems about the type of the |
5677 | argument to sbrk/MORECORE. If size_t is unsigned, then it cannot |
5678 | actually be size_t, because sbrk supports negative args, so it is |
5679 | normally the signed type of the same width as size_t (sometimes |
5680 | declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much |
5681 | matter though. Internally, we use "long" as arguments, which should |
5682 | work across all reasonable possibilities. |
5683 | |
5684 | Additionally, if MORECORE ever returns failure for a positive |
5685 | request, then mmap is used as a noncontiguous system allocator. This |
5686 | is a useful backup strategy for systems with holes in address spaces |
5687 | -- in this case sbrk cannot contiguously expand the heap, but mmap |
5688 | may be able to map noncontiguous space. |
5689 | |
5690 | If you'd like mmap to ALWAYS be used, you can define MORECORE to be |
5691 | a function that always returns MORECORE_FAILURE. |
5692 | |
5693 | If you are using this malloc with something other than sbrk (or its |
5694 | emulation) to supply memory regions, you probably want to set |
5695 | MORECORE_CONTIGUOUS as false. As an example, here is a custom |
5696 | allocator kindly contributed for pre-OSX macOS. It uses virtually |
5697 | but not necessarily physically contiguous non-paged memory (locked |
5698 | in, present and won't get swapped out). You can use it by |
5699 | uncommenting this section, adding some #includes, and setting up the |
5700 | appropriate defines above: |
5701 | |
5702 | *#define MORECORE osMoreCore |
5703 | *#define MORECORE_CONTIGUOUS 0 |
5704 | |
5705 | There is also a shutdown routine that should somehow be called for |
5706 | cleanup upon program exit. |
5707 | |
5708 | *#define MAX_POOL_ENTRIES 100 |
5709 | *#define MINIMUM_MORECORE_SIZE (64 * 1024) |
5710 | static int next_os_pool; |
5711 | void *our_os_pools[MAX_POOL_ENTRIES]; |
5712 | |
5713 | void *osMoreCore(int size) |
5714 | { |
5715 | void *ptr = 0; |
5716 | static void *sbrk_top = 0; |
5717 | |
5718 | if (size > 0) |
5719 | { |
5720 | if (size < MINIMUM_MORECORE_SIZE) |
5721 | size = MINIMUM_MORECORE_SIZE; |
5722 | if (CurrentExecutionLevel() == kTaskLevel) |
5723 | ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); |
5724 | if (ptr == 0) |
5725 | { |
5726 | return (void *) MORECORE_FAILURE; |
5727 | } |
5728 | // save ptrs so they can be freed during cleanup |
5729 | our_os_pools[next_os_pool] = ptr; |
5730 | next_os_pool++; |
5731 | ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); |
5732 | sbrk_top = (char *) ptr + size; |
5733 | return ptr; |
5734 | } |
5735 | else if (size < 0) |
5736 | { |
5737 | // we don't currently support shrink behavior |
5738 | return (void *) MORECORE_FAILURE; |
5739 | } |
5740 | else |
5741 | { |
5742 | return sbrk_top; |
5743 | } |
5744 | } |
5745 | |
5746 | // cleanup any allocated memory pools |
5747 | // called as last thing before shutting down driver |
5748 | |
5749 | void osCleanupMem(void) |
5750 | { |
5751 | void **ptr; |
5752 | |
5753 | for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) |
5754 | if (*ptr) |
5755 | { |
5756 | PoolDeallocate(*ptr); |
5757 | * ptr = 0; |
5758 | } |
5759 | } |
5760 | |
5761 | */ |
5762 | |
5763 | |
5764 | /* Helper code. */ |
5765 | |
5766 | extern char **__libc_argv attribute_hidden; |
5767 | |
5768 | static void |
5769 | malloc_printerr (const char *str) |
5770 | { |
5771 | #if IS_IN (libc) |
__libc_message ("%s\n", str);
5773 | #else |
__libc_fatal (str);
5775 | #endif |
5776 | __builtin_unreachable (); |
5777 | } |
5778 | |
5779 | #if IS_IN (libc) |
5780 | /* We need a wrapper function for one of the additions of POSIX. */ |
5781 | int |
5782 | __posix_memalign (void **memptr, size_t alignment, size_t size) |
5783 | { |
5784 | void *mem; |
5785 | |
5786 | if (!__malloc_initialized) |
5787 | ptmalloc_init (); |
5788 | |
/* Test whether the ALIGNMENT argument is valid.  It must be a power
   of two multiple of sizeof (void *).  */
5791 | if (alignment % sizeof (void *) != 0 |
5792 | || !powerof2 (alignment / sizeof (void *)) |
5793 | || alignment == 0) |
5794 | return EINVAL; |
5795 | |
5796 | |
5797 | void *address = RETURN_ADDRESS (0); |
5798 | mem = _mid_memalign (alignment, size, address); |
5799 | |
5800 | if (mem != NULL) |
5801 | { |
5802 | *memptr = mem; |
5803 | return 0; |
5804 | } |
5805 | |
5806 | return ENOMEM; |
5807 | } |
5808 | weak_alias (__posix_memalign, posix_memalign) |
5809 | #endif |
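
/* Usage sketch (illustrative only): the alignment must be a power of
   two multiple of sizeof (void *), and the result is released with
   free as usual.

   #include <stdlib.h>

   int main (void)
   {
     void *buf;
     if (posix_memalign (&buf, 64, 1024) != 0)
       return 1;              // EINVAL or ENOMEM; buf is untouched
     free (buf);
     return 0;
   }
*/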
5810 | |
5811 | |
5812 | int |
5813 | __malloc_info (int options, FILE *fp) |
5814 | { |
5815 | /* For now, at least. */ |
5816 | if (options != 0) |
5817 | return EINVAL; |
5818 | |
5819 | int n = 0; |
5820 | size_t total_nblocks = 0; |
5821 | size_t total_nfastblocks = 0; |
5822 | size_t total_avail = 0; |
5823 | size_t total_fastavail = 0; |
5824 | size_t total_system = 0; |
5825 | size_t total_max_system = 0; |
5826 | size_t total_aspace = 0; |
5827 | size_t total_aspace_mprotect = 0; |
5828 | |
5829 | |
5830 | |
5831 | if (!__malloc_initialized) |
5832 | ptmalloc_init (); |
5833 | |
fputs ("<malloc version=\"1\">\n", fp);
5835 | |
5836 | /* Iterate over all arenas currently in use. */ |
5837 | mstate ar_ptr = &main_arena; |
5838 | do |
5839 | { |
fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5841 | |
5842 | size_t nblocks = 0; |
5843 | size_t nfastblocks = 0; |
5844 | size_t avail = 0; |
5845 | size_t fastavail = 0; |
5846 | struct |
5847 | { |
5848 | size_t from; |
5849 | size_t to; |
5850 | size_t total; |
5851 | size_t count; |
5852 | } sizes[NFASTBINS + NBINS - 1]; |
5853 | #define nsizes (sizeof (sizes) / sizeof (sizes[0])) |
5854 | |
5855 | __libc_lock_lock (ar_ptr->mutex); |
5856 | |
5857 | /* Account for top chunk. The top-most available chunk is |
5858 | treated specially and is never in any bin. See "initial_top" |
5859 | comments. */ |
5860 | avail = chunksize (ar_ptr->top); |
5861 | nblocks = 1; /* Top always exists. */ |
5862 | |
5863 | for (size_t i = 0; i < NFASTBINS; ++i) |
5864 | { |
5865 | mchunkptr p = fastbin (ar_ptr, i); |
5866 | if (p != NULL) |
5867 | { |
5868 | size_t nthissize = 0; |
5869 | size_t thissize = chunksize (p); |
5870 | |
5871 | while (p != NULL) |
5872 | { |
5873 | if (__glibc_unlikely (misaligned_chunk (p))) |
malloc_printerr ("__malloc_info(): "
"unaligned fastbin chunk detected");
5876 | ++nthissize; |
5877 | p = REVEAL_PTR (p->fd); |
5878 | } |
5879 | |
5880 | fastavail += nthissize * thissize; |
5881 | nfastblocks += nthissize; |
5882 | sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1); |
5883 | sizes[i].to = thissize; |
5884 | sizes[i].count = nthissize; |
5885 | } |
5886 | else |
5887 | sizes[i].from = sizes[i].to = sizes[i].count = 0; |
5888 | |
5889 | sizes[i].total = sizes[i].count * sizes[i].to; |
5890 | } |
5891 | |
5892 | |
5893 | mbinptr bin; |
5894 | struct malloc_chunk *r; |
5895 | |
5896 | for (size_t i = 1; i < NBINS; ++i) |
5897 | { |
5898 | bin = bin_at (ar_ptr, i); |
5899 | r = bin->fd; |
5900 | sizes[NFASTBINS - 1 + i].from = ~((size_t) 0); |
5901 | sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total |
5902 | = sizes[NFASTBINS - 1 + i].count = 0; |
5903 | |
5904 | if (r != NULL) |
5905 | while (r != bin) |
5906 | { |
5907 | size_t r_size = chunksize_nomask (r); |
5908 | ++sizes[NFASTBINS - 1 + i].count; |
5909 | sizes[NFASTBINS - 1 + i].total += r_size; |
5910 | sizes[NFASTBINS - 1 + i].from |
5911 | = MIN (sizes[NFASTBINS - 1 + i].from, r_size); |
5912 | sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to, |
5913 | r_size); |
5914 | |
5915 | r = r->fd; |
5916 | } |
5917 | |
5918 | if (sizes[NFASTBINS - 1 + i].count == 0) |
5919 | sizes[NFASTBINS - 1 + i].from = 0; |
5920 | nblocks += sizes[NFASTBINS - 1 + i].count; |
5921 | avail += sizes[NFASTBINS - 1 + i].total; |
5922 | } |
5923 | |
5924 | size_t heap_size = 0; |
5925 | size_t heap_mprotect_size = 0; |
5926 | size_t heap_count = 0; |
5927 | if (ar_ptr != &main_arena) |
5928 | { |
5929 | /* Iterate over the arena heaps from back to front. */ |
5930 | heap_info *heap = heap_for_ptr (top (ar_ptr)); |
5931 | do |
5932 | { |
5933 | heap_size += heap->size; |
5934 | heap_mprotect_size += heap->mprotect_size; |
5935 | heap = heap->prev; |
5936 | ++heap_count; |
5937 | } |
5938 | while (heap != NULL); |
5939 | } |
5940 | |
5941 | __libc_lock_unlock (ar_ptr->mutex); |
5942 | |
5943 | total_nfastblocks += nfastblocks; |
5944 | total_fastavail += fastavail; |
5945 | |
5946 | total_nblocks += nblocks; |
5947 | total_avail += avail; |
5948 | |
5949 | for (size_t i = 0; i < nsizes; ++i) |
5950 | if (sizes[i].count != 0 && i != NFASTBINS) |
fprintf (fp, "\
<size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5953 | sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count); |
5954 | |
5955 | if (sizes[NFASTBINS].count != 0) |
fprintf (fp, "\
<unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5958 | sizes[NFASTBINS].from, sizes[NFASTBINS].to, |
5959 | sizes[NFASTBINS].total, sizes[NFASTBINS].count); |
5960 | |
5961 | total_system += ar_ptr->system_mem; |
5962 | total_max_system += ar_ptr->max_system_mem; |
5963 | |
fprintf (fp,
"</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
"<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
"<system type=\"current\" size=\"%zu\"/>\n"
"<system type=\"max\" size=\"%zu\"/>\n",
5969 | nfastblocks, fastavail, nblocks, avail, |
5970 | ar_ptr->system_mem, ar_ptr->max_system_mem); |
5971 | |
5972 | if (ar_ptr != &main_arena) |
5973 | { |
fprintf (fp,
"<aspace type=\"total\" size=\"%zu\"/>\n"
"<aspace type=\"mprotect\" size=\"%zu\"/>\n"
"<aspace type=\"subheaps\" size=\"%zu\"/>\n",
5978 | heap_size, heap_mprotect_size, heap_count); |
5979 | total_aspace += heap_size; |
5980 | total_aspace_mprotect += heap_mprotect_size; |
5981 | } |
5982 | else |
5983 | { |
fprintf (fp,
"<aspace type=\"total\" size=\"%zu\"/>\n"
"<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5987 | ar_ptr->system_mem, ar_ptr->system_mem); |
5988 | total_aspace += ar_ptr->system_mem; |
5989 | total_aspace_mprotect += ar_ptr->system_mem; |
5990 | } |
5991 | |
fputs ("</heap>\n", fp);
5993 | ar_ptr = ar_ptr->next; |
5994 | } |
5995 | while (ar_ptr != &main_arena); |
5996 | |
fprintf (fp,
"<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
"<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
"<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
"<system type=\"current\" size=\"%zu\"/>\n"
"<system type=\"max\" size=\"%zu\"/>\n"
"<aspace type=\"total\" size=\"%zu\"/>\n"
"<aspace type=\"mprotect\" size=\"%zu\"/>\n"
"</malloc>\n",
6006 | total_nfastblocks, total_fastavail, total_nblocks, total_avail, |
6007 | mp_.n_mmaps, mp_.mmapped_mem, |
6008 | total_system, total_max_system, |
6009 | total_aspace, total_aspace_mprotect); |
6010 | |
6011 | return 0; |
6012 | } |
6013 | #if IS_IN (libc) |
6014 | weak_alias (__malloc_info, malloc_info) |
6015 | |
6016 | strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc) |
6017 | strong_alias (__libc_free, __free) strong_alias (__libc_free, free) |
6018 | strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc) |
6019 | strong_alias (__libc_memalign, __memalign) |
6020 | weak_alias (__libc_memalign, memalign) |
6021 | strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc) |
6022 | strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc) |
6023 | strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc) |
6024 | strong_alias (__libc_mallinfo, __mallinfo) |
6025 | weak_alias (__libc_mallinfo, mallinfo) |
6026 | strong_alias (__libc_mallinfo2, __mallinfo2) |
6027 | weak_alias (__libc_mallinfo2, mallinfo2) |
6028 | strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt) |
6029 | |
6030 | weak_alias (__malloc_stats, malloc_stats) |
6031 | weak_alias (__malloc_usable_size, malloc_usable_size) |
6032 | weak_alias (__malloc_trim, malloc_trim) |
6033 | #endif |
6034 | |
6035 | #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26) |
6036 | compat_symbol (libc, __libc_free, cfree, GLIBC_2_0); |
6037 | #endif |
6038 | |
6039 | /* ------------------------------------------------------------ |
6040 | History: |
6041 | |
6042 | [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc] |
6043 | |
6044 | */ |
6045 | /* |
6046 | * Local variables: |
6047 | * c-basic-offset: 2 |
6048 | * End: |
6049 | */ |
6050 | |