1//////////////////////////////////////////////////////////////////////////////
2//
3// (C) Copyright Ion Gaztanaga 2007-2015. Distributed under the Boost
4// Software License, Version 1.0. (See accompanying file
5// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6//
7// See http://www.boost.org/libs/container for documentation.
8//
9//////////////////////////////////////////////////////////////////////////////
10
11#include <boost/container/detail/alloc_lib.h>
12
13#include "errno.h" //dlmalloc bug EINVAL is used in posix_memalign without checking LACKS_ERRNO_H
14#include "limits.h" //CHAR_BIT
15#ifdef BOOST_CONTAINER_DLMALLOC_FOOTERS
16#define FOOTERS 1
17#endif
18#define USE_LOCKS 1
19#define MSPACES 1
20#define NO_MALLINFO 1
21#define NO_MALLOC_STATS 1
22//disable sbrk as it's deprecated in some systems and weakens ASLR
23#define HAVE_MORECORE 0
24
25
26#if !defined(NDEBUG)
27 #if !defined(DEBUG)
28 #define DEBUG 1
29 #define DL_DEBUG_DEFINED
30 #endif
31#endif
32
33#define USE_DL_PREFIX
34
35#ifdef __GNUC__
36#define FORCEINLINE inline
37#endif
38
39#ifdef _MSC_VER
40#pragma warning (push)
#pragma warning (disable : 4127) /*conditional expression is constant*/
#pragma warning (disable : 4267)
#pragma warning (disable : 4702) /*unreachable code*/
#pragma warning (disable : 4390) /*empty controlled statement found; is this the intent?*/
#pragma warning (disable : 4251 4231 4660) /*dll warnings*/
#pragma warning (disable : 4057) /*differs in indirection to slightly different base types from*/
50#endif
51
52#include "dlmalloc_2_8_6.c"
53
54#define DL_SIZE_IMPL(p) (chunksize(mem2chunk(p)) - overhead_for(mem2chunk(p)))
55
56static size_t s_allocated_memory;
57
58///////////////////////////////////////////////////////////////
59///////////////////////////////////////////////////////////////
60///////////////////////////////////////////////////////////////
61//
62// SLIGHTLY MODIFIED DLMALLOC FUNCTIONS
63//
64///////////////////////////////////////////////////////////////
65///////////////////////////////////////////////////////////////
66///////////////////////////////////////////////////////////////
67
//This function is equivalent to mspace_free
//with PREACTION replaced by 0 and POSTACTION by nothing
70static void mspace_free_lockless(mspace msp, void* mem)
71{
72 if (mem != 0) {
73 mchunkptr p = mem2chunk(mem);
74#if FOOTERS
75 mstate fm = get_mstate_for(p);
      (void)msp; /* placate people compiling -Wunused */
77#else /* FOOTERS */
78 mstate fm = (mstate)msp;
79#endif /* FOOTERS */
80 if (!ok_magic(fm)) {
81 USAGE_ERROR_ACTION(fm, p);
82 return;
83 }
84 if (!0){//PREACTION(fm)) {
85 check_inuse_chunk(fm, p);
86 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
87 size_t psize = chunksize(p);
88 mchunkptr next = chunk_plus_offset(p, psize);
89 if (!pinuse(p)) {
90 size_t prevsize = p->prev_foot;
91 if (is_mmapped(p)) {
92 psize += prevsize + MMAP_FOOT_PAD;
93 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
94 fm->footprint -= psize;
95 goto postaction;
96 }
97 else {
98 mchunkptr prev = chunk_minus_offset(p, prevsize);
99 psize += prevsize;
100 p = prev;
101 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
102 if (p != fm->dv) {
103 unlink_chunk(fm, p, prevsize);
104 }
105 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
106 fm->dvsize = psize;
107 set_free_with_pinuse(p, psize, next);
108 goto postaction;
109 }
110 }
111 else
112 goto erroraction;
113 }
114 }
115
116 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
117 if (!cinuse(next)) { /* consolidate forward */
118 if (next == fm->top) {
119 size_t tsize = fm->topsize += psize;
120 fm->top = p;
121 p->head = tsize | PINUSE_BIT;
122 if (p == fm->dv) {
123 fm->dv = 0;
124 fm->dvsize = 0;
125 }
126 if (should_trim(fm, tsize))
                     sys_trim(fm, 0);
128 goto postaction;
129 }
130 else if (next == fm->dv) {
131 size_t dsize = fm->dvsize += psize;
132 fm->dv = p;
133 set_size_and_pinuse_of_free_chunk(p, dsize);
134 goto postaction;
135 }
136 else {
137 size_t nsize = chunksize(next);
138 psize += nsize;
139 unlink_chunk(fm, next, nsize);
140 set_size_and_pinuse_of_free_chunk(p, psize);
141 if (p == fm->dv) {
142 fm->dvsize = psize;
143 goto postaction;
144 }
145 }
146 }
147 else
148 set_free_with_pinuse(p, psize, next);
149
150 if (is_small(psize)) {
151 insert_small_chunk(fm, p, psize);
152 check_free_chunk(fm, p);
153 }
154 else {
155 tchunkptr tp = (tchunkptr)p;
156 insert_large_chunk(fm, tp, psize);
157 check_free_chunk(fm, p);
158 if (--fm->release_checks == 0)
                  release_unused_segments(fm);
160 }
161 goto postaction;
162 }
163 }
164 erroraction:
165 USAGE_ERROR_ACTION(fm, p);
166 postaction:
167 ;//POSTACTION(fm);
168 }
169 }
170}
171
//This function is equivalent to mspace_malloc
//with PREACTION replaced by 0 and POSTACTION by nothing
174void* mspace_malloc_lockless(mspace msp, size_t bytes)
175{
176 mstate ms = (mstate)msp;
177 if (!ok_magic(ms)) {
178 USAGE_ERROR_ACTION(ms,ms);
179 return 0;
180 }
181 if (!0){//PREACTION(ms)) {
182 void* mem;
183 size_t nb;
184 if (bytes <= MAX_SMALL_REQUEST) {
185 bindex_t idx;
186 binmap_t smallbits;
187 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
188 idx = small_index(nb);
189 smallbits = ms->smallmap >> idx;
190
191 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
192 mchunkptr b, p;
193 idx += ~smallbits & 1; /* Uses next bin if idx empty */
194 b = smallbin_at(ms, idx);
195 p = b->fd;
196 assert(chunksize(p) == small_index2size(idx));
197 unlink_first_small_chunk(ms, b, p, idx);
198 set_inuse_and_pinuse(ms, p, small_index2size(idx));
199 mem = chunk2mem(p);
200 check_malloced_chunk(ms, mem, nb);
201 goto postaction;
202 }
203
204 else if (nb > ms->dvsize) {
205 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
206 mchunkptr b, p, r;
207 size_t rsize;
208 bindex_t i;
209 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
210 binmap_t leastbit = least_bit(leftbits);
211 compute_bit2idx(leastbit, i);
212 b = smallbin_at(ms, i);
213 p = b->fd;
214 assert(chunksize(p) == small_index2size(i));
215 unlink_first_small_chunk(ms, b, p, i);
216 rsize = small_index2size(i) - nb;
217 /* Fit here cannot be remainderless if 4byte sizes */
218 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
219 set_inuse_and_pinuse(ms, p, small_index2size(i));
220 else {
221 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
222 r = chunk_plus_offset(p, nb);
223 set_size_and_pinuse_of_free_chunk(r, rsize);
224 replace_dv(ms, r, rsize);
225 }
226 mem = chunk2mem(p);
227 check_malloced_chunk(ms, mem, nb);
228 goto postaction;
229 }
230
            else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
232 check_malloced_chunk(ms, mem, nb);
233 goto postaction;
234 }
235 }
236 }
237 else if (bytes >= MAX_REQUEST)
238 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
239 else {
240 nb = pad_request(bytes);
         if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
242 check_malloced_chunk(ms, mem, nb);
243 goto postaction;
244 }
245 }
246
247 if (nb <= ms->dvsize) {
248 size_t rsize = ms->dvsize - nb;
249 mchunkptr p = ms->dv;
250 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
251 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
252 ms->dvsize = rsize;
253 set_size_and_pinuse_of_free_chunk(r, rsize);
254 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
255 }
256 else { /* exhaust dv */
257 size_t dvs = ms->dvsize;
258 ms->dvsize = 0;
259 ms->dv = 0;
260 set_inuse_and_pinuse(ms, p, dvs);
261 }
262 mem = chunk2mem(p);
263 check_malloced_chunk(ms, mem, nb);
264 goto postaction;
265 }
266
267 else if (nb < ms->topsize) { /* Split top */
268 size_t rsize = ms->topsize -= nb;
269 mchunkptr p = ms->top;
270 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
271 r->head = rsize | PINUSE_BIT;
272 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
273 mem = chunk2mem(p);
274 check_top_chunk(ms, ms->top);
275 check_malloced_chunk(ms, mem, nb);
276 goto postaction;
277 }
278
      mem = sys_alloc(ms, nb);
280
281 postaction:
282 ;//POSTACTION(ms);
283 return mem;
284 }
285
286 return 0;
287}
288
//This function is equivalent to try_realloc_chunk but handles
//minimum and desired sizes
291static mchunkptr try_realloc_chunk_with_min(mstate m, mchunkptr p, size_t min_nb, size_t des_nb, int can_move)
292{
293 mchunkptr newp = 0;
294 size_t oldsize = chunksize(p);
295 mchunkptr next = chunk_plus_offset(p, oldsize);
296 if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
297 ok_next(p, next) && ok_pinuse(next))) {
298 if (is_mmapped(p)) {
         newp = mmap_resize(m, p, des_nb, can_move);
         if(!newp)   //mmap does not report how many bytes could have been reallocated, so fall back to the minimum
            newp = mmap_resize(m, p, min_nb, can_move);
302 }
303 else if (oldsize >= min_nb) { /* already big enough */
304 size_t nb = oldsize >= des_nb ? des_nb : oldsize;
305 size_t rsize = oldsize - nb;
306 if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
307 mchunkptr r = chunk_plus_offset(p, nb);
308 set_inuse(m, p, nb);
309 set_inuse(m, r, rsize);
            dispose_chunk(m, r, rsize);
311 }
312 newp = p;
313 }
314 else if (next == m->top) { /* extend into top */
315 if (oldsize + m->topsize > min_nb) {
316 size_t nb = (oldsize + m->topsize) > des_nb ? des_nb : (oldsize + m->topsize - MALLOC_ALIGNMENT);
317 size_t newsize = oldsize + m->topsize;
318 size_t newtopsize = newsize - nb;
319 mchunkptr newtop = chunk_plus_offset(p, nb);
320 set_inuse(m, p, nb);
321 newtop->head = newtopsize |PINUSE_BIT;
322 m->top = newtop;
323 m->topsize = newtopsize;
324 newp = p;
325 }
326 }
327 else if (next == m->dv) { /* extend into dv */
328 size_t dvs = m->dvsize;
329 if (oldsize + dvs >= min_nb) {
330 size_t nb = (oldsize + dvs) >= des_nb ? des_nb : (oldsize + dvs);
331 size_t dsize = oldsize + dvs - nb;
332 if (dsize >= MIN_CHUNK_SIZE) {
333 mchunkptr r = chunk_plus_offset(p, nb);
334 mchunkptr n = chunk_plus_offset(r, dsize);
335 set_inuse(m, p, nb);
336 set_size_and_pinuse_of_free_chunk(r, dsize);
337 clear_pinuse(n);
338 m->dvsize = dsize;
339 m->dv = r;
340 }
341 else { /* exhaust dv */
342 size_t newsize = oldsize + dvs;
343 set_inuse(m, p, newsize);
344 m->dvsize = 0;
345 m->dv = 0;
346 }
347 newp = p;
348 }
349 }
350 else if (!cinuse(next)) { /* extend into next free chunk */
351 size_t nextsize = chunksize(next);
352 if (oldsize + nextsize >= min_nb) {
353 size_t nb = (oldsize + nextsize) >= des_nb ? des_nb : (oldsize + nextsize);
354 size_t rsize = oldsize + nextsize - nb;
355 unlink_chunk(m, next, nextsize);
356 if (rsize < MIN_CHUNK_SIZE) {
357 size_t newsize = oldsize + nextsize;
358 set_inuse(m, p, newsize);
359 }
360 else {
361 mchunkptr r = chunk_plus_offset(p, nb);
362 set_inuse(m, p, nb);
363 set_inuse(m, r, rsize);
               dispose_chunk(m, r, rsize);
365 }
366 newp = p;
367 }
368 }
369 }
370 else {
371 USAGE_ERROR_ACTION(m, chunk2mem(p));
372 }
373 return newp;
374}
375
376///////////////////////////////////////////////////////////////
377///////////////////////////////////////////////////////////////
378///////////////////////////////////////////////////////////////
379//
380// NEW FUNCTIONS BASED ON DLMALLOC INTERNALS
381//
382///////////////////////////////////////////////////////////////
383///////////////////////////////////////////////////////////////
384///////////////////////////////////////////////////////////////
385
386#define GET_TRUNCATED_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE)/(ROUNDTO)*(ROUNDTO))
387#define GET_ROUNDED_SIZE(ORIG_SIZE, ROUNDTO) ((((ORIG_SIZE)-1)/(ROUNDTO)+1)*(ROUNDTO))
388#define GET_TRUNCATED_PO2_SIZE(ORIG_SIZE, ROUNDTO) ((ORIG_SIZE) & (~(ROUNDTO-1)))
389#define GET_ROUNDED_PO2_SIZE(ORIG_SIZE, ROUNDTO) (((ORIG_SIZE - 1) & (~(ROUNDTO-1))) + ROUNDTO)
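
/* Illustrative worked values for the rounding macros above (not part of the
   original source; assumes ROUNDTO == 8, a power of two as the *_PO2_* variants
   require):
      GET_TRUNCATED_SIZE(37, 8)     == 32
      GET_ROUNDED_SIZE(37, 8)       == 40
      GET_TRUNCATED_PO2_SIZE(37, 8) == 32
      GET_ROUNDED_PO2_SIZE(37, 8)   == 40
   Sizes that are already multiples of ROUNDTO are left unchanged when rounding
   up, e.g. GET_ROUNDED_SIZE(40, 8) == 40 and GET_ROUNDED_PO2_SIZE(40, 8) == 40. */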
390
391/* Greatest common divisor and least common multiple
392 gcd is an algorithm that calculates the greatest common divisor of two
393 integers, using Euclid's algorithm.
394
395 Pre: A > 0 && B > 0
396 Recommended: A > B*/
397#define CALCULATE_GCD(A, B, OUT)\
398{\
399 size_t a = A;\
400 size_t b = B;\
401 do\
402 {\
403 size_t tmp = b;\
404 b = a % b;\
405 a = tmp;\
406 } while (b != 0);\
407\
408 OUT = a;\
409}
410
411/* lcm is an algorithm that calculates the least common multiple of two
412 integers.
413
414 Pre: A > 0 && B > 0
415 Recommended: A > B*/
416#define CALCULATE_LCM(A, B, OUT)\
417{\
418 CALCULATE_GCD(A, B, OUT);\
419 OUT = (A / OUT)*B;\
420}
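
/* Minimal usage sketch (an assumption for illustration, not in the original
   source): both macros expand to a brace-enclosed block, so they are used as
   statements and write their result into OUT. For A == 12 and B == 8:
      CALCULATE_GCD(12, 8, out) leaves out == 4
      CALCULATE_LCM(12, 8, out) leaves out == 24   (12/4 * 8)
*/
#if 0 /* hypothetical helper, shown for illustration only */
static size_t example_lcm(size_t x, size_t y)
{
   size_t out;
   CALCULATE_LCM(x, y, out);   /* note: arguments must not be named 'a' or 'b',
                                  which would shadow the macro's locals */
   return out;
}
#endif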
421
422static int calculate_lcm_and_needs_backwards_lcmed
423 (size_t backwards_multiple, size_t received_size, size_t size_to_achieve,
424 size_t *plcm, size_t *pneeds_backwards_lcmed)
425{
426 /* Now calculate lcm */
427 size_t max = backwards_multiple;
428 size_t min = MALLOC_ALIGNMENT;
429 size_t needs_backwards;
430 size_t needs_backwards_lcmed;
431 size_t lcm;
432 size_t current_forward;
433 /*Swap if necessary*/
434 if(max < min){
435 size_t tmp = min;
436 min = max;
437 max = tmp;
438 }
439 /*Check if it's power of two*/
440 if((backwards_multiple & (backwards_multiple-1)) == 0){
441 if(0 != (size_to_achieve & ((backwards_multiple-1)))){
442 USAGE_ERROR_ACTION(m, oldp);
443 return 0;
444 }
445
446 lcm = max;
447 /*If we want to use minbytes data to get a buffer between maxbytes
448 and minbytes if maxbytes can't be achieved, calculate the
449 biggest of all possibilities*/
450 current_forward = GET_TRUNCATED_PO2_SIZE(received_size, backwards_multiple);
451 needs_backwards = size_to_achieve - current_forward;
452 assert((needs_backwards % backwards_multiple) == 0);
453 needs_backwards_lcmed = GET_ROUNDED_PO2_SIZE(needs_backwards, lcm);
454 *plcm = lcm;
455 *pneeds_backwards_lcmed = needs_backwards_lcmed;
456 return 1;
457 }
458 /*Check if it's multiple of alignment*/
459 else if((backwards_multiple & (MALLOC_ALIGNMENT - 1u)) == 0){
460 lcm = backwards_multiple;
461 current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
462 //No need to round needs_backwards because backwards_multiple == lcm
463 needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
464 assert((needs_backwards_lcmed & (MALLOC_ALIGNMENT - 1u)) == 0);
465 *plcm = lcm;
466 *pneeds_backwards_lcmed = needs_backwards_lcmed;
467 return 1;
468 }
   /*Check if it's a multiple of half the alignment*/
470 else if((backwards_multiple & ((MALLOC_ALIGNMENT/2u) - 1u)) == 0){
471 lcm = backwards_multiple*2u;
472 current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
473 needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
474 if(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
475 //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
476 needs_backwards_lcmed += backwards_multiple;
477 assert((needs_backwards_lcmed % lcm) == 0);
478 *plcm = lcm;
479 *pneeds_backwards_lcmed = needs_backwards_lcmed;
480 return 1;
481 }
   /*Check if it's a multiple of a quarter of the alignment*/
483 else if((backwards_multiple & ((MALLOC_ALIGNMENT/4u) - 1u)) == 0){
484 size_t remainder;
485 lcm = backwards_multiple*4u;
486 current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
487 needs_backwards_lcmed = needs_backwards = size_to_achieve - current_forward;
488 //while(0 != (needs_backwards_lcmed & (MALLOC_ALIGNMENT-1)))
489 //needs_backwards_lcmed += backwards_multiple;
490 if(0 != (remainder = ((needs_backwards_lcmed & (MALLOC_ALIGNMENT-1))>>(MALLOC_ALIGNMENT/8u)))){
491 if(backwards_multiple & MALLOC_ALIGNMENT/2u){
492 needs_backwards_lcmed += (remainder)*backwards_multiple;
493 }
494 else{
495 needs_backwards_lcmed += (4-remainder)*backwards_multiple;
496 }
497 }
498 assert((needs_backwards_lcmed % lcm) == 0);
499 *plcm = lcm;
500 *pneeds_backwards_lcmed = needs_backwards_lcmed;
501 return 1;
502 }
503 else{
504 CALCULATE_LCM(max, min, lcm);
505 /*If we want to use minbytes data to get a buffer between maxbytes
506 and minbytes if maxbytes can't be achieved, calculate the
507 biggest of all possibilities*/
508 current_forward = GET_TRUNCATED_SIZE(received_size, backwards_multiple);
509 needs_backwards = size_to_achieve - current_forward;
510 assert((needs_backwards % backwards_multiple) == 0);
511 needs_backwards_lcmed = GET_ROUNDED_SIZE(needs_backwards, lcm);
512 *plcm = lcm;
513 *pneeds_backwards_lcmed = needs_backwards_lcmed;
514 return 1;
515 }
516}
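
/* Worked example (illustrative assumption, not from the original source):
   suppose MALLOC_ALIGNMENT == 16 and the caller passes
      backwards_multiple == 32   (a power of two)
      received_size      == 100  (bytes currently usable in the chunk)
      size_to_achieve    == 224  (a multiple of backwards_multiple)
   The power-of-two branch is taken, so lcm == 32, and
      current_forward       == GET_TRUNCATED_PO2_SIZE(100, 32) == 96
      needs_backwards       == 224 - 96                         == 128
      needs_backwards_lcmed == GET_ROUNDED_PO2_SIZE(128, 32)    == 128
   i.e. the block must grow 128 bytes backwards (a multiple of both the
   alignment and backwards_multiple) to reach the requested size. */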
517
518static void *internal_grow_both_sides
519 (mstate m
520 ,allocation_type command
521 ,void *oldmem
522 ,size_t minbytes
523 ,size_t maxbytes
524 ,size_t *received_size
525 ,size_t backwards_multiple
526 ,int only_preferred_backwards)
527{
528 mchunkptr oldp = mem2chunk(oldmem);
529 size_t oldsize = chunksize(oldp);
530 *received_size = oldsize - overhead_for(oldp);
531 if(minbytes <= *received_size)
532 return oldmem;
533
534 if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp))) {
535 if(command & BOOST_CONTAINER_EXPAND_FWD){
         if(try_realloc_chunk_with_min(m, oldp, request2size(minbytes), request2size(maxbytes), 0)){
537 check_inuse_chunk(m, oldp);
538 *received_size = DL_SIZE_IMPL(oldmem);
539 s_allocated_memory += chunksize(oldp) - oldsize;
540 return oldmem;
541 }
542 }
543 else{
544 *received_size = DL_SIZE_IMPL(oldmem);
545 if(*received_size >= maxbytes)
546 return oldmem;
547 }
548/*
549 Should we check this?
550 if(backwards_multiple &&
551 (0 != (minbytes % backwards_multiple) &&
552 0 != (maxbytes % backwards_multiple)) ){
553 USAGE_ERROR_ACTION(m, oldp);
554 return 0;
555 }
556*/
557 /* We reach here only if forward expansion fails */
558 if(!(command & BOOST_CONTAINER_EXPAND_BWD) || pinuse(oldp)){
559 return 0;
560 }
561 {
562 size_t prevsize = oldp->prev_foot;
563 if ((prevsize & USE_MMAP_BIT) != 0){
            /*Return failure if the previous chunk was mmapped:
              mremap does not allow expanding to a fixed address without also
              permitting the mapping to move (MREMAP_MAYMOVE would have to be set).*/
567 return 0;
568 }
569 else {
570 mchunkptr prev = chunk_minus_offset(oldp, prevsize);
571 size_t dsize = oldsize + prevsize;
572 size_t needs_backwards_lcmed;
573 size_t lcm;
574
575 /* Let's calculate the number of extra bytes of data before the current
576 block's begin. The value is a multiple of backwards_multiple
577 and the alignment*/
            if(!calculate_lcm_and_needs_backwards_lcmed
               ( backwards_multiple, *received_size
               , only_preferred_backwards ? maxbytes : minbytes
               , &lcm, &needs_backwards_lcmed)
582 || !RTCHECK(ok_address(m, prev))){
583 USAGE_ERROR_ACTION(m, oldp);
584 return 0;
585 }
586 /* Check if previous block has enough size */
587 else if(prevsize < needs_backwards_lcmed){
588 /* preferred size? */
589 return 0;
590 }
591 /* Now take all next space. This must succeed, as we've previously calculated the correct size */
592 if(command & BOOST_CONTAINER_EXPAND_FWD){
               if(!try_realloc_chunk_with_min(m, oldp, request2size(*received_size), request2size(*received_size), 0)){
594 assert(0);
595 }
596 check_inuse_chunk(m, oldp);
597 *received_size = DL_SIZE_IMPL(oldmem);
598 s_allocated_memory += chunksize(oldp) - oldsize;
599 oldsize = chunksize(oldp);
600 dsize = oldsize + prevsize;
601 }
602 /* We need a minimum size to split the previous one */
603 if(prevsize >= (needs_backwards_lcmed + MIN_CHUNK_SIZE)){
604 mchunkptr r = chunk_minus_offset(oldp, needs_backwards_lcmed);
605 size_t rsize = oldsize + needs_backwards_lcmed;
606 size_t newprevsize = dsize - rsize;
607 int prev_was_dv = prev == m->dv;
608
609 assert(newprevsize >= MIN_CHUNK_SIZE);
610
611 if (prev_was_dv) {
612 m->dvsize = newprevsize;
613 }
614 else{/* if ((next->head & INUSE_BITS) == INUSE_BITS) { */
615 unlink_chunk(m, prev, prevsize);
616 insert_chunk(m, prev, newprevsize);
617 }
618
619 set_size_and_pinuse_of_free_chunk(prev, newprevsize);
620 clear_pinuse(r);
621 set_inuse(m, r, rsize);
622 check_malloced_chunk(m, chunk2mem(r), rsize);
623 *received_size = chunksize(r) - overhead_for(r);
624 s_allocated_memory += chunksize(r) - oldsize;
625 return chunk2mem(r);
626 }
            /* There is no room to split off a new free block, but merging the whole
               previous block keeps the size a multiple of the backwards expansion multiple */
629 else if(prevsize >= needs_backwards_lcmed && !(prevsize % lcm)) {
630 /* Just merge the whole previous block */
631 /* prevsize is multiple of lcm (and backwards_multiple)*/
632 *received_size += prevsize;
633
634 if (prev != m->dv) {
635 unlink_chunk(m, prev, prevsize);
636 }
637 else{
638 m->dvsize = 0;
639 m->dv = 0;
640 }
641 set_inuse(m, prev, dsize);
642 check_malloced_chunk(m, chunk2mem(prev), dsize);
643 s_allocated_memory += chunksize(prev) - oldsize;
644 return chunk2mem(prev);
645 }
646 else{
647 /* Previous block was big enough but there is no room
648 to create an empty block and taking the whole block does
649 not fulfill alignment requirements */
650 return 0;
651 }
652 }
653 }
654 }
655 else{
656 USAGE_ERROR_ACTION(m, oldmem);
657 return 0;
658 }
659 return 0;
660}
661
662/* This is similar to mmap_resize but:
663 * Only to shrink
664 * It takes min and max sizes
665 * Takes additional 'do_commit' argument to obtain the final
666 size before doing the real shrink operation.
667*/
668static int internal_mmap_shrink_in_place(mstate m, mchunkptr oldp, size_t nbmin, size_t nbmax, size_t *received_size, int do_commit)
669{
670 size_t oldsize = chunksize(oldp);
671 *received_size = oldsize;
672 #if HAVE_MREMAP
673 if (is_small(nbmax)) /* Can't shrink mmap regions below small size */
674 return 0;
675 {
676 size_t effective_min = nbmin > MIN_LARGE_SIZE ? nbmin : MIN_LARGE_SIZE;
677 /* Keep old chunk if big enough but not too big */
678 if (oldsize >= effective_min + SIZE_T_SIZE &&
679 (oldsize - effective_min) <= (mparams.granularity << 1))
680 return 0;
681 /* Now calculate new sizes */
682 {
683 size_t offset = oldp->prev_foot;
684 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
685 size_t newmmsize = mmap_align(effective_min + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
686 *received_size = newmmsize;
687 if(!do_commit){
688 const int flags = 0; /* placate people compiling -Wunused */
689 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
690 oldmmsize, newmmsize, flags);
691 /*This must always succeed */
692 if(!cp){
693 USAGE_ERROR_ACTION(m, m);
694 return 0;
695 }
696 {
697 mchunkptr newp = (mchunkptr)(cp + offset);
698 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
699 newp->head = psize;
700 mark_inuse_foot(m, newp, psize);
701 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
702 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
703
704 if (cp < m->least_addr)
705 m->least_addr = cp;
706 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
707 m->max_footprint = m->footprint;
708 check_mmapped_chunk(m, newp);
709 }
710 }
711 }
712 return 1;
713 }
714 #else //#if HAVE_MREMAP
715 (void)m;
716 (void)oldp;
717 (void)nbmin;
718 (void)nbmax;
719 (void)received_size;
720 (void)do_commit;
721 return 0;
722 #endif //#if HAVE_MREMAP
723}
724
725static int internal_shrink(mstate m, void* oldmem, size_t minbytes, size_t maxbytes, size_t *received_size, int do_commit)
726{
727 *received_size = chunksize(mem2chunk(oldmem)) - overhead_for(mem2chunk(oldmem));
728 if (minbytes >= MAX_REQUEST || maxbytes >= MAX_REQUEST) {
729 MALLOC_FAILURE_ACTION;
730 return 0;
731 }
732 else if(minbytes < MIN_REQUEST){
733 minbytes = MIN_REQUEST;
734 }
735 if (minbytes > maxbytes) {
736 return 0;
737 }
738
739 {
740 mchunkptr oldp = mem2chunk(oldmem);
741 size_t oldsize = chunksize(oldp);
742 mchunkptr next = chunk_plus_offset(oldp, oldsize);
743 void* extra = 0;
744
745 /* Try to either shrink or extend into top. Else malloc-copy-free*/
746 if (RTCHECK(ok_address(m, oldp) && ok_inuse(oldp) &&
747 ok_next(oldp, next) && ok_pinuse(next))) {
748 size_t nbmin = request2size(minbytes);
749 size_t nbmax = request2size(maxbytes);
750
751 if (nbmin > oldsize){
752 /* Return error if old size is too small */
753 }
754 else if (is_mmapped(oldp)){
755 return internal_mmap_shrink_in_place(m, oldp, nbmin, nbmax, received_size, do_commit);
756 }
757 else{ // nbmin <= oldsize /* already big enough*/
758 size_t nb = nbmin;
759 size_t rsize = oldsize - nb;
760 if (rsize >= MIN_CHUNK_SIZE) {
761 if(do_commit){
762 mchunkptr remainder = chunk_plus_offset(oldp, nb);
763 set_inuse(m, oldp, nb);
764 set_inuse(m, remainder, rsize);
765 s_allocated_memory -= rsize;
766 extra = chunk2mem(remainder);
                  mspace_free_lockless(m, extra);
768 check_inuse_chunk(m, oldp);
769 }
770 *received_size = nb - overhead_for(oldp);
771 return 1;
772 }
773 }
774 }
775 else {
776 USAGE_ERROR_ACTION(m, oldmem);
777 }
778 return 0;
779 }
780}
781
782#define INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM 4096
783#define SQRT_MAX_SIZE_T (((size_t)-1)>>(sizeof(size_t)*CHAR_BIT/2))
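
/* SQRT_MAX_SIZE_T is the largest value whose square still fits in size_t
   (0xFFFFFFFF for a 64-bit size_t). It is used below to detect multiplication
   overflow cheaply: a*b can only overflow if at least one factor exceeds
   SQRT_MAX_SIZE_T, so the expensive division is performed only in that rare
   case. A minimal sketch of the same idiom (hypothetical helper, shown for
   illustration only, not part of the original source): */
#if 0
static int mul_would_overflow(size_t a, size_t b)
{
   return SQRT_MAX_SIZE_T < (a | b)   /* fast filter: both factors small => no overflow */
       && b != 0
       && (MAX_SIZE_T / b) < a;       /* exact check, only reached in the rare case */
}
#endif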
784
785static int internal_node_multialloc
786(mstate m, size_t n_elements, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
787 void* mem; /* malloced aggregate space */
788 mchunkptr p; /* corresponding chunk */
789 size_t remainder_size; /* remaining bytes while splitting */
790 flag_t was_enabled; /* to disable mmap */
791 size_t elements_per_segment = 0;
792 size_t element_req_size = request2size(element_size);
793 boost_cont_memchain_it prev_last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
794
795 /*Error if wrong element_size parameter */
796 if (!element_size ||
797 /*OR Error if n_elements less than contiguous_elements */
798 ((contiguous_elements + 1) > (BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS + 1) && n_elements < contiguous_elements) ||
799 /* OR Error if integer overflow */
800 (SQRT_MAX_SIZE_T < (element_req_size | contiguous_elements) &&
801 (MAX_SIZE_T / element_req_size) < contiguous_elements)) {
802 return 0;
803 }
804 switch (contiguous_elements) {
805 case BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
806 {
807 /* Default contiguous, just check that we can store at least one element */
808 elements_per_segment = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM / element_req_size;
809 elements_per_segment += (size_t)(!elements_per_segment);
810 }
811 break;
812 case BOOST_CONTAINER_DL_MULTIALLOC_ALL_CONTIGUOUS:
813 /* All elements should be allocated in a single call */
814 elements_per_segment = n_elements;
815 break;
816 default:
817 /* Allocate in chunks of "contiguous_elements" */
818 elements_per_segment = contiguous_elements;
819 }
820
821 {
822 size_t i;
823 size_t next_i;
824 /*
825 Allocate the aggregate chunk. First disable direct-mmapping so
826 malloc won't use it, since we would not be able to later
827 free/realloc space internal to a segregated mmap region.
828 */
829 was_enabled = use_mmap(m);
830 disable_mmap(m);
831 for (i = 0; i != n_elements; i = next_i)
832 {
833 size_t accum_size;
834 size_t n_elements_left = n_elements - i;
835 next_i = i + ((n_elements_left < elements_per_segment) ? n_elements_left : elements_per_segment);
836 accum_size = element_req_size * (next_i - i);
837
         mem = mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
839 if (mem == 0) {
840 BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
841 while (i) {
842 void *addr = BOOST_CONTAINER_MEMIT_ADDR(prev_last_it);
843 --i;
844 BOOST_CONTAINER_MEMIT_NEXT(prev_last_it);
845 s_allocated_memory -= chunksize(mem2chunk(addr));
               mspace_free_lockless(m, addr);
847 }
848 if (was_enabled)
849 enable_mmap(m);
850 return 0;
851 }
852 p = mem2chunk(mem);
853 remainder_size = chunksize(p);
854 s_allocated_memory += remainder_size;
855
856 assert(!is_mmapped(p));
857 { /* split out elements */
858 //void *mem_orig = mem;
859 //boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
860 size_t num_elements = next_i - i;
861
862 size_t num_loops = num_elements - 1;
863 remainder_size -= element_req_size * num_loops;
864 while (num_loops) {
865 --num_loops;
866 //void **mem_prev = ((void**)mem);
867 set_size_and_pinuse_of_inuse_chunk(m, p, element_req_size);
868 BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(pchain, mem);
869 p = chunk_plus_offset(p, element_req_size);
870 mem = chunk2mem(p);
871 //*mem_prev = mem;
872 }
873 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
874 BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(pchain, mem);
875 //BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
876 }
877 }
878 if (was_enabled)
879 enable_mmap(m);
880 }
881 return 1;
882}
883
884#define BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
885#ifndef BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
886
887#define BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(THISMEM, NEXTMEM) \
888 *((void**)(THISMEM)) = *((void**)((NEXTMEM)))
889
890//This function is based on internal_bulk_free
891//replacing iteration over array[] with boost_cont_memchain.
892//Instead of returning the unallocated nodes, returns a chain of non-deallocated nodes.
893//After forward merging, backwards merging is also tried
894static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
895{
896#if FOOTERS
897 boost_cont_memchain ret_chain;
898 BOOST_CONTAINER_MEMCHAIN_INIT(&ret_chain);
899#endif
900 if (!PREACTION(m)) {
901 boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
      while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate through all memory held by the chain */
903 void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
904 mchunkptr a_p = mem2chunk(a_mem);
905 size_t psize = chunksize(a_p);
906#if FOOTERS
907 if (get_mstate_for(a_p) != m) {
908 BOOST_CONTAINER_MEMIT_NEXT(a_it);
909 BOOST_CONTAINER_MEMCHAIN_PUSH_BACK(&ret_chain, a_mem);
910 continue;
911 }
912#endif
913 check_inuse_chunk(m, a_p);
914 if (RTCHECK(ok_address(m, a_p) && ok_inuse(a_p))) {
915 while (1) { /* Internal loop to speed up forward and backward merging (avoids some redundant checks) */
916 boost_cont_memchain_it b_it = a_it;
917 BOOST_CONTAINER_MEMIT_NEXT(b_it);
918 if (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, b_it)) {
919 void *b_mem = BOOST_CONTAINER_MEMIT_ADDR(b_it);
920 mchunkptr b_p = mem2chunk(b_mem);
921 if (b_p == next_chunk(a_p)) { /* b chunk is contiguous and next so b's size can be added to a */
922 psize += chunksize(b_p);
923 set_inuse(m, a_p, psize);
924 BOOST_ALLOC_PLUS_MEMCHAIN_MEM_JUMP_NEXT(a_mem, b_mem);
925 continue;
926 }
927 if (RTCHECK(ok_address(m, b_p) && ok_inuse(b_p))) {
928 /* b chunk is contiguous and previous so a's size can be added to b */
929 if (a_p == next_chunk(b_p)) {
930 psize += chunksize(b_p);
931 set_inuse(m, b_p, psize);
932 a_it = b_it;
933 a_p = b_p;
934 a_mem = b_mem;
935 continue;
936 }
937 }
938 }
939 /* Normal deallocation starts again in the outer loop */
940 a_it = b_it;
941 s_allocated_memory -= psize;
942 dispose_chunk(m, a_p, psize);
943 break;
944 }
945 }
946 else {
947 CORRUPTION_ERROR_ACTION(m);
948 break;
949 }
950 }
951 if (should_trim(m, m->topsize))
952 sys_trim(m, 0);
953 POSTACTION(m);
954 }
955#if FOOTERS
956 {
957 boost_cont_memchain_it last_pchain = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
958 BOOST_CONTAINER_MEMCHAIN_INIT(pchain);
959 BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER
960 (pchain
961 , last_pchain
962 , BOOST_CONTAINER_MEMCHAIN_FIRSTMEM(&ret_chain)
963 , BOOST_CONTAINER_MEMCHAIN_LASTMEM(&ret_chain)
964 , BOOST_CONTAINER_MEMCHAIN_SIZE(&ret_chain)
965 );
966 }
967#endif
968}
969
970#else //BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
971
972//This function is based on internal_bulk_free
973//replacing iteration over array[] with boost_cont_memchain.
974//Instead of returning the unallocated nodes, returns a chain of non-deallocated nodes.
975//After forward merging, backwards merging is also tried
976static void internal_multialloc_free(mstate m, boost_cont_memchain *pchain)
977{
978 if (!PREACTION(m)) {
979 boost_cont_memchain_it a_it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
      while (!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(pchain, a_it)) { /* Iterate through all memory held by the chain */
981 void* a_mem = BOOST_CONTAINER_MEMIT_ADDR(a_it);
982 BOOST_CONTAINER_MEMIT_NEXT(a_it);
983 s_allocated_memory -= chunksize(mem2chunk(a_mem));
         mspace_free_lockless(m, a_mem);
985 }
986 POSTACTION(m);
987 }
988}
989
990#endif //BOOST_CONTAINER_DLMALLOC_SIMPLE_MULTIDEALLOC
991
992static int internal_multialloc_arrays
993 (mstate m, size_t n_elements, const size_t* sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain) {
994 void* mem; /* malloced aggregate space */
995 mchunkptr p; /* corresponding chunk */
996 size_t remainder_size; /* remaining bytes while splitting */
997 flag_t was_enabled; /* to disable mmap */
998 size_t size;
999 size_t boost_cont_multialloc_segmented_malloc_size;
1000 size_t max_size;
1001
1002 /* Check overflow */
1003 if(!element_size){
1004 return 0;
1005 }
1006 max_size = MAX_REQUEST/element_size;
1007 /* Different sizes*/
1008 switch(contiguous_elements){
1009 case BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS:
1010 /* Use default contiguous mem */
1011 boost_cont_multialloc_segmented_malloc_size = INTERNAL_MULTIALLOC_DEFAULT_CONTIGUOUS_MEM;
1012 break;
1013 case BOOST_CONTAINER_DL_MULTIALLOC_ALL_CONTIGUOUS:
1014 boost_cont_multialloc_segmented_malloc_size = MAX_REQUEST + CHUNK_OVERHEAD;
1015 break;
1016 default:
1017 if(max_size < contiguous_elements){
1018 return 0;
1019 }
1020 else{
         /* The suggested buffer is just the element count times the element size */
1022 boost_cont_multialloc_segmented_malloc_size = element_size*contiguous_elements;
1023 }
1024 }
1025
1026 {
1027 size_t i;
1028 size_t next_i;
1029 /*
1030 Allocate the aggregate chunk. First disable direct-mmapping so
1031 malloc won't use it, since we would not be able to later
1032 free/realloc space internal to a segregated mmap region.
1033 */
1034 was_enabled = use_mmap(m);
1035 disable_mmap(m);
1036 for(i = 0, next_i = 0; i != n_elements; i = next_i)
1037 {
1038 int error = 0;
1039 size_t accum_size;
1040 for(accum_size = 0; next_i != n_elements; ++next_i){
1041 size_t cur_array_size = sizes[next_i];
1042 if(max_size < cur_array_size){
1043 error = 1;
1044 break;
1045 }
1046 else{
1047 size_t reqsize = request2size(cur_array_size*element_size);
1048 if(((boost_cont_multialloc_segmented_malloc_size - CHUNK_OVERHEAD) - accum_size) < reqsize){
1049 if(!accum_size){
1050 accum_size += reqsize;
1051 ++next_i;
1052 }
1053 break;
1054 }
1055 accum_size += reqsize;
1056 }
1057 }
1058
         mem = error ? 0 : mspace_malloc_lockless(m, accum_size - CHUNK_OVERHEAD);
1060 if (mem == 0){
1061 boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(pchain);
1062 while(i--){
1063 void *addr = BOOST_CONTAINER_MEMIT_ADDR(it);
1064 BOOST_CONTAINER_MEMIT_NEXT(it);
1065 s_allocated_memory -= chunksize(mem2chunk(addr));
               mspace_free_lockless(m, addr);
1067 }
1068 if (was_enabled)
1069 enable_mmap(m);
1070 return 0;
1071 }
1072 p = mem2chunk(mem);
1073 remainder_size = chunksize(p);
1074 s_allocated_memory += remainder_size;
1075
1076 assert(!is_mmapped(p));
1077
1078 { /* split out elements */
1079 void *mem_orig = mem;
1080 boost_cont_memchain_it last_it = BOOST_CONTAINER_MEMCHAIN_LAST_IT(pchain);
1081 size_t num_elements = next_i-i;
1082
1083 for(++i; i != next_i; ++i) {
1084 void **mem_prev = ((void**)mem);
1085 size = request2size(sizes[i]*element_size);
1086 remainder_size -= size;
1087 set_size_and_pinuse_of_inuse_chunk(m, p, size);
1088 p = chunk_plus_offset(p, size);
1089 mem = chunk2mem(p);
1090 *mem_prev = mem;
1091 }
1092 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
1093 BOOST_CONTAINER_MEMCHAIN_INCORPORATE_AFTER(pchain, last_it, mem_orig, mem, num_elements);
1094 }
1095 }
1096 if (was_enabled)
1097 enable_mmap(m);
1098 }
1099 return 1;
1100}
1101
1102int boost_cont_multialloc_arrays
1103 (size_t n_elements, const size_t *sizes, size_t element_size, size_t contiguous_elements, boost_cont_memchain *pchain)
1104{
1105 int ret = 0;
1106 mstate ms = (mstate)gm;
1107 ensure_initialization();
1108 if (!ok_magic(ms)) {
1109 USAGE_ERROR_ACTION(ms,ms);
1110 }
1111 else if (!PREACTION(ms)) {
      ret = internal_multialloc_arrays(ms, n_elements, sizes, element_size, contiguous_elements, pchain);
1113 POSTACTION(ms);
1114 }
1115 return ret;
1116}
1117
1118
1119/*Doug Lea malloc extensions*/
1120static boost_cont_malloc_stats_t get_malloc_stats(mstate m)
1121{
1122 boost_cont_malloc_stats_t ret = { 0, 0, 0 };
1123 ensure_initialization();
1124 if (!PREACTION(m)) {
1125 size_t maxfp = 0;
1126 size_t fp = 0;
1127 size_t used = 0;
1128 check_malloc_state(m);
1129 if (is_initialized(m)) {
1130 msegmentptr s = &m->seg;
1131 maxfp = m->max_footprint;
1132 fp = m->footprint;
1133 used = fp - (m->topsize + TOP_FOOT_SIZE);
1134
1135 while (s != 0) {
1136 mchunkptr q = align_as_chunk(s->base);
1137 while (segment_holds(s, q) &&
1138 q != m->top && q->head != FENCEPOST_HEAD) {
1139 if (!cinuse(q))
1140 used -= chunksize(q);
1141 q = next_chunk(q);
1142 }
1143 s = s->next;
1144 }
1145 }
1146
1147 ret.max_system_bytes = maxfp;
1148 ret.system_bytes = fp;
1149 ret.in_use_bytes = used;
1150 POSTACTION(m);
1151 }
1152 return ret;
1153}
1154
1155size_t boost_cont_size(const void *p)
1156{ return DL_SIZE_IMPL(p); }
1157
1158void* boost_cont_malloc(size_t bytes)
1159{
1160 size_t received_bytes;
1161 ensure_initialization();
1162 return boost_cont_allocation_command
      (BOOST_CONTAINER_ALLOCATE_NEW, 1, bytes, bytes, &received_bytes, 0).first;
1164}
1165
1166void boost_cont_free(void* mem)
1167{
1168 mstate ms = (mstate)gm;
1169 if (!ok_magic(ms)) {
1170 USAGE_ERROR_ACTION(ms,ms);
1171 }
1172 else if (!PREACTION(ms)) {
1173 if(mem)
1174 s_allocated_memory -= chunksize(mem2chunk(mem));
         mspace_free_lockless(ms, mem);
1176 POSTACTION(ms);
1177 }
1178}
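
/* Minimal usage sketch for the basic malloc/size/free entry points defined
   above (illustrative only; the helper name is hypothetical and error handling
   beyond a NULL check is omitted): */
#if 0
static void example_basic_usage(void)
{
   void *p = boost_cont_malloc(100);
   if(p){
      size_t usable = boost_cont_size(p);  /* >= 100: real usable size of the block */
      (void)usable;
      boost_cont_free(p);
   }
}
#endif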
1179
1180void* boost_cont_memalign(size_t bytes, size_t alignment)
1181{
1182 void *addr;
1183 ensure_initialization();
1184 addr = mspace_memalign(gm, alignment, bytes);
1185 if(addr){
1186 s_allocated_memory += chunksize(mem2chunk(addr));
1187 }
1188 return addr;
1189}
1190
1191int boost_cont_multialloc_nodes
1192 (size_t n_elements, size_t elem_size, size_t contiguous_elements, boost_cont_memchain *pchain)
1193{
1194 int ret = 0;
1195 mstate ms = (mstate)gm;
1196 ensure_initialization();
1197 if (!ok_magic(ms)) {
1198 USAGE_ERROR_ACTION(ms,ms);
1199 }
1200 else if (!PREACTION(ms)) {
      ret = internal_node_multialloc(ms, n_elements, elem_size, contiguous_elements, pchain);
1202 POSTACTION(ms);
1203 }
1204 return ret;
1205}
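
/* Minimal usage sketch for node multi-allocation (illustrative only; the helper
   name is hypothetical): allocate 10 nodes of 64 bytes, walk the resulting
   memory chain, then deallocate all of them in one call.
   BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS lets the allocator choose
   how many nodes share each contiguous segment. */
#if 0
static void example_multialloc_nodes(void)
{
   boost_cont_memchain chain;
   BOOST_CONTAINER_MEMCHAIN_INIT(&chain);
   if(boost_cont_multialloc_nodes(10, 64, BOOST_CONTAINER_DL_MULTIALLOC_DEFAULT_CONTIGUOUS, &chain)){
      boost_cont_memchain_it it = BOOST_CONTAINER_MEMCHAIN_BEGIN_IT(&chain);
      while(!BOOST_CONTAINER_MEMCHAIN_IS_END_IT(&chain, it)){
         void *node = BOOST_CONTAINER_MEMIT_ADDR(it);   /* one 64-byte node */
         (void)node;
         BOOST_CONTAINER_MEMIT_NEXT(it);
      }
      boost_cont_multidealloc(&chain);
   }
}
#endif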
1206
1207size_t boost_cont_footprint()
1208{
1209 return ((mstate)gm)->footprint;
1210}
1211
1212size_t boost_cont_allocated_memory()
1213{
1214 size_t alloc_mem = 0;
1215 mstate m = (mstate)gm;
1216 ensure_initialization();
   if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, m);
   }

1222 if (!PREACTION(m)) {
1223 check_malloc_state(m);
1224 if (is_initialized(m)) {
1225 size_t nfree = SIZE_T_ONE; /* top always free */
1226 size_t mfree = m->topsize + TOP_FOOT_SIZE;
1227 msegmentptr s = &m->seg;
1228 while (s != 0) {
1229 mchunkptr q = align_as_chunk(s->base);
1230 while (segment_holds(s, q) &&
1231 q != m->top && q->head != FENCEPOST_HEAD) {
1232 size_t sz = chunksize(q);
1233 if (!is_inuse(q)) {
1234 mfree += sz;
1235 ++nfree;
1236 }
1237 q = next_chunk(q);
1238 }
1239 s = s->next;
1240 }
1241 {
1242 size_t uordblks = m->footprint - mfree;
1243 if(nfree)
1244 alloc_mem = (size_t)(uordblks - (nfree-1)*TOP_FOOT_SIZE);
1245 else
1246 alloc_mem = uordblks;
1247 }
1248 }
1249
1250 POSTACTION(m);
1251 }
1252 return alloc_mem;
1253}
1254
1255size_t boost_cont_chunksize(const void *p)
1256{ return chunksize(mem2chunk(p)); }
1257
1258int boost_cont_all_deallocated()
1259{ return !s_allocated_memory; }
1260
1261boost_cont_malloc_stats_t boost_cont_malloc_stats()
1262{
1263 mstate ms = (mstate)gm;
1264 if (ok_magic(ms)) {
      return get_malloc_stats(ms);
1266 }
1267 else {
1268 boost_cont_malloc_stats_t r = { 0, 0, 0 };
1269 USAGE_ERROR_ACTION(ms,ms);
1270 return r;
1271 }
1272}
1273
1274size_t boost_cont_in_use_memory()
1275{ return s_allocated_memory; }
1276
1277int boost_cont_trim(size_t pad)
1278{
1279 ensure_initialization();
1280 return dlmalloc_trim(pad);
1281}
1282
1283int boost_cont_grow
1284 (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received)
1285{
1286 mstate ms = (mstate)gm;
1287 if (!ok_magic(ms)) {
1288 USAGE_ERROR_ACTION(ms,ms);
1289 return 0;
1290 }
1291
1292 if (!PREACTION(ms)) {
1293 mchunkptr p = mem2chunk(oldmem);
1294 size_t oldsize = chunksize(p);
      p = try_realloc_chunk_with_min(ms, p, request2size(minbytes), request2size(maxbytes), 0);
1296 POSTACTION(ms);
1297 if(p){
1298 check_inuse_chunk(ms, p);
1299 *received = DL_SIZE_IMPL(oldmem);
1300 s_allocated_memory += chunksize(p) - oldsize;
1301 }
1302 return 0 != p;
1303 }
1304 return 0;
1305}
1306
1307int boost_cont_shrink
1308 (void* oldmem, size_t minbytes, size_t maxbytes, size_t *received, int do_commit)
1309{
1310 mstate ms = (mstate)gm;
1311 if (!ok_magic(ms)) {
1312 USAGE_ERROR_ACTION(ms,ms);
1313 return 0;
1314 }
1315
1316 if (!PREACTION(ms)) {
      int ret = internal_shrink(ms, oldmem, minbytes, maxbytes, received, do_commit);
1318 POSTACTION(ms);
1319 return 0 != ret;
1320 }
1321 return 0;
1322}
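
/* Minimal usage sketch for in-place resizing (illustrative only; the helper
   name is hypothetical): try to grow an existing allocation to at least 150
   bytes (preferably 200) without moving it, then shrink it back, committing
   the shrink in one step. */
#if 0
static void example_grow_and_shrink(void)
{
   size_t received = 0;
   void *p = boost_cont_malloc(100);
   if(p){
      if(boost_cont_grow(p, 150, 200, &received)){
         /* p still points to the same block, now 'received' bytes long */
      }
      if(boost_cont_shrink(p, 64, 96, &received, 1 /*do_commit*/)){
         /* block shrunk in place; 'received' holds the new usable size */
      }
      boost_cont_free(p);
   }
}
#endif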
1323
1324
1325void* boost_cont_alloc
1326 (size_t minbytes, size_t preferred_bytes, size_t *received_bytes)
1327{
1328 //ensure_initialization provided by boost_cont_allocation_command
1329 return boost_cont_allocation_command
      (BOOST_CONTAINER_ALLOCATE_NEW, 1, minbytes, preferred_bytes, received_bytes, 0).first;
1331}
1332
1333void boost_cont_multidealloc(boost_cont_memchain *pchain)
1334{
1335 mstate ms = (mstate)gm;
1336 if (!ok_magic(ms)) {
1337 (void)ms;
1338 USAGE_ERROR_ACTION(ms,ms);
1339 }
   internal_multialloc_free(ms, pchain);
1341}
1342
1343int boost_cont_malloc_check()
1344{
1345#ifdef DEBUG
1346 mstate ms = (mstate)gm;
1347 ensure_initialization();
1348 if (!ok_magic(ms)) {
1349 (void)ms;
1350 USAGE_ERROR_ACTION(ms,ms);
1351 return 0;
1352 }
1353 check_malloc_state(ms);
1354 return 1;
1355#else
1356 return 1;
1357#endif
1358}
1359
1360
1361boost_cont_command_ret_t boost_cont_allocation_command
1362 (allocation_type command, size_t sizeof_object, size_t limit_size
1363 , size_t preferred_size, size_t *received_size, void *reuse_ptr)
1364{
1365 boost_cont_command_ret_t ret = { 0, 0 };
1366 ensure_initialization();
1367 if(command & (BOOST_CONTAINER_SHRINK_IN_PLACE | BOOST_CONTAINER_TRY_SHRINK_IN_PLACE)){
      int success = boost_cont_shrink( reuse_ptr, preferred_size, limit_size
                                     , received_size, (command & BOOST_CONTAINER_SHRINK_IN_PLACE));
1370 ret.first = success ? reuse_ptr : 0;
1371 return ret;
1372 }
1373
1374 *received_size = 0;
1375
1376 if(limit_size > preferred_size)
1377 return ret;
1378
1379 {
1380 mstate ms = (mstate)gm;
1381
1382 /*Expand in place*/
1383 if (!PREACTION(ms)) {
1384 #if FOOTERS
1385 if(reuse_ptr){
1386 mstate m = get_mstate_for(mem2chunk(reuse_ptr));
1387 if (!ok_magic(m)) {
1388 USAGE_ERROR_ACTION(m, reuse_ptr);
1389 return ret;
1390 }
1391 }
1392 #endif
1393 if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
1394 void *r = internal_grow_both_sides
               ( ms, command, reuse_ptr, limit_size
               , preferred_size, received_size, sizeof_object, 1);
1397 if(r){
1398 ret.first = r;
1399 ret.second = 1;
1400 goto postaction;
1401 }
1402 }
1403
1404 if(command & BOOST_CONTAINER_ALLOCATE_NEW){
            void *addr = mspace_malloc_lockless(ms, preferred_size);
            if(!addr)   addr = mspace_malloc_lockless(ms, limit_size);
1407 if(addr){
1408 s_allocated_memory += chunksize(mem2chunk(addr));
1409 *received_size = DL_SIZE_IMPL(addr);
1410 }
1411 ret.first = addr;
1412 ret.second = 0;
1413 if(addr){
1414 goto postaction;
1415 }
1416 }
1417
1418 //Now try to expand both sides with min size
1419 if(reuse_ptr && (command & (BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD))){
1420 void *r = internal_grow_both_sides
               ( ms, command, reuse_ptr, limit_size
               , preferred_size, received_size, sizeof_object, 0);
1423 if(r){
1424 ret.first = r;
1425 ret.second = 1;
1426 goto postaction;
1427 }
1428 }
1429 postaction:
1430 POSTACTION(ms);
1431 }
1432 }
1433 return ret;
1434}
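
/* Minimal usage sketch for the combined allocation command (illustrative only;
   the helper name is hypothetical): request forward/backward expansion of an
   existing buffer of 'elem'-sized objects, falling back to a fresh allocation
   if expansion is not possible. ret.second is nonzero when the returned memory
   reuses (expands) reuse_ptr. */
#if 0
static void example_allocation_command(void *reuse_ptr, size_t elem)
{
   size_t received = 0;
   boost_cont_command_ret_t ret = boost_cont_allocation_command
      ( BOOST_CONTAINER_ALLOCATE_NEW | BOOST_CONTAINER_EXPAND_FWD | BOOST_CONTAINER_EXPAND_BWD
      , elem              /* sizeof_object: backwards expansion multiple */
      , 100*elem          /* limit_size: minimum acceptable size */
      , 200*elem          /* preferred_size */
      , &received
      , reuse_ptr);
   if(ret.first && !ret.second){
      /* a brand new buffer was returned; the caller still owns the old one */
   }
}
#endif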
1435
1436int boost_cont_mallopt(int param_number, int value)
1437{
1438 return change_mparam(param_number, value);
1439}
1440
1441void *boost_cont_sync_create()
1442{
   void *p = boost_cont_malloc(sizeof(MLOCK_T));
1444 if(p){
1445 if(0 != INITIAL_LOCK((MLOCK_T*)p)){
         boost_cont_free(p);
1447 p = 0;
1448 }
1449 }
1450 return p;
1451}
1452
1453void boost_cont_sync_destroy(void *sync)
1454{
1455 if(sync){
1456 (void)DESTROY_LOCK((MLOCK_T*)sync);
      boost_cont_free(sync);
1458 }
1459}
1460
1461int boost_cont_sync_lock(void *sync)
1462{ return 0 == (ACQUIRE_LOCK((MLOCK_T*)sync)); }
1463
1464void boost_cont_sync_unlock(void *sync)
1465{ RELEASE_LOCK((MLOCK_T*)sync); }
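
/* Minimal usage sketch for the external synchronization helpers above
   (illustrative only; the helper name is hypothetical): create a lock, guard a
   critical section, then destroy it. */
#if 0
static void example_sync(void)
{
   void *lock = boost_cont_sync_create();
   if(lock){
      if(boost_cont_sync_lock(lock)){   /* returns nonzero on success */
         /* ... critical section ... */
         boost_cont_sync_unlock(lock);
      }
      boost_cont_sync_destroy(lock);
   }
}
#endif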
1466
1467int boost_cont_global_sync_lock()
1468{
1469 int ret;
1470 ensure_initialization();
1471 ret = ACQUIRE_MALLOC_GLOBAL_LOCK();
1472 return 0 == ret;
1473}
1474
1475void boost_cont_global_sync_unlock()
1476{
1477 RELEASE_MALLOC_GLOBAL_LOCK()
1478}
1479
1480//#ifdef DL_DEBUG_DEFINED
1481// #undef DEBUG
1482//#endif
1483
1484#ifdef _MSC_VER
1485#pragma warning (pop)
1486#endif
1487

/* source: boost/libs/container/src/dlmalloc_ext_2_8_6.c */