1/* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999-2025 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "backend.h"
24#include "alias.h"
25#include "tree.h"
26#include "rtl.h"
27#include "memmodel.h"
28#include "tm_p.h"
29#include "diagnostic-core.h"
30#include "flags.h"
31#include "ggc-internal.h"
32#include "timevar.h"
33#include "cgraph.h"
34#include "cfgloop.h"
35#include "plugin.h"
36
37/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
38 file open. Prefer either to valloc. */
39#ifdef HAVE_MMAP_ANON
40# undef HAVE_MMAP_DEV_ZERO
41# define USING_MMAP
42#endif
43
44#ifdef HAVE_MMAP_DEV_ZERO
45# define USING_MMAP
46#endif
47
48#ifndef USING_MMAP
49#define USING_MALLOC_PAGE_GROUPS
50#endif
51
52#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
53 && defined(USING_MMAP)
54# define USING_MADVISE
55#endif
56
57/* Strategy:
58
59 This garbage-collecting allocator allocates objects on one of a set
60 of pages. Each page can allocate objects of a single size only;
61 available sizes are powers of two starting at four bytes. The size
62 of an allocation request is rounded up to the next power of two
63 (`order'), and satisfied from the appropriate page.
64
65 Each page is recorded in a page-entry, which also maintains an
66 in-use bitmap of object positions on the page. This allows the
67 allocation state of a particular object to be flipped without
68 touching the page itself.
69
70 Each page-entry also has a context depth, which is used to track
71 pushing and popping of allocation contexts. Only objects allocated
72 in the current (highest-numbered) context may be collected.
73
74 Page entries are arranged in an array of singly-linked lists. The
75 array is indexed by the allocation size, in bits, of the pages on
76 it; i.e. all pages on a list allocate objects of the same size.
77 Pages are ordered on the list such that all non-full pages precede
78 all full pages, with non-full pages arranged in order of decreasing
79 context depth.
80
81 Empty pages (of all orders) are kept on a single page cache list,
82 and are considered first when new pages are required; they are
83 deallocated at the start of the next collection if they haven't
84 been recycled by then. */
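
/* A hedged illustration of the strategy above (not part of the
   allocator): assuming MAX_ALIGNMENT is 8, a 100-byte request would be
   handled roughly as follows:

     requested 100 bytes
       -> rounded up to the 104-byte extra order (MAX_ALIGNMENT * 13)
       -> served from the first non-full page_entry on G.pages[order]
       -> the object's bit in that page's in_use_p[] bitmap is set
       -> ggc_free, or an unmarked object at collection time, clears the
          bit again; a page with no live objects goes back to the free
          page cache.

   The exact order chosen depends on extra_order_size_table and the
   host's MAX_ALIGNMENT, so the 104-byte figure is only indicative.  */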
85
86/* Define GGC_DEBUG_LEVEL to print debugging information.
87 0: No debugging output.
88 1: GC statistics only.
89 2: Page-entry allocations/deallocations as well.
90 3: Object allocations as well.
91 4: Object marks as well. */
92#define GGC_DEBUG_LEVEL (0)
93
94/* A two-level tree is used to look up the page-entry for a given
95 pointer. Two chunks of the pointer's bits are extracted to index
96 the first and second levels of the tree, as follows:
97
                                  HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |
                        PAGE_L1_BITS
                                    |
                              PAGE_L2_BITS
105
106 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
107 pages are aligned on system page boundaries. The next most
108 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
109 index values in the lookup table, respectively.
110
111 For 32-bit architectures and the settings below, there are no
112 leftover bits. For architectures with wider pointers, the lookup
113 tree points to a list of pages, which must be scanned to find the
114 correct one. */
115
116#define PAGE_L1_BITS (8)
117#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
118#define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
119#define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
120
121#define LOOKUP_L1(p) \
122 (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
123
124#define LOOKUP_L2(p) \
125 (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
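
/* A hedged worked example of the lookup macros above, assuming 4K pages
   (G.lg_pagesize == 12), which makes PAGE_L2_BITS == 12: for a pointer
   whose low 32 bits are 0x12345678,

     LOOKUP_L1 (p) == 0x12     (bits 31..24)
     LOOKUP_L2 (p) == 0x345    (bits 23..12)

   and bits 11..0, the offset within the system page, are ignored.  The
   page entry is then found at base[0x12][0x345], where base is G.lookup
   itself on a 32-bit host or the chain entry matching the pointer's
   high 32 bits on a 64-bit host.  */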
126
127/* The number of objects per allocation page, for objects on a page of
128 the indicated ORDER. */
129#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
130
131/* The number of objects in P. */
132#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
133
134/* The size of an object on a page of the indicated ORDER. */
135#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
136
137/* For speed, we avoid doing a general integer divide to locate the
138 offset in the allocation bitmap, by precalculating numbers M, S
139 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
140 within the page which is evenly divisible by the object size Z. */
141#define DIV_MULT(ORDER) inverse_table[ORDER].mult
142#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
143#define OFFSET_TO_BIT(OFFSET, ORDER) \
144 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
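
/* A hedged worked example of the exact-division trick, assuming a page
   of 24-byte objects and a 32-bit size_t: compute_inverse (below) stores
   DIV_SHIFT == 3 (the power-of-two factor of 24) and DIV_MULT ==
   0xAAAAAAAB (the multiplicative inverse of 3 modulo 2^32), so for an
   offset of 48 bytes

     48 * 0xAAAAAAAB == 16   (mod 2^32),   and 16 >> 3 == 2 == 48 / 24

   which is the bit index of the third object on the page.  */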
145
146/* We use this structure to determine the alignment required for
147 allocations. For power-of-two sized allocations, that's not a
148 problem, but it does matter for odd-sized allocations.
149 We do not care about alignment for floating-point types. */
150
151struct max_alignment {
152 char c;
153 union {
154 int64_t i;
155 void *p;
156 } u;
157};
158
159/* The biggest alignment required. */
160
161#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
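
/* For illustration: on a typical LP64 host both int64_t and void * need
   8-byte alignment, so offsetof (struct max_alignment, u) == 8 and the
   odd-sized extra orders below get rounded up to multiples of 8.  A
   hypothetical sanity check (not in the real code) would be

     static_assert (MAX_ALIGNMENT >= alignof (void *), "alignment");

   since the offset of the union can never be smaller than the strictest
   member alignment.  */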
162
163
164/* The number of extra orders, not corresponding to power-of-two sized
165 objects. */
166
167#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
168
169#define RTL_SIZE(NSLOTS) \
170 (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
171
172#define TREE_EXP_SIZE(OPS) \
173 (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
174
175/* The Ith entry is the maximum size of an object to be stored in the
176 Ith extra order. Adding a new entry to this array is the *only*
177 thing you need to do to add a new special allocation size. */
178
179static const size_t extra_order_size_table[] = {
180 /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
181 There are a lot of structures with these sizes and explicitly
182 listing them risks orders being dropped because they changed size. */
183 MAX_ALIGNMENT * 3,
184 MAX_ALIGNMENT * 5,
185 MAX_ALIGNMENT * 6,
186 MAX_ALIGNMENT * 7,
187 MAX_ALIGNMENT * 9,
188 MAX_ALIGNMENT * 10,
189 MAX_ALIGNMENT * 11,
190 MAX_ALIGNMENT * 12,
191 MAX_ALIGNMENT * 13,
192 MAX_ALIGNMENT * 14,
193 MAX_ALIGNMENT * 15,
194 sizeof (struct tree_decl_non_common),
195 sizeof (struct tree_field_decl),
196 sizeof (struct tree_parm_decl),
197 sizeof (struct tree_var_decl),
198 sizeof (struct tree_type_non_common),
199 sizeof (struct function),
200 sizeof (struct basic_block_def),
201 sizeof (struct cgraph_node),
202 sizeof (class loop),
203};
204
205/* The total number of orders. */
206
207#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
208
209/* Compute the smallest nonnegative number which when added to X gives
210 a multiple of F. */
211
212#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
213
214/* Round X to next multiple of the page size */
215
216#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
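
/* Hedged arithmetic check of the two macros above: with x == 10 and
   f == 8, ((8 - 1 + 10) % 8) == 1, so ROUND_UP_VALUE (10, 8) == 6 and
   10 + 6 == 16 is indeed the next multiple of 8; likewise, with 4K
   pages, PAGE_ALIGN (5000) == 8192.  */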
217
/* The Ith entry is the number of objects on a page of order I.  */
219
220static unsigned objects_per_page_table[NUM_ORDERS];
221
222/* The Ith entry is the size of an object on a page of order I. */
223
224static size_t object_size_table[NUM_ORDERS];
225
226/* The Ith entry is a pair of numbers (mult, shift) such that
227 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
228 for all k evenly divisible by OBJECT_SIZE(I). */
229
230static struct
231{
232 size_t mult;
233 unsigned int shift;
234}
235inverse_table[NUM_ORDERS];
236
237struct free_list;
238
239/* A page_entry records the status of an allocation page. This
240 structure is dynamically sized to fit the bitmap in_use_p. */
241struct page_entry
242{
243 /* The next page-entry with objects of the same size, or NULL if
244 this is the last page-entry. */
245 struct page_entry *next;
246
247 /* The previous page-entry with objects of the same size, or NULL if
248 this is the first page-entry. The PREV pointer exists solely to
249 keep the cost of ggc_free manageable. */
250 struct page_entry *prev;
251
252 /* The number of bytes allocated. (This will always be a multiple
253 of the host system page size.) */
254 size_t bytes;
255
256 /* Free list of this page size. */
257 struct free_list *free_list;
258
259 /* The address at which the memory is allocated. */
260 char *page;
261
262#ifdef USING_MALLOC_PAGE_GROUPS
263 /* Back pointer to the page group this page came from. */
264 struct page_group *group;
265#endif
266
267 /* This is the index in the by_depth varray where this page table
268 can be found. */
269 unsigned long index_by_depth;
270
271 /* Context depth of this page. */
272 unsigned short context_depth;
273
274 /* The number of free objects remaining on this page. */
275 unsigned short num_free_objects;
276
277 /* A likely candidate for the bit position of a free object for the
278 next allocation from this page. */
279 unsigned short next_bit_hint;
280
281 /* The lg of size of objects allocated from this page. */
282 unsigned char order;
283
284 /* Discarded page? */
285 bool discarded;
286
287 /* A bit vector indicating whether or not objects are in use. The
288 Nth bit is one if the Nth object on this page is allocated. This
289 array is dynamically sized. */
290 unsigned long in_use_p[1];
291};
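
/* A hedged sketch of how a page_entry is actually created: because the
   trailing in_use_p[] bitmap is dynamically sized, alloc_page (below)
   over-allocates the structure, roughly

     size_t n  = OBJECTS_PER_PAGE (order);
     size_t sz = sizeof (page_entry) - sizeof (long)
                 + BITMAP_SIZE (n + 1);   -- one extra bit as a sentinel
     page_entry *e = XCNEWVAR (struct page_entry, sz);

   rather than using a plain XCNEW.  */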
292
293#ifdef USING_MALLOC_PAGE_GROUPS
294/* A page_group describes a large allocation from malloc, from which
295 we parcel out aligned pages. */
296struct page_group
297{
298 /* A linked list of all extant page groups. */
299 struct page_group *next;
300
301 /* The address we received from malloc. */
302 char *allocation;
303
304 /* The size of the block. */
305 size_t alloc_size;
306
307 /* A bitmask of pages in use. */
308 unsigned int in_use;
309};
310#endif
311
312#if HOST_BITS_PER_PTR <= 32
313
314/* On 32-bit hosts, we use a two level page table, as pictured above. */
315typedef page_entry **page_table[PAGE_L1_SIZE];
316
317#else
318
319/* On 64-bit hosts, we use the same two level page tables plus a linked
320 list that disambiguates the top 32-bits. There will almost always be
321 exactly one entry in the list. */
322typedef struct page_table_chain
323{
324 struct page_table_chain *next;
325 size_t high_bits;
326 page_entry **table[PAGE_L1_SIZE];
327} *page_table;
328
329#endif
330
331class finalizer
332{
333public:
334 finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
335
336 void *addr () const { return m_addr; }
337
338 void call () const { m_function (m_addr); }
339
340private:
341 void *m_addr;
342 void (*m_function)(void *);
343};
344
345class vec_finalizer
346{
347public:
348 vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
349 m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
350
351 void call () const
352 {
353 for (size_t i = 0; i < m_n_objects; i++)
354 m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
355 }
356
357 void *addr () const { return reinterpret_cast<void *> (m_addr); }
358
359private:
360 uintptr_t m_addr;
361 void (*m_function)(void *);
362 size_t m_object_size;
363 size_t m_n_objects;
364};
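
/* A hedged illustration of the finalizer machinery; `my_node' and
   `my_node_dtor' are hypothetical names.  When ggc_internal_alloc is
   given a non-null function pointer, add_finalizer (below) records it,
   and the collector invokes it on objects that did not survive marking:

     static void my_node_dtor (void *p) { ((my_node *) p)->~my_node (); }

     void *obj = ggc_internal_alloc (sizeof (my_node), my_node_dtor,
                                     sizeof (my_node), 1);

   With n > 1 a vec_finalizer is recorded instead, and its call () runs
   the function once per element of the vector.  */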
365
366#ifdef ENABLE_GC_ALWAYS_COLLECT
367/* List of free objects to be verified as actually free on the
368 next collection. */
369struct free_object
370{
371 void *object;
372 struct free_object *next;
373};
374#endif
375
376constexpr int num_free_list = 8;
377
378/* A free_list for pages with BYTES size. */
379struct free_list
380{
381 size_t bytes;
382 page_entry *free_pages;
383};
384
385/* The rest of the global variables. */
386static struct ggc_globals
387{
388 /* The Nth element in this array is a page with objects of size 2^N.
389 If there are any pages with free objects, they will be at the
390 head of the list. NULL if there are no page-entries for this
391 object size. */
392 page_entry *pages[NUM_ORDERS];
393
394 /* The Nth element in this array is the last page with objects of
395 size 2^N. NULL if there are no page-entries for this object
396 size. */
397 page_entry *page_tails[NUM_ORDERS];
398
399 /* Lookup table for associating allocation pages with object addresses. */
400 page_table lookup;
401
402 /* The system's page size. */
403 size_t pagesize;
404 size_t lg_pagesize;
405
406 /* Bytes currently allocated. */
407 size_t allocated;
408
409 /* Bytes currently allocated at the end of the last collection. */
410 size_t allocated_last_gc;
411
412 /* Total amount of memory mapped. */
413 size_t bytes_mapped;
414
415 /* Bit N set if any allocations have been done at context depth N. */
416 unsigned long context_depth_allocations;
417
418 /* Bit N set if any collections have been done at context depth N. */
419 unsigned long context_depth_collections;
420
421 /* The current depth in the context stack. */
422 unsigned short context_depth;
423
424 /* A file descriptor open to /dev/zero for reading. */
425#if defined (HAVE_MMAP_DEV_ZERO)
426 int dev_zero_fd;
427#endif
428
429 /* A cache of free system pages. Entry 0 is fallback. */
430 struct free_list free_lists[num_free_list];
431
432#ifdef USING_MALLOC_PAGE_GROUPS
433 page_group *page_groups;
434#endif
435
436 /* The file descriptor for debugging output. */
437 FILE *debug_file;
438
439 /* Current number of elements in use in depth below. */
440 unsigned int depth_in_use;
441
442 /* Maximum number of elements that can be used before resizing. */
443 unsigned int depth_max;
444
445 /* Each element of this array is an index in by_depth where the given
446 depth starts. This structure is indexed by that given depth we
447 are interested in. */
448 unsigned int *depth;
449
450 /* Current number of elements in use in by_depth below. */
451 unsigned int by_depth_in_use;
452
453 /* Maximum number of elements that can be used before resizing. */
454 unsigned int by_depth_max;
455
456 /* Each element of this array is a pointer to a page_entry, all
457 page_entries can be found in here by increasing depth.
458 index_by_depth in the page_entry is the index into this data
459 structure where that page_entry can be found. This is used to
460 speed up finding all page_entries at a particular depth. */
461 page_entry **by_depth;
462
463 /* Each element is a pointer to the saved in_use_p bits, if any,
464 zero otherwise. We allocate them all together, to enable a
465 better runtime data access pattern. */
466 unsigned long **save_in_use;
467
468 /* Finalizers for single objects. The first index is collection_depth. */
469 vec<vec<finalizer> > finalizers;
470
471 /* Finalizers for vectors of objects. */
472 vec<vec<vec_finalizer> > vec_finalizers;
473
474#ifdef ENABLE_GC_ALWAYS_COLLECT
475 /* List of free objects to be verified as actually free on the
476 next collection. */
477 struct free_object *free_object_list;
478#endif
479
480 struct
481 {
482 /* Total GC-allocated memory. */
483 unsigned long long total_allocated;
484 /* Total overhead for GC-allocated memory. */
485 unsigned long long total_overhead;
486
487 /* Total allocations and overhead for sizes less than 32, 64 and 128.
488 These sizes are interesting because they are typical cache line
489 sizes. */
490
491 unsigned long long total_allocated_under32;
492 unsigned long long total_overhead_under32;
493
494 unsigned long long total_allocated_under64;
495 unsigned long long total_overhead_under64;
496
497 unsigned long long total_allocated_under128;
498 unsigned long long total_overhead_under128;
499
500 /* The allocations for each of the allocation orders. */
501 unsigned long long total_allocated_per_order[NUM_ORDERS];
502
503 /* The overhead for each of the allocation orders. */
504 unsigned long long total_overhead_per_order[NUM_ORDERS];
505
506 /* Number of fallbacks. */
507 unsigned long long fallback;
508 } stats;
509} G;
510
511/* True if a gc is currently taking place. */
512
513static bool in_gc = false;
514
515/* The size in bytes required to maintain a bitmap for the objects
516 on a page-entry. */
517#define BITMAP_SIZE(Num_objects) \
518 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
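
/* Hedged example of the bitmap sizing: a 4K page of 8-byte objects holds
   512 objects, and together with the one-past-the-end sentinel bit that
   alloc_page sets, BITMAP_SIZE (512 + 1) == CEIL (513, 64) * 8 == 72
   bytes (nine longs) on a 64-bit host.  */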
519
520/* Allocate pages in chunks of this size, to throttle calls to memory
521 allocation routines. The first page is used, the rest go onto the
522 free list. This cannot be larger than HOST_BITS_PER_INT for the
523 in_use bitmask for page_group. Hosts that need a different value
524 can override this by defining GGC_QUIRE_SIZE explicitly. */
525#ifndef GGC_QUIRE_SIZE
526# ifdef USING_MMAP
527# define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
528# else
529# define GGC_QUIRE_SIZE 16
530# endif
531#endif
532
533/* Initial guess as to how many page table entries we might need. */
534#define INITIAL_PTE_COUNT 128
535
536static page_entry *lookup_page_table_entry (const void *);
537static void set_page_table_entry (void *, page_entry *);
538#ifdef USING_MMAP
539static char *alloc_anon (char *, size_t, bool check);
540#endif
541#ifdef USING_MALLOC_PAGE_GROUPS
542static size_t page_group_index (char *, char *);
543static void set_page_group_in_use (page_group *, char *);
544static void clear_page_group_in_use (page_group *, char *);
545#endif
546static struct page_entry * alloc_page (unsigned);
547static void free_page (struct page_entry *);
548static void clear_marks (void);
549static void sweep_pages (void);
550static void ggc_recalculate_in_use_p (page_entry *);
551static void compute_inverse (unsigned);
552static inline void adjust_depth (void);
553static void move_ptes_to_front (int, int);
554
555void debug_print_page_list (int);
556static void push_depth (unsigned int);
557static void push_by_depth (page_entry *, unsigned long *);
558
559/* Push an entry onto G.depth. */
560
561inline static void
562push_depth (unsigned int i)
563{
564 if (G.depth_in_use >= G.depth_max)
565 {
566 G.depth_max *= 2;
567 G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
568 }
569 G.depth[G.depth_in_use++] = i;
570}
571
572/* Push an entry onto G.by_depth and G.save_in_use. */
573
574inline static void
575push_by_depth (page_entry *p, unsigned long *s)
576{
577 if (G.by_depth_in_use >= G.by_depth_max)
578 {
579 G.by_depth_max *= 2;
580 G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
581 G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
582 G.by_depth_max);
583 }
584 G.by_depth[G.by_depth_in_use] = p;
585 G.save_in_use[G.by_depth_in_use++] = s;
586}
587
588#if (GCC_VERSION < 3001)
589#define prefetch(X) ((void) X)
590#else
591#define prefetch(X) __builtin_prefetch (X)
592#endif
593
594#define save_in_use_p_i(__i) \
595 (G.save_in_use[__i])
596#define save_in_use_p(__p) \
597 (save_in_use_p_i (__p->index_by_depth))
598
599/* Traverse the page table and find the entry for a page.
   If the object wasn't allocated by the GC, return NULL.  */
601
602static inline page_entry *
603safe_lookup_page_table_entry (const void *p)
604{
605 page_entry ***base;
606 size_t L1, L2;
607
608#if HOST_BITS_PER_PTR <= 32
609 base = &G.lookup[0];
610#else
611 page_table table = G.lookup;
612 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
613 while (1)
614 {
615 if (table == NULL)
616 return NULL;
617 if (table->high_bits == high_bits)
618 break;
619 table = table->next;
620 }
621 base = &table->table[0];
622#endif
623
624 /* Extract the level 1 and 2 indices. */
625 L1 = LOOKUP_L1 (p);
626 L2 = LOOKUP_L2 (p);
627 if (! base[L1])
628 return NULL;
629
630 return base[L1][L2];
631}
632
633/* Traverse the page table and find the entry for a page.
634 Die (probably) if the object wasn't allocated via GC. */
635
636static inline page_entry *
637lookup_page_table_entry (const void *p)
638{
639 page_entry ***base;
640 size_t L1, L2;
641
642#if HOST_BITS_PER_PTR <= 32
643 base = &G.lookup[0];
644#else
645 page_table table = G.lookup;
646 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
647 while (table->high_bits != high_bits)
648 table = table->next;
649 base = &table->table[0];
650#endif
651
652 /* Extract the level 1 and 2 indices. */
653 L1 = LOOKUP_L1 (p);
654 L2 = LOOKUP_L2 (p);
655
656 return base[L1][L2];
657}
658
659/* Set the page table entry for a page. */
660
661static void
662set_page_table_entry (void *p, page_entry *entry)
663{
664 page_entry ***base;
665 size_t L1, L2;
666
667#if HOST_BITS_PER_PTR <= 32
668 base = &G.lookup[0];
669#else
670 page_table table;
671 uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
672 for (table = G.lookup; table; table = table->next)
673 if (table->high_bits == high_bits)
674 goto found;
675
676 /* Not found -- allocate a new table. */
677 table = XCNEW (struct page_table_chain);
678 table->next = G.lookup;
679 table->high_bits = high_bits;
680 G.lookup = table;
681found:
682 base = &table->table[0];
683#endif
684
685 /* Extract the level 1 and 2 indices. */
686 L1 = LOOKUP_L1 (p);
687 L2 = LOOKUP_L2 (p);
688
689 if (base[L1] == NULL)
690 base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
691
692 base[L1][L2] = entry;
693}
694
695/* Prints the page-entry for object size ORDER, for debugging. */
696
697DEBUG_FUNCTION void
698debug_print_page_list (int order)
699{
700 page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
          (void *) G.page_tails[order]);
703 p = G.pages[order];
704 while (p != NULL)
705 {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
707 p->num_free_objects);
708 p = p->next;
709 }
  printf ("NULL\n");
711 fflush (stdout);
712}
713
714#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
716 (if non-null). The ifdef structure here is intended to cause a
717 compile error unless exactly one of the HAVE_* is defined. */
718
719static inline char *
720alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
721{
722#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
725#endif
726#ifdef HAVE_MMAP_DEV_ZERO
727 char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
728 MAP_PRIVATE, G.dev_zero_fd, 0);
729#endif
730
731 if (page == (char *) MAP_FAILED)
732 {
733 if (!check)
734 return NULL;
      perror ("virtual memory exhausted");
736 exit (FATAL_EXIT_CODE);
737 }
738
739 /* Remember that we allocated this memory. */
740 G.bytes_mapped += size;
741
742 /* Pretend we don't have access to the allocated pages. We'll enable
743 access to smaller pieces of the area in ggc_internal_alloc. Discard the
744 handle to avoid handle leak. */
745 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
746
747 return page;
748}
749#endif
750#ifdef USING_MALLOC_PAGE_GROUPS
751/* Compute the index for this page into the page group. */
752
753static inline size_t
754page_group_index (char *allocation, char *page)
755{
756 return (size_t) (page - allocation) >> G.lg_pagesize;
757}
758
759/* Set and clear the in_use bit for this page in the page group. */
760
761static inline void
762set_page_group_in_use (page_group *group, char *page)
763{
764 group->in_use |= 1 << page_group_index (group->allocation, page);
765}
766
767static inline void
768clear_page_group_in_use (page_group *group, char *page)
769{
770 group->in_use &= ~(1 << page_group_index (group->allocation, page));
771}
772#endif
773
774/* Find a free list for ENTRY_SIZE. */
775
776static inline struct free_list *
777find_free_list (size_t entry_size)
778{
779 int i;
780 for (i = 1; i < num_free_list; i++)
781 {
782 if (G.free_lists[i].bytes == entry_size)
783 return &G.free_lists[i];
784 if (G.free_lists[i].bytes == 0)
785 {
786 G.free_lists[i].bytes = entry_size;
787 return &G.free_lists[i];
788 }
789 }
790 /* Fallback. Does this happen? */
791 if (GATHER_STATISTICS)
792 G.stats.fallback++;
793 return &G.free_lists[0];
794}
795
796/* Fast lookup of free_list by order. */
797
798static struct free_list *cache_free_list[NUM_ORDERS];
799
800/* Faster way to find a free list by ORDER for BYTES. */
801
802static inline struct free_list *
803find_free_list_order (unsigned order, size_t bytes)
804{
805 if (cache_free_list[order] == NULL)
    cache_free_list[order] = find_free_list (bytes);
807 return cache_free_list[order];
808}
809
810/* Allocate a new page for allocating objects of size 2^ORDER,
811 and return an entry for it. The entry is not added to the
812 appropriate page_table list. */
813
814static inline struct page_entry *
815alloc_page (unsigned order)
816{
817 struct page_entry *entry, *p, **pp;
818 char *page;
819 size_t num_objects;
820 size_t bitmap_size;
821 size_t page_entry_size;
822 size_t entry_size;
823#ifdef USING_MALLOC_PAGE_GROUPS
824 page_group *group;
825#endif
826 struct free_list *free_list;
827
828 num_objects = OBJECTS_PER_PAGE (order);
829 bitmap_size = BITMAP_SIZE (num_objects + 1);
830 page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
831 entry_size = num_objects * OBJECT_SIZE (order);
832 if (entry_size < G.pagesize)
833 entry_size = G.pagesize;
834 entry_size = PAGE_ALIGN (entry_size);
835
836 entry = NULL;
837 page = NULL;
838
  free_list = find_free_list_order (order, entry_size);
840
841 /* Check the list of free pages for one we can use. */
842 for (pp = &free_list->free_pages, p = *pp; p; pp = &p->next, p = *pp)
843 if (p->bytes == entry_size)
844 break;
845
846 if (p != NULL)
847 {
848 if (p->discarded)
849 G.bytes_mapped += p->bytes;
850 p->discarded = false;
851
852 /* Recycle the allocated memory from this page ... */
853 *pp = p->next;
854 page = p->page;
855
856#ifdef USING_MALLOC_PAGE_GROUPS
857 group = p->group;
858#endif
859
860 /* ... and, if possible, the page entry itself. */
861 if (p->order == order)
862 {
863 entry = p;
          memset (entry, 0, page_entry_size);
865 }
866 else
        free (p);
868 }
869#ifdef USING_MMAP
870 else if (entry_size == G.pagesize)
871 {
872 /* We want just one page. Allocate a bunch of them and put the
873 extras on the freelist. (Can only do this optimization with
874 mmap for backing store.) */
875 struct page_entry *e, *f = free_list->free_pages;
876 int i, entries = GGC_QUIRE_SIZE;
877
      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
879 if (page == NULL)
880 {
          page = alloc_anon (NULL, G.pagesize, true);
882 entries = 1;
883 }
884
885 /* This loop counts down so that the chain will be in ascending
886 memory order. */
887 for (i = entries - 1; i >= 1; i--)
888 {
889 e = XCNEWVAR (struct page_entry, page_entry_size);
890 e->order = order;
891 e->bytes = G.pagesize;
892 e->free_list = free_list;
893 e->page = page + (i << G.lg_pagesize);
894 e->next = f;
895 f = e;
896 }
897
898 free_list->free_pages = f;
899 }
900 else
    page = alloc_anon (NULL, entry_size, true);
902#endif
903#ifdef USING_MALLOC_PAGE_GROUPS
904 else
905 {
906 /* Allocate a large block of memory and serve out the aligned
907 pages therein. This results in much less memory wastage
908 than the traditional implementation of valloc. */
909
910 char *allocation, *a, *enda;
911 size_t alloc_size, head_slop, tail_slop;
912 int multiple_pages = (entry_size == G.pagesize);
913
914 if (multiple_pages)
915 alloc_size = GGC_QUIRE_SIZE * G.pagesize;
916 else
917 alloc_size = entry_size + G.pagesize - 1;
918 allocation = XNEWVEC (char, alloc_size);
919
920 page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
921 head_slop = page - allocation;
922 if (multiple_pages)
923 tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
924 else
925 tail_slop = alloc_size - entry_size - head_slop;
926 enda = allocation + alloc_size - tail_slop;
927
928 /* We allocated N pages, which are likely not aligned, leaving
929 us with N-1 usable pages. We plan to place the page_group
930 structure somewhere in the slop. */
931 if (head_slop >= sizeof (page_group))
932 group = (page_group *)page - 1;
933 else
934 {
935 /* We magically got an aligned allocation. Too bad, we have
936 to waste a page anyway. */
937 if (tail_slop == 0)
938 {
939 enda -= G.pagesize;
940 tail_slop += G.pagesize;
941 }
942 gcc_assert (tail_slop >= sizeof (page_group));
943 group = (page_group *)enda;
944 tail_slop -= sizeof (page_group);
945 }
946
947 /* Remember that we allocated this memory. */
948 group->next = G.page_groups;
949 group->allocation = allocation;
950 group->alloc_size = alloc_size;
951 group->in_use = 0;
952 G.page_groups = group;
953 G.bytes_mapped += alloc_size;
954
955 /* If we allocated multiple pages, put the rest on the free list. */
956 if (multiple_pages)
957 {
958 struct page_entry *e, *f = free_list->free_pages;
959 for (a = enda - G.pagesize; a != page; a -= G.pagesize)
960 {
961 e = XCNEWVAR (struct page_entry, page_entry_size);
962 e->order = order;
963 e->bytes = G.pagesize;
964 e->free_list = free_list;
965 e->page = a;
966 e->group = group;
967 e->next = f;
968 f = e;
969 }
970 free_list->free_pages = f;
971 }
972 }
973#endif
974
975 if (entry == NULL)
976 entry = XCNEWVAR (struct page_entry, page_entry_size);
977
978 entry->bytes = entry_size;
979 entry->free_list = free_list;
980 entry->page = page;
981 entry->context_depth = G.context_depth;
982 entry->order = order;
983 entry->num_free_objects = num_objects;
984 entry->next_bit_hint = 1;
985
986 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
987
988#ifdef USING_MALLOC_PAGE_GROUPS
989 entry->group = group;
990 set_page_group_in_use (group, page);
991#endif
992
993 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
994 increment the hint. */
995 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
996 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
997
  set_page_table_entry (page, entry);
999
1000 if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Allocating page at %p, object size="
1003 HOST_SIZE_T_PRINT_UNSIGNED ", data %p-%p\n",
1004 (void *) entry, (fmt_size_t) OBJECT_SIZE (order),
1005 (void *) page, (void *) (page + entry_size - 1));
1006
1007 return entry;
1008}
1009
1010/* Adjust the size of G.depth so that no index greater than the one
1011 used by the top of the G.by_depth is used. */
1012
1013static inline void
1014adjust_depth (void)
1015{
1016 page_entry *top;
1017
1018 if (G.by_depth_in_use)
1019 {
1020 top = G.by_depth[G.by_depth_in_use-1];
1021
1022 /* Peel back indices in depth that index into by_depth, so that
1023 as new elements are added to by_depth, we note the indices
1024 of those elements, if they are for new context depths. */
1025 while (G.depth_in_use > (size_t)top->context_depth+1)
1026 --G.depth_in_use;
1027 }
1028}
1029
1030/* For a page that is no longer needed, put it on the free page list. */
1031
1032static void
1033free_page (page_entry *entry)
1034{
1035 if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
             "Deallocating page at %p, data %p-%p\n", (void *) entry,
1038 (void *) entry->page, (void *) (entry->page + entry->bytes - 1));
1039
1040 /* Mark the page as inaccessible. Discard the handle to avoid handle
1041 leak. */
1042 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
1043
  set_page_table_entry (entry->page, NULL);
1045
1046#ifdef USING_MALLOC_PAGE_GROUPS
1047 clear_page_group_in_use (entry->group, entry->page);
1048#endif
1049
1050 if (G.by_depth_in_use > 1)
1051 {
1052 page_entry *top = G.by_depth[G.by_depth_in_use-1];
1053 int i = entry->index_by_depth;
1054
1055 /* We cannot free a page from a context deeper than the current
1056 one. */
1057 gcc_assert (entry->context_depth == top->context_depth);
1058
1059 /* Put top element into freed slot. */
1060 G.by_depth[i] = top;
1061 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1062 top->index_by_depth = i;
1063 }
1064 --G.by_depth_in_use;
1065
1066 adjust_depth ();
1067
1068 struct free_list *free_list = entry->free_list;
1069 entry->next = free_list->free_pages;
1070 free_list->free_pages = entry;
1071}
1072
1073/* Release the free page cache for FREE_LIST to the system. */
1074
1075static void
1076do_release_pages (struct free_list *free_list, size_t &n1, size_t &n2)
1077{
1078 (void) n1;
1079 (void) n2;
1080#ifdef USING_MADVISE
1081 page_entry *p, *start_p;
1082 char *start;
1083 size_t len;
1084 size_t mapped_len;
1085 page_entry *next, *prev, *newprev;
1086 size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1087
  /* First free larger contiguous areas to the OS.
1089 This allows other allocators to grab these areas if needed.
1090 This is only done on larger chunks to avoid fragmentation.
1091 This does not always work because the free_pages list is only
1092 approximately sorted. */
1093
1094 p = free_list->free_pages;
1095 prev = NULL;
1096 while (p)
1097 {
1098 start = p->page;
1099 start_p = p;
1100 len = 0;
1101 mapped_len = 0;
1102 newprev = prev;
1103 while (p && p->page == start + len)
1104 {
1105 len += p->bytes;
1106 if (!p->discarded)
1107 mapped_len += p->bytes;
1108 newprev = p;
1109 p = p->next;
1110 }
1111 if (len >= free_unit)
1112 {
1113 while (start_p != p)
1114 {
1115 next = start_p->next;
              free (start_p);
1117 start_p = next;
1118 }
          munmap (start, len);
1120 if (prev)
1121 prev->next = p;
1122 else
1123 free_list->free_pages = p;
1124 G.bytes_mapped -= mapped_len;
1125 n1 += len;
1126 continue;
1127 }
1128 prev = newprev;
1129 }
1130
1131 /* Now give back the fragmented pages to the OS, but keep the address
1132 space to reuse it next time. */
1133
1134 for (p = free_list->free_pages; p; )
1135 {
1136 if (p->discarded)
1137 {
1138 p = p->next;
1139 continue;
1140 }
1141 start = p->page;
1142 len = p->bytes;
1143 start_p = p;
1144 p = p->next;
1145 while (p && p->page == start + len)
1146 {
1147 len += p->bytes;
1148 p = p->next;
1149 }
1150 /* Give the page back to the kernel, but don't free the mapping.
1151 This avoids fragmentation in the virtual memory map of the
1152 process. Next time we can reuse it by just touching it. */
      madvise (start, len, MADV_DONTNEED);
1154 /* Don't count those pages as mapped to not touch the garbage collector
1155 unnecessarily. */
1156 G.bytes_mapped -= len;
1157 n2 += len;
1158 while (start_p != p)
1159 {
1160 start_p->discarded = true;
1161 start_p = start_p->next;
1162 }
1163 }
1164#endif
1165#if defined(USING_MMAP) && !defined(USING_MADVISE)
1166 page_entry *p, *next;
1167 char *start;
1168 size_t len;
1169
1170 /* Gather up adjacent pages so they are unmapped together. */
1171 p = free_list->free_pages;
1172
1173 while (p)
1174 {
1175 start = p->page;
1176 next = p->next;
1177 len = p->bytes;
1178 free (p);
1179 p = next;
1180
1181 while (p && p->page == start + len)
1182 {
1183 next = p->next;
1184 len += p->bytes;
1185 free (p);
1186 p = next;
1187 }
1188
1189 munmap (start, len);
1190 n1 += len;
1191 G.bytes_mapped -= len;
1192 }
1193
1194 free_list->free_pages = NULL;
1195#endif
1196#ifdef USING_MALLOC_PAGE_GROUPS
1197 page_entry **pp, *p;
1198
1199 /* Remove all pages from free page groups from the list. */
1200 pp = &free_list->free_pages;
1201 while ((p = *pp) != NULL)
1202 if (p->group->in_use == 0)
1203 {
1204 *pp = p->next;
1205 free (p);
1206 }
1207 else
1208 pp = &p->next;
1209#endif
1210}
1211
1212/* Release the free page cache to the system. */
1213
1214static void
1215release_pages ()
1216{
1217 size_t n1 = 0;
1218 size_t n2 = 0;
1219 for (int i = 0; i < num_free_list; i++)
    do_release_pages (&G.free_lists[i], n1, n2);
1221#ifdef USING_MALLOC_PAGE_GROUPS
1222 page_group **gp, *g;
1223
1224 /* Remove all free page groups, and release the storage. */
1225 gp = &G.page_groups;
1226 while ((g = *gp) != NULL)
1227 if (g->in_use == 0)
1228 {
1229 *gp = g->next;
1230 G.bytes_mapped -= g->alloc_size;
1231 n1 += g->alloc_size;
1232 free (g->allocation);
1233 }
1234 else
1235 gp = &g->next;
1236#endif
1237 if (!quiet_flag && (n1 || n2))
1238 {
      fprintf (stderr, " {GC");
      if (n1)
        fprintf (stderr, " released " PRsa (0), SIZE_AMOUNT (n1));
      if (n2)
        fprintf (stderr, " madv_dontneed " PRsa (0), SIZE_AMOUNT (n2));
      fprintf (stderr, "}");
1245 }
1246}
1247
1248/* This table provides a fast way to determine ceil(log_2(size)) for
1249 allocation requests. The minimum allocation size is eight bytes. */
1250#define NUM_SIZE_LOOKUP 512
1251static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1252{
1253 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1254 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1255 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1256 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1257 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1258 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1259 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1260 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1261 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1262 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1263 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1264 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1265 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1266 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1267 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1268 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1269 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1270 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1271 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1272 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1273 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1274 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1275 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1276 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1277 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1278 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1279 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1280 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1281 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1282 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1283 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1284 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1285};
1286
1287/* For a given size of memory requested for allocation, return the
1288 actual size that is going to be allocated, as well as the size
1289 order. */
1290
1291static void
1292ggc_round_alloc_size_1 (size_t requested_size,
1293 size_t *size_order,
1294 size_t *alloced_size)
1295{
1296 size_t order, object_size;
1297
1298 if (requested_size < NUM_SIZE_LOOKUP)
1299 {
1300 order = size_lookup[requested_size];
1301 object_size = OBJECT_SIZE (order);
1302 }
1303 else
1304 {
1305 order = 10;
1306 while (requested_size > (object_size = OBJECT_SIZE (order)))
1307 order++;
1308 }
1309
1310 if (size_order)
1311 *size_order = order;
1312 if (alloced_size)
1313 *alloced_size = object_size;
1314}
1315
1316/* For a given size of memory requested for allocation, return the
1317 actual size that is going to be allocated. */
1318
1319size_t
1320ggc_round_alloc_size (size_t requested_size)
1321{
1322 size_t size = 0;
1323
  ggc_round_alloc_size_1 (requested_size, NULL, &size);
1325 return size;
1326}
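
/* Hedged usage example of the rounding query above, assuming 64-bit
   pointers and MAX_ALIGNMENT == 8:

     ggc_round_alloc_size (1)   == 8      -- smallest order, 2^3
     ggc_round_alloc_size (20)  == 24     -- extra order MAX_ALIGNMENT * 3
     ggc_round_alloc_size (600) == 1024   -- falls back to powers of two

   Callers can consult this to size trailing arrays so that padding the
   allocator would add anyway is put to use.  */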
1327
1328/* Push a finalizer onto the appropriate vec. */
1329
1330static void
1331add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
1332{
1333 if (f == NULL)
1334 /* No finalizer. */;
1335 else if (n == 1)
1336 {
1337 finalizer fin (result, f);
      G.finalizers[G.context_depth].safe_push (fin);
1339 }
1340 else
1341 {
1342 vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
      G.vec_finalizers[G.context_depth].safe_push (fin);
1344 }
1345}
1346
1347/* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1348
1349#ifdef HAVE_ATTRIBUTE_ALIAS
1350extern "C" void *
1351ggc_internal_alloc_ (size_t size, void (*f)(void *), size_t s, size_t n
1352 MEM_STAT_DECL)
1353#else
1354void *
1355ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1356 MEM_STAT_DECL)
1357#endif
1358{
1359 size_t order, word, bit, object_offset, object_size;
1360 struct page_entry *entry;
1361 void *result;
1362
  ggc_round_alloc_size_1 (size, &order, &object_size);
1364
1365 /* If there are non-full pages for this size allocation, they are at
1366 the head of the list. */
1367 entry = G.pages[order];
1368
1369 /* If there is no page for this object size, or all pages in this
1370 context are full, allocate a new page. */
1371 if (entry == NULL || entry->num_free_objects == 0)
1372 {
1373 struct page_entry *new_entry;
1374 new_entry = alloc_page (order);
1375
1376 new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);
1378
1379 /* We can skip context depths, if we do, make sure we go all the
1380 way to the new depth. */
1381 while (new_entry->context_depth >= G.depth_in_use)
        push_depth (G.by_depth_in_use - 1);
1383
1384 /* If this is the only entry, it's also the tail. If it is not
1385 the only entry, then we must update the PREV pointer of the
1386 ENTRY (G.pages[order]) to point to our new page entry. */
1387 if (entry == NULL)
1388 G.page_tails[order] = new_entry;
1389 else
1390 entry->prev = new_entry;
1391
1392 /* Put new pages at the head of the page list. By definition the
         entry at the head of the list always has a NULL PREV pointer.  */
1394 new_entry->next = entry;
1395 new_entry->prev = NULL;
1396 entry = new_entry;
1397 G.pages[order] = new_entry;
1398
1399 /* For a new page, we know the word and bit positions (in the
1400 in_use bitmap) of the first available object -- they're zero. */
1401 new_entry->next_bit_hint = 1;
1402 word = 0;
1403 bit = 0;
1404 object_offset = 0;
1405 }
1406 else
1407 {
1408 /* First try to use the hint left from the previous allocation
1409 to locate a clear bit in the in-use bitmap. We've made sure
1410 that the one-past-the-end bit is always set, so if the hint
1411 has run over, this test will fail. */
1412 unsigned hint = entry->next_bit_hint;
1413 word = hint / HOST_BITS_PER_LONG;
1414 bit = hint % HOST_BITS_PER_LONG;
1415
1416 /* If the hint didn't work, scan the bitmap from the beginning. */
1417 if ((entry->in_use_p[word] >> bit) & 1)
1418 {
1419 word = bit = 0;
1420 while (~entry->in_use_p[word] == 0)
1421 ++word;
1422
1423#if GCC_VERSION >= 3004
1424 bit = __builtin_ctzl (~entry->in_use_p[word]);
1425#else
1426 while ((entry->in_use_p[word] >> bit) & 1)
1427 ++bit;
1428#endif
1429
1430 hint = word * HOST_BITS_PER_LONG + bit;
1431 }
1432
1433 /* Next time, try the next bit. */
1434 entry->next_bit_hint = hint + 1;
1435
1436 object_offset = hint * object_size;
1437 }
1438
1439 /* Set the in-use bit. */
1440 entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1441
1442 /* Keep a running total of the number of free objects. If this page
1443 fills up, we may have to move it to the end of the list if the
1444 next page isn't full. If the next page is full, all subsequent
1445 pages are full, so there's no need to move it. */
1446 if (--entry->num_free_objects == 0
1447 && entry->next != NULL
1448 && entry->next->num_free_objects > 0)
1449 {
1450 /* We have a new head for the list. */
1451 G.pages[order] = entry->next;
1452
1453 /* We are moving ENTRY to the end of the page table list.
1454 The new page at the head of the list will have NULL in
1455 its PREV field and ENTRY will have NULL in its NEXT field. */
1456 entry->next->prev = NULL;
1457 entry->next = NULL;
1458
1459 /* Append ENTRY to the tail of the list. */
1460 entry->prev = G.page_tails[order];
1461 G.page_tails[order]->next = entry;
1462 G.page_tails[order] = entry;
1463 }
1464
1465 /* Calculate the object's address. */
1466 result = entry->page + object_offset;
1467 if (GATHER_STATISTICS)
1468 ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1469 result FINAL_PASS_MEM_STAT);
1470
1471#ifdef ENABLE_GC_CHECKING
1472 /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1473 exact same semantics in presence of memory bugs, regardless of
1474 ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1475 handle to avoid handle leak. */
1476 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1477
1478 /* `Poison' the entire allocated object, including any padding at
1479 the end. */
  memset (result, 0xaf, object_size);
1481
  /* Make the bytes after the end of the object inaccessible.  Discard the
1483 handle to avoid handle leak. */
1484 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1485 object_size - size));
1486#endif
1487
1488 /* Tell Valgrind that the memory is there, but its content isn't
1489 defined. The bytes at the end of the object are still marked
     inaccessible.  */
1491 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1492
1493 /* Keep track of how many bytes are being allocated. This
1494 information is used in deciding when to collect. */
1495 G.allocated += object_size;
1496
1497 /* For timevar statistics. */
1498 timevar_ggc_mem_total += object_size;
1499
1500 if (f)
1501 add_finalizer (result, f, s, n);
1502
1503 if (GATHER_STATISTICS)
1504 {
1505 size_t overhead = object_size - size;
1506
1507 G.stats.total_overhead += overhead;
1508 G.stats.total_allocated += object_size;
1509 G.stats.total_overhead_per_order[order] += overhead;
1510 G.stats.total_allocated_per_order[order] += object_size;
1511
1512 if (size <= 32)
1513 {
1514 G.stats.total_overhead_under32 += overhead;
1515 G.stats.total_allocated_under32 += object_size;
1516 }
1517 if (size <= 64)
1518 {
1519 G.stats.total_overhead_under64 += overhead;
1520 G.stats.total_allocated_under64 += object_size;
1521 }
1522 if (size <= 128)
1523 {
1524 G.stats.total_overhead_under128 += overhead;
1525 G.stats.total_allocated_under128 += object_size;
1526 }
1527 }
1528
1529 if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Allocating object, requested size="
1532 HOST_SIZE_T_PRINT_UNSIGNED ", actual=" HOST_SIZE_T_PRINT_UNSIGNED
1533 " at %p on %p\n",
1534 (fmt_size_t) size, (fmt_size_t) object_size, result,
1535 (void *) entry);
1536
1537 return result;
1538}
1539
1540#ifdef HAVE_ATTRIBUTE_ALIAS
1541extern void *
1542ggc_internal_alloc (size_t size, void (*f)(void *), size_t s,
1543 size_t n MEM_STAT_DECL)
1544 __attribute__((__alias__ ("ggc_internal_alloc_")));
1545extern void *
1546ggc_internal_alloc_no_dtor (size_t size, void (*f)(void *), size_t s,
1547 size_t n MEM_STAT_DECL)
1548 __attribute__((__alias__ ("ggc_internal_alloc_")));
1549#else
1550#ifdef __GNUC__
1551__attribute__ ((__noinline__))
1552#endif
1553void *
1554ggc_internal_alloc_no_dtor (size_t size, void (*f)(void *), size_t s,
1555 size_t n MEM_STAT_DECL)
1556{
1557 return ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
1558}
1559#endif
1560
1561/* Mark function for strings. */
1562
1563void
1564gt_ggc_m_S (const void *p)
1565{
1566 page_entry *entry;
1567 unsigned bit, word;
1568 unsigned long mask;
1569 unsigned long offset;
1570
1571 if (!p)
1572 return;
1573
1574 /* Look up the page on which the object is alloced. If it was not
1575 GC allocated, gracefully bail out. */
1576 entry = safe_lookup_page_table_entry (p);
1577 if (!entry)
1578 return;
1579
1580 /* Calculate the index of the object on the page; this is its bit
1581 position in the in_use_p bitmap. Note that because a char* might
1582 point to the middle of an object, we need special code here to
1583 make sure P points to the start of an object. */
1584 offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1585 if (offset)
1586 {
1587 /* Here we've seen a char* which does not point to the beginning
1588 of an allocated object. We assume it points to the middle of
1589 a STRING_CST. */
1590 gcc_assert (offset == offsetof (struct tree_string, str));
1591 p = ((const char *) p) - offset;
1592 gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1593 return;
1594 }
1595
1596 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1597 word = bit / HOST_BITS_PER_LONG;
1598 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1599
1600 /* If the bit was previously set, skip it. */
1601 if (entry->in_use_p[word] & mask)
1602 return;
1603
1604 /* Otherwise set it, and decrement the free object count. */
1605 entry->in_use_p[word] |= mask;
1606 entry->num_free_objects -= 1;
1607
1608 if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);
1610
1611 return;
1612}
1613
1614
1615/* User-callable entry points for marking string X. */
1616
1617void
1618gt_ggc_mx (const char *& x)
1619{
  gt_ggc_m_S (x);
1621}
1622
1623void
1624gt_ggc_mx (char *& x)
1625{
  gt_ggc_m_S (x);
1627}
1628
1629void
1630gt_ggc_mx (unsigned char *& x)
1631{
  gt_ggc_m_S (x);
1633}
1634
1635void
1636gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1637{
1638}
1639
/* If P is not marked, mark it and return false.  Otherwise return true.
1641 P must have been allocated by the GC allocator; it mustn't point to
1642 static objects, stack variables, or memory allocated with malloc. */
1643
1644bool
1645ggc_set_mark (const void *p)
1646{
1647 page_entry *entry;
1648 unsigned bit, word;
1649 unsigned long mask;
1650
1651 /* Look up the page on which the object is alloced. If the object
1652 wasn't allocated by the collector, we'll probably die. */
1653 entry = lookup_page_table_entry (p);
1654 gcc_assert (entry);
1655
1656 /* Calculate the index of the object on the page; this is its bit
1657 position in the in_use_p bitmap. */
1658 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1659 word = bit / HOST_BITS_PER_LONG;
1660 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1661
1662 /* If the bit was previously set, skip it. */
1663 if (entry->in_use_p[word] & mask)
1664 return true;
1665
1666 /* Otherwise set it, and decrement the free object count. */
1667 entry->in_use_p[word] |= mask;
1668 entry->num_free_objects -= 1;
1669
1670 if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);
1672
1673 return false;
1674}
1675
/* Return true if P has been marked, false otherwise.
1677 P must have been allocated by the GC allocator; it mustn't point to
1678 static objects, stack variables, or memory allocated with malloc. */
1679
1680bool
1681ggc_marked_p (const void *p)
1682{
1683 page_entry *entry;
1684 unsigned bit, word;
1685 unsigned long mask;
1686
1687 /* Look up the page on which the object is alloced. If the object
1688 wasn't allocated by the collector, we'll probably die. */
1689 entry = lookup_page_table_entry (p);
1690 gcc_assert (entry);
1691
1692 /* Calculate the index of the object on the page; this is its bit
1693 position in the in_use_p bitmap. */
1694 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1695 word = bit / HOST_BITS_PER_LONG;
1696 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1697
1698 return (entry->in_use_p[word] & mask) != 0;
1699}
1700
1701/* Return the size of the gc-able object P. */
1702
1703size_t
1704ggc_get_size (const void *p)
1705{
1706 page_entry *pe = lookup_page_table_entry (p);
1707 return OBJECT_SIZE (pe->order);
1708}
1709
1710/* Release the memory for object P. */
1711
1712void
1713ggc_free (void *p)
1714{
1715 if (in_gc)
1716 return;
1717
1718 page_entry *pe = lookup_page_table_entry (p);
1719 size_t order = pe->order;
1720 size_t size = OBJECT_SIZE (order);
1721
1722 if (GATHER_STATISTICS)
1723 ggc_free_overhead (p);
1724
1725 if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
             "Freeing object, actual size="
1728 HOST_SIZE_T_PRINT_UNSIGNED ", at %p on %p\n",
1729 (fmt_size_t) size, p, (void *) pe);
1730
1731#ifdef ENABLE_GC_CHECKING
1732 /* Poison the data, to indicate the data is garbage. */
1733 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
1735#endif
1736 /* Let valgrind know the object is free. */
1737 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1738
1739#ifdef ENABLE_GC_ALWAYS_COLLECT
1740 /* In the completely-anal-checking mode, we do *not* immediately free
1741 the data, but instead verify that the data is *actually* not
1742 reachable the next time we collect. */
1743 {
1744 struct free_object *fo = XNEW (struct free_object);
1745 fo->object = p;
1746 fo->next = G.free_object_list;
1747 G.free_object_list = fo;
1748 }
1749#else
1750 {
1751 unsigned int bit_offset, word, bit;
1752
1753 G.allocated -= size;
1754
1755 /* Mark the object not-in-use. */
1756 bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1757 word = bit_offset / HOST_BITS_PER_LONG;
1758 bit = bit_offset % HOST_BITS_PER_LONG;
1759 pe->in_use_p[word] &= ~(1UL << bit);
1760
1761 if (pe->num_free_objects++ == 0)
1762 {
1763 page_entry *p, *q;
1764
1765 /* If the page is completely full, then it's supposed to
1766 be after all pages that aren't. Since we've freed one
1767 object from a page that was full, we need to move the
1768 page to the head of the list.
1769
1770 PE is the node we want to move. Q is the previous node
1771 and P is the next node in the list. */
1772 q = pe->prev;
1773 if (q && q->num_free_objects == 0)
1774 {
1775 p = pe->next;
1776
1777 q->next = p;
1778
1779 /* If PE was at the end of the list, then Q becomes the
1780 new end of the list. If PE was not the end of the
1781 list, then we need to update the PREV field for P. */
1782 if (!p)
1783 G.page_tails[order] = q;
1784 else
1785 p->prev = q;
1786
1787 /* Move PE to the head of the list. */
1788 pe->next = G.pages[order];
1789 pe->prev = NULL;
1790 G.pages[order]->prev = pe;
1791 G.pages[order] = pe;
1792 }
1793
1794 /* Reset the hint bit to point to the only free object. */
1795 pe->next_bit_hint = bit_offset;
1796 }
1797 }
1798#endif
1799}
1800
1801/* Subroutine of init_ggc which computes the pair of numbers used to
1802 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1803
1804 This algorithm is taken from Granlund and Montgomery's paper
1805 "Division by Invariant Integers using Multiplication"
1806 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1807 constants). */
1808
1809static void
1810compute_inverse (unsigned order)
1811{
1812 size_t size, inv;
1813 unsigned int e;
1814
1815 size = OBJECT_SIZE (order);
1816 e = 0;
1817 while (size % 2 == 0)
1818 {
1819 e++;
1820 size >>= 1;
1821 }
1822
1823 inv = size;
1824 while (inv * size != 1)
1825 inv = inv * (2 - inv*size);
1826
1827 DIV_MULT (order) = inv;
1828 DIV_SHIFT (order) = e;
1829}
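
/* A hedged worked example of the iteration above, for OBJECT_SIZE == 24
   and a 32-bit size_t: the first loop strips the factor 2^3 (e == 3) and
   the second Newton-iterates on the remaining odd factor 3:

     inv == 3            ->  inv * 3 == 1 + 2^3  (mod 2^32)
     inv == 3 * (2 - 9)  ->  inv * 3 == 1 - 2^6  (mod 2^32)
     ...                     each step doubles the correct low-order bits
     inv == 0xAAAAAAAB   ->  inv * 3 == 1        (mod 2^32)

   giving DIV_MULT (order) == 0xAAAAAAAB and DIV_SHIFT (order) == 3,
   consistent with the OFFSET_TO_BIT example near the top of the file.  */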
1830
1831/* Initialize the ggc-mmap allocator. */
1832void
1833init_ggc (void)
1834{
1835 static bool init_p = false;
1836 unsigned order;
1837
1838 if (init_p)
1839 return;
1840 init_p = true;
1841
1842 G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);
1844
1845#ifdef HAVE_MMAP_DEV_ZERO
1846 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1847 if (G.dev_zero_fd == -1)
1848 internal_error ("open /dev/zero: %m");
1849#endif
1850
1851#if 0
1852 G.debug_file = fopen ("ggc-mmap.debug", "w");
1853#else
1854 G.debug_file = stdout;
1855#endif
1856
1857#ifdef USING_MMAP
1858 /* StunOS has an amazing off-by-one error for the first mmap allocation
1859 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1860 believe, is an unaligned page allocation, which would cause us to
1861 hork badly if we tried to use it. */
1862 {
    char *p = alloc_anon (NULL, G.pagesize, true);
1864 struct page_entry *e;
1865 if ((uintptr_t)p & (G.pagesize - 1))
1866 {
1867 /* How losing. Discard this one and try another. If we still
1868 can't get something useful, give up. */
1869
        p = alloc_anon (NULL, G.pagesize, true);
1871 gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1872 }
1873
1874 /* We have a good page, might as well hold onto it... */
1875 e = XCNEW (struct page_entry);
1876 e->bytes = G.pagesize;
    e->free_list = find_free_list (G.pagesize);
1878 e->page = p;
1879 e->next = e->free_list->free_pages;
1880 e->free_list->free_pages = e;
1881 }
1882#endif
1883
1884 /* Initialize the object size table. */
1885 for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1886 object_size_table[order] = (size_t) 1 << order;
1887 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1888 {
1889 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1890
1891 /* If S is not a multiple of MAX_ALIGNMENT, round it up
1892 so that we're sure of getting aligned memory. */
1893 s = ROUND_UP (s, MAX_ALIGNMENT);
1894 object_size_table[order] = s;
1895 }
1896
1897 /* Initialize the objects-per-page and inverse tables. */
1898 for (order = 0; order < NUM_ORDERS; ++order)
1899 {
1900 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1901 if (objects_per_page_table[order] == 0)
1902 objects_per_page_table[order] = 1;
1903 compute_inverse (order);
1904 }
1905
1906 /* Reset the size_lookup array to put appropriately sized objects in
1907 the special orders. All objects bigger than the previous power
1908 of two, but no greater than the special size, should go in the
1909 new order. */
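 /* For example (purely illustrative sizes): if one of the extra orders is
    24 bytes, and requests of 17 through 24 bytes previously mapped to the
    32-byte power-of-two order, then the loop below redirects
    size_lookup[17] .. size_lookup[24] to the 24-byte order. */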
1910 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1911 {
1912 int o;
1913 int i;
1914
1915 i = OBJECT_SIZE (order);
1916 if (i >= NUM_SIZE_LOOKUP)
1917 continue;
1918
1919 for (o = size_lookup[i]; o == size_lookup[i]; --i)
1920 size_lookup[i] = order;
1921 }
1922
1923 G.depth_in_use = 0;
1924 G.depth_max = 10;
1925 G.depth = XNEWVEC (unsigned int, G.depth_max);
1926
1927 G.by_depth_in_use = 0;
1928 G.by_depth_max = INITIAL_PTE_COUNT;
1929 G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1930 G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1931
1932 /* Allocate space for the depth 0 finalizers. */
1933 G.finalizers.safe_push (vNULL);
1934 G.vec_finalizers.safe_push (vNULL);
1935 gcc_assert (G.finalizers.length() == 1);
1936}
1937
1938/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1939 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1940
1941static void
1942ggc_recalculate_in_use_p (page_entry *p)
1943{
1944 unsigned int i;
1945 size_t num_objects;
1946
1947 /* Because the past-the-end bit in in_use_p is always set, we
1948 pretend there is one additional object. */
1949 num_objects = OBJECTS_IN_PAGE (p) + 1;
1950
1951 /* Reset the free object count. */
1952 p->num_free_objects = num_objects;
1953
1954 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1955 for (i = 0;
1956 i < CEIL (BITMAP_SIZE (num_objects),
1957 sizeof (*p->in_use_p));
1958 ++i)
1959 {
1960 unsigned long j;
1961
1962 /* Something is in use if it is marked, or if it was in use in a
1963 context further down the context stack. */
1964 p->in_use_p[i] |= save_in_use_p (p)[i];
1965
1966 /* Decrement the free object count for every object allocated. */
1967 for (j = p->in_use_p[i]; j; j >>= 1)
1968 p->num_free_objects -= (j & 1);
1969 }
1970
1971 gcc_assert (p->num_free_objects < num_objects);
1972}
1973
1974/* Unmark all objects. */
1975
1976static void
1977clear_marks (void)
1978{
1979 unsigned order;
1980
1981 for (order = 2; order < NUM_ORDERS; order++)
1982 {
1983 page_entry *p;
1984
1985 for (p = G.pages[order]; p != NULL; p = p->next)
1986 {
1987 size_t num_objects = OBJECTS_IN_PAGE (p);
1988 size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1989
1990 /* The data should be page-aligned. */
1991 gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1992
1993 /* Pages that aren't in the topmost context are not collected;
1994 nevertheless, we need their in-use bit vectors to store GC
1995 marks. So, back them up first. */
1996 if (p->context_depth < G.context_depth)
1997 {
1998 if (! save_in_use_p (p))
1999 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
2000 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
2001 }
2002
2003 /* Reset the number of free objects and clear the
2004 in-use bits. These will be adjusted by mark_obj. */
2005 p->num_free_objects = num_objects;
2006 memset (p->in_use_p, 0, bitmap_size);
2007
2008 /* Make sure the one-past-the-end bit is always set. */
2009 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
2010 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
2011 }
2012 }
2013}
2014
2015/* Check whether any blocks with a registered finalizer have become unmarked.
2016 If so, run the finalizer and unregister it, because the block is about to be
2017 freed. Note that no guarantee is made about the order in which finalizers
2018 run, so touching other objects in GC memory is extremely unwise. */
2019
2020static void
2021ggc_handle_finalizers ()
2022{
2023 unsigned dlen = G.finalizers.length();
2024 for (unsigned d = G.context_depth; d < dlen; ++d)
2025 {
2026 vec<finalizer> &v = G.finalizers[d];
2027 unsigned length = v.length ();
2028 for (unsigned int i = 0; i < length;)
2029 {
2030 finalizer &f = v[i];
2031 if (!ggc_marked_p (f.addr ()))
2032 {
2033 f.call ();
2034 v.unordered_remove (i);
2035 length--;
2036 }
2037 else
2038 i++;
2039 }
2040 }
2041
2042 gcc_assert (dlen == G.vec_finalizers.length());
2043 for (unsigned d = G.context_depth; d < dlen; ++d)
2044 {
2045 vec<vec_finalizer> &vv = G.vec_finalizers[d];
2046 unsigned length = vv.length ();
2047 for (unsigned int i = 0; i < length;)
2048 {
2049 vec_finalizer &f = vv[i];
2050 if (!ggc_marked_p (f.addr ()))
2051 {
2052 f.call ();
2053 vv.unordered_remove (i);
2054 length--;
2055 }
2056 else
2057 i++;
2058 }
2059 }
2060}
2061
2062/* Free all empty pages. Partially empty pages need no attention
2063 because the `mark' bit doubles as an `unused' bit. */
2064
2065static void
2066sweep_pages (void)
2067{
2068 unsigned order;
2069
2070 for (order = 2; order < NUM_ORDERS; order++)
2071 {
2072 /* The last page-entry to consider, regardless of entries
2073 placed at the end of the list. */
2074 page_entry * const last = G.page_tails[order];
2075
2076 size_t num_objects;
2077 size_t live_objects;
2078 page_entry *p, *previous;
2079 int done;
2080
2081 p = G.pages[order];
2082 if (p == NULL)
2083 continue;
2084
2085 previous = NULL;
2086 do
2087 {
2088 page_entry *next = p->next;
2089
2090 /* Loop until all entries have been examined. */
2091 done = (p == last);
2092
2093 num_objects = OBJECTS_IN_PAGE (p);
2094
2095 /* Add all live objects on this page to the count of
2096 allocated memory. */
2097 live_objects = num_objects - p->num_free_objects;
2098
2099 G.allocated += OBJECT_SIZE (order) * live_objects;
2100
2101 /* Only objects on pages in the topmost context should get
2102 collected. */
2103 if (p->context_depth < G.context_depth)
2104 ;
2105
2106 /* Remove the page if it's empty. */
2107 else if (live_objects == 0)
2108 {
2109 /* If P was the first page in the list, then NEXT
2110 becomes the new first page in the list, otherwise
2111 splice P out of the forward pointers. */
2112 if (! previous)
2113 G.pages[order] = next;
2114 else
2115 previous->next = next;
2116
2117 /* Splice P out of the back pointers too. */
2118 if (next)
2119 next->prev = previous;
2120
2121 /* Are we removing the last element? */
2122 if (p == G.page_tails[order])
2123 G.page_tails[order] = previous;
2124 free_page (p);
2125 p = previous;
2126 }
2127
2128 /* If the page is full, move it to the end. */
2129 else if (p->num_free_objects == 0)
2130 {
2131 /* Don't move it if it's already at the end. */
2132 if (p != G.page_tails[order])
2133 {
2134 /* Move p to the end of the list. */
2135 p->next = NULL;
2136 p->prev = G.page_tails[order];
2137 G.page_tails[order]->next = p;
2138
2139 /* Update the tail pointer... */
2140 G.page_tails[order] = p;
2141
2142 /* ... and the head pointer, if necessary. */
2143 if (! previous)
2144 G.pages[order] = next;
2145 else
2146 previous->next = next;
2147
2148 /* And update the backpointer in NEXT if necessary. */
2149 if (next)
2150 next->prev = previous;
2151
2152 p = previous;
2153 }
2154 }
2155
2156 /* If we've fallen through to here, it's a page in the
2157 topmost context that is neither full nor empty. Such a
2158 page must precede pages at lesser context depth in the
2159 list, so move it to the head. */
2160 else if (p != G.pages[order])
2161 {
2162 previous->next = p->next;
2163
2164 /* Update the backchain in the next node if it exists. */
2165 if (p->next)
2166 p->next->prev = previous;
2167
2168 /* Move P to the head of the list. */
2169 p->next = G.pages[order];
2170 p->prev = NULL;
2171 G.pages[order]->prev = p;
2172
2173 /* Update the head pointer. */
2174 G.pages[order] = p;
2175
2176 /* Are we moving the last element? */
2177 if (G.page_tails[order] == p)
2178 G.page_tails[order] = previous;
2179 p = previous;
2180 }
2181
2182 previous = p;
2183 p = next;
2184 }
2185 while (! done);
2186
2187 /* Now, restore the in_use_p vectors for any pages from contexts
2188 other than the current one. */
2189 for (p = G.pages[order]; p; p = p->next)
2190 if (p->context_depth != G.context_depth)
2191 ggc_recalculate_in_use_p (p);
2192 }
2193}
2194
2195#ifdef ENABLE_GC_CHECKING
2196/* Clobber all free objects. */
2197
2198static void
2199poison_pages (void)
2200{
2201 unsigned order;
2202
2203 for (order = 2; order < NUM_ORDERS; order++)
2204 {
2205 size_t size = OBJECT_SIZE (order);
2206 page_entry *p;
2207
2208 for (p = G.pages[order]; p != NULL; p = p->next)
2209 {
2210 size_t num_objects;
2211 size_t i;
2212
2213 if (p->context_depth != G.context_depth)
2214 /* Since we don't do any collection for pages in pushed
2215 contexts, there's no need to do any poisoning. And
2216 besides, the IN_USE_P array isn't valid until we pop
2217 contexts. */
2218 continue;
2219
2220 num_objects = OBJECTS_IN_PAGE (p);
2221 for (i = 0; i < num_objects; i++)
2222 {
2223 size_t word, bit;
2224 word = i / HOST_BITS_PER_LONG;
2225 bit = i % HOST_BITS_PER_LONG;
2226 if (((p->in_use_p[word] >> bit) & 1) == 0)
2227 {
2228 char *object = p->page + i * size;
2229
2230 /* Keep poison-by-write when we expect to use Valgrind,
2231 so the exact same memory semantics are kept, in case there are
2232 there are memory errors. We override this request
2233 below. */
2234 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2235 size));
2236 memset (object, 0xa5, size);
2237
2238 /* Drop the handle to avoid handle leak. */
2239 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2240 }
2241 }
2242 }
2243 }
2244}
2245#else
2246#define poison_pages()
2247#endif
2248
2249#ifdef ENABLE_GC_ALWAYS_COLLECT
2250/* Validate that the reportedly free objects actually are. */
2251
2252static void
2253validate_free_objects (void)
2254{
2255 struct free_object *f, *next, *still_free = NULL;
2256
2257 for (f = G.free_object_list; f ; f = next)
2258 {
2259 page_entry *pe = lookup_page_table_entry (f->object);
2260 size_t bit, word;
2261
2262 bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2263 word = bit / HOST_BITS_PER_LONG;
2264 bit = bit % HOST_BITS_PER_LONG;
2265 next = f->next;
2266
2267 /* Make certain it isn't visible from any root. Notice that we
2268 do this check before sweep_pages merges save_in_use_p. */
2269 gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2270
2271 /* If the object comes from an outer context, then retain the
2272 free_object entry, so that we can verify that the address
2273 isn't live on the stack in some outer context. */
2274 if (pe->context_depth != G.context_depth)
2275 {
2276 f->next = still_free;
2277 still_free = f;
2278 }
2279 else
2280 free (f);
2281 }
2282
2283 G.free_object_list = still_free;
2284}
2285#else
2286#define validate_free_objects()
2287#endif
2288
2289/* Top level mark-and-sweep routine. */
2290
2291void
2292ggc_collect (enum ggc_collect mode)
2293{
2294 /* Avoid frequent unnecessary work by skipping collection if the
2295 total allocations haven't expanded much since the last
2296 collection. */
2297 float allocated_last_gc =
2298 MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * ONE_K);
2299
2300 /* This is also a good time to bring the memory block pool back within its limits. */
2301 memory_block_pool::trim ();
2302
2303 float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
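 /* For illustration (made-up numbers, not the configured defaults): if the
    effective allocated_last_gc above works out to 4 MB and
    --param ggc-min-expand is 30, heuristic collections are skipped until
    G.allocated exceeds roughly 4 MB + 1.2 MB = 5.2 MB. */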
2304 if (mode == GGC_COLLECT_HEURISTIC
2305 && G.allocated < allocated_last_gc + min_expand)
2306 return;
2307
2308 timevar_push (TV_GC);
2309 if (GGC_DEBUG_LEVEL >= 2)
2310 fprintf (G.debug_file, "BEGIN COLLECTING\n");
2311
2312 /* Zero the total allocated bytes. This will be recalculated in the
2313 sweep phase. */
2314 size_t allocated = G.allocated;
2315 G.allocated = 0;
2316
2317 /* Release the pages we freed the last time we collected, but didn't
2318 reuse in the interim. */
2319 release_pages ();
2320
2321 /* Output this later so we do not interfere with release_pages. */
2322 if (!quiet_flag)
2323 fprintf (stderr, " {GC " PRsa (0) " -> ", SIZE_AMOUNT (allocated));
2324
2325 /* Indicate that we've seen collections at this context depth. */
2326 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2327
2328 invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2329
2330 in_gc = true;
2331 clear_marks ();
2332 ggc_mark_roots ();
2333 ggc_handle_finalizers ();
2334
2335 if (GATHER_STATISTICS)
2336 ggc_prune_overhead_list ();
2337
2338 poison_pages ();
2339 validate_free_objects ();
2340 sweep_pages ();
2341
2342 in_gc = false;
2343 G.allocated_last_gc = G.allocated;
2344
2345 invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2346
2347 timevar_pop (TV_GC);
2348
2349 if (!quiet_flag)
2350 fprintf (stderr, PRsa (0) "}", SIZE_AMOUNT (G.allocated));
2351 if (GGC_DEBUG_LEVEL >= 2)
2352 fprintf (G.debug_file, "END COLLECTING\n");
2353}
2354
2355/* Return free pages to the system. */
2356
2357void
2358ggc_trim ()
2359{
2360 timevar_push (TV_GC);
2361 G.allocated = 0;
2362 sweep_pages ();
2363 release_pages ();
2364 if (!quiet_flag)
2365 fprintf (stderr, " {GC trimmed to " PRsa (0) ", " PRsa (0) " mapped}",
2366 SIZE_AMOUNT (G.allocated), SIZE_AMOUNT (G.bytes_mapped));
2367 timevar_pop (TV_GC);
2368}
2369
2370/* Assume that all GGC memory is reachable and grow the limits for the next
2371 collection. With checking enabled, trigger a collection instead so that -Q
2372 compilation reports how much memory really is reachable. */
2373
2374void
2375ggc_grow (void)
2376{
2377 if (!flag_checking)
2378 G.allocated_last_gc = MAX (G.allocated_last_gc,
2379 G.allocated);
2380 else
2381 ggc_collect ();
2382 if (!quiet_flag)
2383 fprintf (stderr, " {GC " PRsa (0) "} ", SIZE_AMOUNT (G.allocated));
2384}
2385
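/* Report on stderr how much memory is still allocated, per object size, at
   the end of compilation, together with the cumulative allocation statistics
   when GATHER_STATISTICS is enabled. */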
2386void
2387ggc_print_statistics (void)
2388{
2389 struct ggc_statistics stats;
2390 unsigned int i;
2391 size_t total_overhead = 0;
2392
2393 /* Clear the statistics. */
2394 memset (&stats, 0, sizeof (stats));
2395
2396 /* Make sure collection will really occur. */
2397 G.allocated_last_gc = 0;
2398
2399 /* Collect and print the statistics common across collectors. */
2400 ggc_print_common_statistics (stderr, &stats);
2401
2402 /* Release free pages so that we will not count the bytes allocated
2403 there as part of the total allocated memory. */
2404 release_pages ();
2405
2406 /* Collect some information about the various sizes of
2407 allocation. */
2408 fprintf (stderr,
2409 format: "Memory still allocated at the end of the compilation process\n");
2410 fprintf (stderr, "%-8s %10s %10s %10s\n",
2411 "Size", "Allocated", "Used", "Overhead");
2412 for (i = 0; i < NUM_ORDERS; ++i)
2413 {
2414 page_entry *p;
2415 size_t allocated;
2416 size_t in_use;
2417 size_t overhead;
2418
2419 /* Skip empty entries. */
2420 if (!G.pages[i])
2421 continue;
2422
2423 overhead = allocated = in_use = 0;
2424
2425 /* Figure out the total number of bytes allocated for objects of
2426 this size, and how many of them are actually in use. Also figure
2427 out how much memory the page table is using. */
2428 for (p = G.pages[i]; p; p = p->next)
2429 {
2430 allocated += p->bytes;
2431 in_use +=
2432 (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2433
2434 overhead += (sizeof (page_entry) - sizeof (long)
2435 + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2436 }
2437 fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
2438 PRsa (10) "\n",
2439 (uint64_t)OBJECT_SIZE (i),
2440 SIZE_AMOUNT (allocated),
2441 SIZE_AMOUNT (in_use),
2442 SIZE_AMOUNT (overhead));
2443 total_overhead += overhead;
2444 }
2445 fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
2446 "Total",
2447 SIZE_AMOUNT (G.bytes_mapped),
2448 SIZE_AMOUNT (G.allocated),
2449 SIZE_AMOUNT (total_overhead));
2450
2451 if (GATHER_STATISTICS)
2452 {
2453 fprintf (stderr, "\nTotal allocations and overheads during "
2454 "the compilation process\n");
2455
2456 fprintf (stderr, "Total Overhead: "
2457 PRsa (9) "\n",
2458 SIZE_AMOUNT (G.stats.total_overhead));
2459 fprintf (stderr, "Total Allocated: "
2460 PRsa (9) "\n",
2461 SIZE_AMOUNT (G.stats.total_allocated));
2462
2463 fprintf (stderr, "Total Overhead under 32B: "
2464 PRsa (9) "\n",
2465 SIZE_AMOUNT (G.stats.total_overhead_under32));
2466 fprintf (stderr, "Total Allocated under 32B: "
2467 PRsa (9) "\n",
2468 SIZE_AMOUNT (G.stats.total_allocated_under32));
2469 fprintf (stderr, "Total Overhead under 64B: "
2470 PRsa (9) "\n",
2471 SIZE_AMOUNT (G.stats.total_overhead_under64));
2472 fprintf (stderr, "Total Allocated under 64B: "
2473 PRsa (9) "\n",
2474 SIZE_AMOUNT (G.stats.total_allocated_under64));
2475 fprintf (stderr, "Total Overhead under 128B: "
2476 PRsa (9) "\n",
2477 SIZE_AMOUNT (G.stats.total_overhead_under128));
2478 fprintf (stderr, "Total Allocated under 128B: "
2479 PRsa (9) "\n",
2480 SIZE_AMOUNT (G.stats.total_allocated_under128));
2481 fprintf (stderr, "Number of free list fallbacks: "
2482 PRsa (9) "\n",
2483 SIZE_AMOUNT (G.stats.fallback));
2484
2485 for (i = 0; i < NUM_ORDERS; i++)
2486 if (G.stats.total_allocated_per_order[i])
2487 {
2488 fprintf (stderr, "Total Overhead page size %9" PRIu64 ": "
2489 PRsa (9) "\n",
2490 (uint64_t)OBJECT_SIZE (i),
2491 SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
2492 fprintf (stderr, "Total Allocated page size %9" PRIu64 ": "
2493 PRsa (9) "\n",
2494 (uint64_t)OBJECT_SIZE (i),
2495 SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
2496 }
2497 }
2498}
2499
2500struct ggc_pch_ondisk
2501{
2502 unsigned totals[NUM_ORDERS];
2503};
2504
2505struct ggc_pch_data
2506{
2507 struct ggc_pch_ondisk d;
2508 uintptr_t base[NUM_ORDERS];
2509 size_t written[NUM_ORDERS];
2510};
2511
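/* Return a fresh, zero-initialized ggc_pch_data structure; it carries the
   state shared by the PCH save routines below. */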
2512struct ggc_pch_data *
2513init_ggc_pch (void)
2514{
2515 return XCNEW (struct ggc_pch_data);
2516}
2517
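/* Record in D that one object of SIZE bytes will be written to the PCH
   file, by bumping the object count for its order; X itself is not needed
   here. */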
2518void
2519ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2520 size_t size)
2521{
2522 unsigned order;
2523
2524 if (size < NUM_SIZE_LOOKUP)
2525 order = size_lookup[size];
2526 else
2527 {
2528 order = 10;
2529 while (size > OBJECT_SIZE (order))
2530 order++;
2531 }
2532
2533 d->d.totals[order]++;
2534}
2535
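/* Return the total amount of memory required to hold all of the objects
   counted in D, with each order's block rounded up to a whole number of
   pages. */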
2536size_t
2537ggc_pch_total_size (struct ggc_pch_data *d)
2538{
2539 size_t a = 0;
2540 unsigned i;
2541
2542 for (i = 0; i < NUM_ORDERS; i++)
2543 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2544 return a;
2545}
2546
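/* Record that the PCH objects will be laid out starting at BASE, and
   compute the starting address of each order's block within that region. */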
2547void
2548ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2549{
2550 uintptr_t a = (uintptr_t) base;
2551 unsigned i;
2552
2553 for (i = 0; i < NUM_ORDERS; i++)
2554 {
2555 d->base[i] = a;
2556 a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2557 }
2558}
2559
2560
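/* Return the address reserved for the next SIZE-byte object in the PCH
   area, advancing the per-order allocation pointer past it. */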
2561char *
2562ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2563 size_t size)
2564{
2565 unsigned order;
2566 char *result;
2567
2568 if (size < NUM_SIZE_LOOKUP)
2569 order = size_lookup[size];
2570 else
2571 {
2572 order = 10;
2573 while (size > OBJECT_SIZE (order))
2574 order++;
2575 }
2576
2577 result = (char *) d->base[order];
2578 d->base[order] += OBJECT_SIZE (order);
2579 return result;
2580}
2581
2582void
2583ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2584 FILE *f ATTRIBUTE_UNUSED)
2585{
2586 /* Nothing to do. */
2587}
2588
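/* Write the object X, of SIZE bytes, to the PCH file F, padding it out to
   the full object size of its order; after the last object of an order has
   been written, also skip ahead so that the next order's objects start on a
   page boundary. */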
2589void
2590ggc_pch_write_object (struct ggc_pch_data *d,
2591 FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2592 size_t size)
2593{
2594 unsigned order;
2595 static const char emptyBytes[256] = { 0 };
2596
2597 if (size < NUM_SIZE_LOOKUP)
2598 order = size_lookup[size];
2599 else
2600 {
2601 order = 10;
2602 while (size > OBJECT_SIZE (order))
2603 order++;
2604 }
2605
2606 if (fwrite (x, size, 1, f) != 1)
2607 fatal_error (input_location, "cannot write PCH file: %m");
2608
2609 /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2610 object out to OBJECT_SIZE(order). This happens for strings. */
2611
2612 if (size != OBJECT_SIZE (order))
2613 {
2614 unsigned padding = OBJECT_SIZE (order) - size;
2615
2616 /* To speed small writes, we use a nulled-out array that's larger
2617 than most padding requests as the source for our null bytes. This
2618 permits us to do the padding with fwrite() rather than fseek(), and
2619 limits the chance the OS may try to flush any outstanding writes. */
2620 if (padding <= sizeof (emptyBytes))
2621 {
2622 if (fwrite (emptyBytes, 1, padding, f) != padding)
2623 fatal_error (input_location, "cannot write PCH file");
2624 }
2625 else
2626 {
2627 /* Larger than our buffer? Just default to fseek. */
2628 if (fseek (f, padding, SEEK_CUR) != 0)
2629 fatal_error (input_location, "cannot write PCH file");
2630 }
2631 }
2632
2633 d->written[order]++;
2634 if (d->written[order] == d->d.totals[order]
2635 && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2636 G.pagesize),
2637 SEEK_CUR) != 0)
2638 fatal_error (input_location, "cannot write PCH file: %m");
2639}
2640
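/* Finish writing the PCH file F by appending the table of per-order object
   counts, then release D. */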
2641void
2642ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2643{
2644 if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2645 fatal_error (input_location, "cannot write PCH file: %m");
2646 free (d);
2647}
2648
2649/* Move the PCH PTE entries, which were just added at the end of by_depth,
2650 to the front. */
2651
2652static void
2653move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2654{
2655 /* First, we swap the new entries to the front of the varrays. */
2656 page_entry **new_by_depth;
2657 unsigned long **new_save_in_use;
2658
2659 new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2660 new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2661
2662 memcpy (&new_by_depth[0],
2663 &G.by_depth[count_old_page_tables],
2664 count_new_page_tables * sizeof (void *));
2665 memcpy (&new_by_depth[count_new_page_tables],
2666 &G.by_depth[0],
2667 count_old_page_tables * sizeof (void *));
2668 memcpy (&new_save_in_use[0],
2669 &G.save_in_use[count_old_page_tables],
2670 count_new_page_tables * sizeof (void *));
2671 memcpy (&new_save_in_use[count_new_page_tables],
2672 &G.save_in_use[0],
2673 count_old_page_tables * sizeof (void *));
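 /* At this point the new arrays hold the COUNT_NEW_PAGE_TABLES entries that
    were appended for the PCH first, followed by the COUNT_OLD_PAGE_TABLES
    preexisting entries; install them and then fix up the index_by_depth
    fields below. */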
2674
2675 free (G.by_depth);
2676 free (G.save_in_use);
2677
2678 G.by_depth = new_by_depth;
2679 G.save_in_use = new_save_in_use;
2680
2681 /* Now update all the index_by_depth fields. */
2682 for (unsigned i = G.by_depth_in_use; i--;)
2683 {
2684 page_entry *p = G.by_depth[i];
2685 p->index_by_depth = i;
2686 }
2687
2688 /* And last, we update the depth pointers in G.depth. The first
2689 entry is already 0, and context 0 entries always start at index
2690 0, so there is nothing to update in the first slot. We need a
2691 second slot only if we have old PTEs, and if we do, they start
2692 at index count_new_page_tables. */
2693 if (count_old_page_tables)
2694 push_depth (count_new_page_tables);
2695}
2696
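/* Read the PCH image that has been mapped at ADDR; the table of per-order
   object counts is read from F. The pages making up the image are entered
   into the page table with context depth 0, and every object in them is
   marked as live. */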
2697void
2698ggc_pch_read (FILE *f, void *addr)
2699{
2700 struct ggc_pch_ondisk d;
2701 unsigned i;
2702 char *offs = (char *) addr;
2703 unsigned long count_old_page_tables;
2704 unsigned long count_new_page_tables;
2705
2706 count_old_page_tables = G.by_depth_in_use;
2707
2708 if (fread (&d, sizeof (d), 1, f) != 1)
2709 fatal_error (input_location, "cannot read PCH file: %m");
2710
2711 /* We've just read in a PCH file. So, every object that used to be
2712 allocated is now free. */
2713 clear_marks ();
2714#ifdef ENABLE_GC_CHECKING
2715 poison_pages ();
2716#endif
2717 /* Since we free all the allocated objects, the free list becomes
2718 useless. Validate it now, which will also clear it. */
2719 validate_free_objects ();
2720
2721 /* No object read from a PCH file should ever be freed. So, set the
2722 context depth to 1, and set the depth of all the currently-allocated
2723 pages to be 1 too. PCH pages will have depth 0. */
2724 gcc_assert (!G.context_depth);
2725 G.context_depth = 1;
2726 /* Allocate space for the depth 1 finalizers. */
2727 G.finalizers.safe_push (vNULL);
2728 G.vec_finalizers.safe_push (vNULL);
2729 gcc_assert (G.finalizers.length() == 2);
2730 for (i = 0; i < NUM_ORDERS; i++)
2731 {
2732 page_entry *p;
2733 for (p = G.pages[i]; p != NULL; p = p->next)
2734 p->context_depth = G.context_depth;
2735 }
2736
2737 /* Allocate the appropriate page-table entries for the pages read from
2738 the PCH file. */
2739
2740 for (i = 0; i < NUM_ORDERS; i++)
2741 {
2742 struct page_entry *entry;
2743 char *pte;
2744 size_t bytes;
2745 size_t num_objs;
2746 size_t j;
2747
2748 if (d.totals[i] == 0)
2749 continue;
2750
2751 bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2752 num_objs = bytes / OBJECT_SIZE (i);
2753 entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2754 - sizeof (long)
2755 + BITMAP_SIZE (num_objs + 1)));
2756 entry->bytes = bytes;
2757 entry->free_list = find_free_list (bytes);
2758 entry->page = offs;
2759 entry->context_depth = 0;
2760 offs += bytes;
2761 entry->num_free_objects = 0;
2762 entry->order = i;
2763
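 /* Every object in a PCH page arrives live, so set all of the in-use bits,
    including the usual one-past-the-end bit. */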
2764 for (j = 0;
2765 j + HOST_BITS_PER_LONG <= num_objs + 1;
2766 j += HOST_BITS_PER_LONG)
2767 entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2768 for (; j < num_objs + 1; j++)
2769 entry->in_use_p[j / HOST_BITS_PER_LONG]
2770 |= 1L << (j % HOST_BITS_PER_LONG);
2771
2772 for (pte = entry->page;
2773 pte < entry->page + entry->bytes;
2774 pte += G.pagesize)
2775 set_page_table_entry (pte, entry);
2776
2777 if (G.page_tails[i] != NULL)
2778 G.page_tails[i]->next = entry;
2779 else
2780 G.pages[i] = entry;
2781 G.page_tails[i] = entry;
2782
2783 /* We start off by just adding all the new information to the
2784 end of the varrays; later, we will move the new information
2785 to the front of the varrays, as the PCH page tables are at
2786 context 0. */
2787 push_by_depth (entry, 0);
2788 }
2789
2790 /* Now, we update the various data structures that speed page table
2791 handling. */
2792 count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2793
2794 move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2795
2796 /* Update the statistics. */
2797 G.allocated = G.allocated_last_gc = offs - (char *)addr;
2798}
2799
