ggc-page.c (96489) ggc-page.c (117395)
1/* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 2, or (at your option) any later

--- 10 unchanged lines hidden (view full) ---

1902111-1307, USA. */
20
21#include "config.h"
22#include "system.h"
23#include "tree.h"
24#include "rtl.h"
25#include "tm_p.h"
26#include "toplev.h"
1/* "Bag-of-pages" garbage collector for the GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 2, or (at your option) any later

--- 10 unchanged lines hidden (view full) ---

1902111-1307, USA. */
20
21#include "config.h"
22#include "system.h"
23#include "tree.h"
24#include "rtl.h"
25#include "tm_p.h"
26#include "toplev.h"
27#include "varray.h"
28#include "flags.h"
29#include "ggc.h"
30#include "timevar.h"
27#include "flags.h"
28#include "ggc.h"
29#include "timevar.h"
30#include "params.h"
31#ifdef ENABLE_VALGRIND_CHECKING
32#include <valgrind.h>
33#else
34/* Avoid #ifdef:s when we can help it. */
35#define VALGRIND_DISCARD(x)
36#endif
31
32/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
33 file open. Prefer either to valloc. */
34#ifdef HAVE_MMAP_ANON
35# undef HAVE_MMAP_DEV_ZERO
36
37# include <sys/mman.h>
38# ifndef MAP_FAILED

--- 15 unchanged lines hidden (view full) ---

54# define USING_MMAP
55
56#endif
57
58#ifndef USING_MMAP
59#define USING_MALLOC_PAGE_GROUPS
60#endif
61
37
38/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39 file open. Prefer either to valloc. */
40#ifdef HAVE_MMAP_ANON
41# undef HAVE_MMAP_DEV_ZERO
42
43# include <sys/mman.h>
44# ifndef MAP_FAILED

--- 15 unchanged lines hidden (view full) ---

60# define USING_MMAP
61
62#endif
63
64#ifndef USING_MMAP
65#define USING_MALLOC_PAGE_GROUPS
66#endif
67
 62/* Strategy:
 68/* Strategy:
63
64 This garbage-collecting allocator allocates objects on one of a set
65 of pages. Each page can allocate objects of a single size only;
66 available sizes are powers of two starting at four bytes. The size
67 of an allocation request is rounded up to the next power of two
68 (`order'), and satisfied from the appropriate page.
69
70 Each page is recorded in a page-entry, which also maintains an
71 in-use bitmap of object positions on the page. This allows the
72 allocation state of a particular object to be flipped without
73 touching the page itself.
74
75 Each page-entry also has a context depth, which is used to track
76 pushing and popping of allocation contexts. Only objects allocated
69
70 This garbage-collecting allocator allocates objects on one of a set
71 of pages. Each page can allocate objects of a single size only;
72 available sizes are powers of two starting at four bytes. The size
73 of an allocation request is rounded up to the next power of two
74 (`order'), and satisfied from the appropriate page.
75
76 Each page is recorded in a page-entry, which also maintains an
77 in-use bitmap of object positions on the page. This allows the
78 allocation state of a particular object to be flipped without
79 touching the page itself.
80
81 Each page-entry also has a context depth, which is used to track
82 pushing and popping of allocation contexts. Only objects allocated
77 in the current (highest-numbered) context may be collected.
83 in the current (highest-numbered) context may be collected.
78
79 Page entries are arranged in an array of singly-linked lists. The
80 array is indexed by the allocation size, in bits, of the pages on
81 it; i.e. all pages on a list allocate objects of the same size.
82 Pages are ordered on the list such that all non-full pages precede
83 all full pages, with non-full pages arranged in order of decreasing
84 context depth.
85
86 Empty pages (of all orders) are kept on a single page cache list,
87 and are considered first when new pages are required; they are
88 deallocated at the start of the next collection if they haven't
89 been recycled by then. */
90
84
85 Page entries are arranged in an array of singly-linked lists. The
86 array is indexed by the allocation size, in bits, of the pages on
87 it; i.e. all pages on a list allocate objects of the same size.
88 Pages are ordered on the list such that all non-full pages precede
89 all full pages, with non-full pages arranged in order of decreasing
90 context depth.
91
92 Empty pages (of all orders) are kept on a single page cache list,
93 and are considered first when new pages are required; they are
94 deallocated at the start of the next collection if they haven't
95 been recycled by then. */
96
91
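As a rough sketch of the sizing rule described above (ignoring the extra orders and the larger practical minimum introduced further down in this file), a request is mapped to its order by rounding up to the next power of two. The function below is purely illustrative and its name is hypothetical, not part of ggc-page.c:

/* Illustrative only: round a request SIZE up to a power-of-two order,
   as described in the strategy comment.  The real allocator consults
   the size_lookup table and the extra orders instead.  */
static unsigned
example_order_for_size (size_t size)
{
  unsigned order = 2;          /* order 2 == 4-byte objects */
  size_t object_size = 4;
  while (object_size < size)
    {
      object_size <<= 1;
      order++;
    }
  return order;
}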
92/* Define GGC_POISON to poison memory marked unused by the collector. */
93#undef GGC_POISON
94
95/* Define GGC_ALWAYS_COLLECT to perform collection every time
96 ggc_collect is invoked. Otherwise, collection is performed only
97 when a significant amount of memory has been allocated since the
98 last collection. */
99#undef GGC_ALWAYS_COLLECT
100
101#ifdef ENABLE_GC_CHECKING
102#define GGC_POISON
103#endif
104#ifdef ENABLE_GC_ALWAYS_COLLECT
105#define GGC_ALWAYS_COLLECT
106#endif
107
108/* Define GGC_DEBUG_LEVEL to print debugging information.
109 0: No debugging output.
110 1: GC statistics only.
111 2: Page-entry allocations/deallocations as well.
112 3: Object allocations as well.
113 4: Object marks as well. */
114#define GGC_DEBUG_LEVEL (0)
115

--- 12 unchanged lines hidden (view full) ---

128 | | |
129 PAGE_L1_BITS |
130 | |
131 PAGE_L2_BITS
132
133 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
134 pages are aligned on system page boundaries. The next most
135 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
97/* Define GGC_DEBUG_LEVEL to print debugging information.
98 0: No debugging output.
99 1: GC statistics only.
100 2: Page-entry allocations/deallocations as well.
101 3: Object allocations as well.
102 4: Object marks as well. */
103#define GGC_DEBUG_LEVEL (0)
104

--- 12 unchanged lines hidden (view full) ---

117 | | |
118 PAGE_L1_BITS |
119 | |
120 PAGE_L2_BITS
121
122 The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
123 pages are aligned on system page boundaries. The next most
124 significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
136 index values in the lookup table, respectively.
125 index values in the lookup table, respectively.
137
138 For 32-bit architectures and the settings below, there are no
139 leftover bits. For architectures with wider pointers, the lookup
140 tree points to a list of pages, which must be scanned to find the
141 correct one. */
142
143#define PAGE_L1_BITS (8)
144#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)

--- 8 unchanged lines hidden (view full) ---

153
154/* The number of objects per allocation page, for objects on a page of
155 the indicated ORDER. */
156#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
157
158/* The size of an object on a page of the indicated ORDER. */
159#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
160
126
127 For 32-bit architectures and the settings below, there are no
128 leftover bits. For architectures with wider pointers, the lookup
129 tree points to a list of pages, which must be scanned to find the
130 correct one. */
131
132#define PAGE_L1_BITS (8)
133#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
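For concreteness, here is a hedged sketch of the pointer decomposition described above, assuming 32-bit pointers and 4096-byte system pages (so G.lg_pagesize == 12 and PAGE_L2_BITS == 12). The helper and its name are illustrative only; the actual LOOKUP_L1/LOOKUP_L2 macros used later in ggc_allocated_p are among the elided lines:

/* Hypothetical illustration, not part of ggc-page.c: split a 32-bit
   address into the two page-table indices, assuming PAGE_L1_BITS == 8
   and a 4096-byte page.  The low 12 bits are the offset within the
   page and are ignored by the page table.  */
static void
example_split_address (size_t a, size_t *l1, size_t *l2)
{
  *l1 = (a >> (32 - 8)) & ((1 << 8) - 1);    /* top 8 bits   */
  *l2 = (a >> 12) & ((1 << 12) - 1);         /* next 12 bits */
}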

--- 8 unchanged lines hidden (view full) ---

142
143/* The number of objects per allocation page, for objects on a page of
144 the indicated ORDER. */
145#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
146
147/* The size of an object on a page of the indicated ORDER. */
148#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
149
150/* For speed, we avoid doing a general integer divide to locate the
151 offset in the allocation bitmap, by precalculating numbers M, S
152 such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
153 within the page which is evenly divisible by the object size Z. */
154#define DIV_MULT(ORDER) inverse_table[ORDER].mult
155#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
156#define OFFSET_TO_BIT(OFFSET, ORDER) \
157 (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
158
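A worked instance of the pair described above, assuming 32-bit unsigned arithmetic: for an object size Z == 24 == 3 << 3, the multiplier is the inverse of 3 modulo 2^32 and the shift is 3, so every in-page offset that is a multiple of 24 divides exactly:

/* Illustrative values for Z == 24, assuming 32-bit unsigned ints.
   0xAAAAAAAB * 3 == 2^33 + 1, so 0xAAAAAAAB is the multiplicative
   inverse of 3 modulo 2^32; the shift is 3 because 24 == 3 << 3.  */
unsigned int mult  = 0xAAAAAAABu;
unsigned int shift = 3;
unsigned int offset = 48;                     /* 2 * 24, within the page */
unsigned int bit = (offset * mult) >> shift;  /* (48 * mult) mod 2^32 == 16,
                                                 and 16 >> 3 == 2 == 48 / 24 */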
161/* The number of extra orders, not corresponding to power-of-two sized
162 objects. */
163
159/* The number of extra orders, not corresponding to power-of-two sized
160 objects. */
161
164#define NUM_EXTRA_ORDERS \
165 (sizeof (extra_order_size_table) / sizeof (extra_order_size_table[0]))
162#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
166
163
164#define RTL_SIZE(NSLOTS) \
165 (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))
166
167/* The Ith entry is the maximum size of an object to be stored in the
168 Ith extra order. Adding a new entry to this array is the *only*
169 thing you need to do to add a new special allocation size. */
170
171static const size_t extra_order_size_table[] = {
172 sizeof (struct tree_decl),
167/* The Ith entry is the maximum size of an object to be stored in the
168 Ith extra order. Adding a new entry to this array is the *only*
169 thing you need to do to add a new special allocation size. */
170
171static const size_t extra_order_size_table[] = {
172 sizeof (struct tree_decl),
173 sizeof (struct tree_list)
173 sizeof (struct tree_list),
174 RTL_SIZE (2), /* REG, MEM, PLUS, etc. */
175 RTL_SIZE (10), /* INSN, CALL_INSN, JUMP_INSN */
174};
175
176/* The total number of orders. */
177
178#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
179
180/* We use this structure to determine the alignment required for
181 allocations. For power-of-two sized allocations, that's not a

--- 18 unchanged lines hidden (view full) ---

 200/* The Ith entry is the number of objects on a page of order I. */
201
202static unsigned objects_per_page_table[NUM_ORDERS];
203
204/* The Ith entry is the size of an object on a page of order I. */
205
206static size_t object_size_table[NUM_ORDERS];
207
176};
177
178/* The total number of orders. */
179
180#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
181
182/* We use this structure to determine the alignment required for
183 allocations. For power-of-two sized allocations, that's not a

--- 18 unchanged lines hidden (view full) ---

 202/* The Ith entry is the number of objects on a page of order I. */
203
204static unsigned objects_per_page_table[NUM_ORDERS];
205
206/* The Ith entry is the size of an object on a page of order I. */
207
208static size_t object_size_table[NUM_ORDERS];
209
210/* The Ith entry is a pair of numbers (mult, shift) such that
211 ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
212 for all k evenly divisible by OBJECT_SIZE(I). */
213
214static struct
215{
216 unsigned int mult;
217 unsigned int shift;
218}
219inverse_table[NUM_ORDERS];
220
208/* A page_entry records the status of an allocation page. This
209 structure is dynamically sized to fit the bitmap in_use_p. */
221/* A page_entry records the status of an allocation page. This
222 structure is dynamically sized to fit the bitmap in_use_p. */
210typedef struct page_entry
223typedef struct page_entry
211{
212 /* The next page-entry with objects of the same size, or NULL if
213 this is the last page-entry. */
214 struct page_entry *next;
215
216 /* The number of bytes allocated. (This will always be a multiple
217 of the host system page size.) */
218 size_t bytes;
219
220 /* The address at which the memory is allocated. */
221 char *page;
222
223#ifdef USING_MALLOC_PAGE_GROUPS
224 /* Back pointer to the page group this page came from. */
225 struct page_group *group;
226#endif
227
224{
225 /* The next page-entry with objects of the same size, or NULL if
226 this is the last page-entry. */
227 struct page_entry *next;
228
229 /* The number of bytes allocated. (This will always be a multiple
230 of the host system page size.) */
231 size_t bytes;
232
233 /* The address at which the memory is allocated. */
234 char *page;
235
236#ifdef USING_MALLOC_PAGE_GROUPS
237 /* Back pointer to the page group this page came from. */
238 struct page_group *group;
239#endif
240
228 /* Saved in-use bit vector for pages that aren't in the topmost
229 context during collection. */
230 unsigned long *save_in_use_p;
241 /* This is the index in the by_depth varray where this page table
242 can be found. */
243 unsigned long index_by_depth;
231
232 /* Context depth of this page. */
233 unsigned short context_depth;
234
235 /* The number of free objects remaining on this page. */
236 unsigned short num_free_objects;
237
238 /* A likely candidate for the bit position of a free object for the

--- 72 unchanged lines hidden (view full) ---

311 size_t allocated;
312
313 /* Bytes currently allocated at the end of the last collection. */
314 size_t allocated_last_gc;
315
316 /* Total amount of memory mapped. */
317 size_t bytes_mapped;
318
244
245 /* Context depth of this page. */
246 unsigned short context_depth;
247
248 /* The number of free objects remaining on this page. */
249 unsigned short num_free_objects;
250
251 /* A likely candidate for the bit position of a free object for the

--- 72 unchanged lines hidden (view full) ---

324 size_t allocated;
325
326 /* Bytes currently allocated at the end of the last collection. */
327 size_t allocated_last_gc;
328
329 /* Total amount of memory mapped. */
330 size_t bytes_mapped;
331
332 /* Bit N set if any allocations have been done at context depth N. */
333 unsigned long context_depth_allocations;
334
335 /* Bit N set if any collections have been done at context depth N. */
336 unsigned long context_depth_collections;
337
319 /* The current depth in the context stack. */
320 unsigned short context_depth;
321
322 /* A file descriptor open to /dev/zero for reading. */
323#if defined (HAVE_MMAP_DEV_ZERO)
324 int dev_zero_fd;
325#endif
326
327 /* A cache of free system pages. */
328 page_entry *free_pages;
329
330#ifdef USING_MALLOC_PAGE_GROUPS
331 page_group *page_groups;
332#endif
333
334 /* The file descriptor for debugging output. */
335 FILE *debug_file;
338 /* The current depth in the context stack. */
339 unsigned short context_depth;
340
341 /* A file descriptor open to /dev/zero for reading. */
342#if defined (HAVE_MMAP_DEV_ZERO)
343 int dev_zero_fd;
344#endif
345
346 /* A cache of free system pages. */
347 page_entry *free_pages;
348
349#ifdef USING_MALLOC_PAGE_GROUPS
350 page_group *page_groups;
351#endif
352
353 /* The file descriptor for debugging output. */
354 FILE *debug_file;
355
356 /* Current number of elements in use in depth below. */
357 unsigned int depth_in_use;
358
359 /* Maximum number of elements that can be used before resizing. */
360 unsigned int depth_max;
361
 362 /* Each element of this array is an index in by_depth where the given
 363 depth starts. This structure is indexed by the depth we are
 364 interested in. */
365 unsigned int *depth;
366
367 /* Current number of elements in use in by_depth below. */
368 unsigned int by_depth_in_use;
369
370 /* Maximum number of elements that can be used before resizing. */
371 unsigned int by_depth_max;
372
373 /* Each element of this array is a pointer to a page_entry, all
374 page_entries can be found in here by increasing depth.
375 index_by_depth in the page_entry is the index into this data
376 structure where that page_entry can be found. This is used to
377 speed up finding all page_entries at a particular depth. */
378 page_entry **by_depth;
379
380 /* Each element is a pointer to the saved in_use_p bits, if any,
381 zero otherwise. We allocate them all together, to enable a
382 better runtime data access pattern. */
383 unsigned long **save_in_use;
384
336} G;
337
338/* The size in bytes required to maintain a bitmap for the objects
339 on a page-entry. */
340#define BITMAP_SIZE(Num_objects) \
341 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
342
385} G;
386
387/* The size in bytes required to maintain a bitmap for the objects
388 on a page-entry. */
389#define BITMAP_SIZE(Num_objects) \
390 (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
391
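As a worked example, assuming a 4096-byte page of 8-byte objects (512 of them) plus the one-past-the-end sentinel bit that the allocator keeps set, on a host with 32-bit longs:

/* Worked example of BITMAP_SIZE under the assumptions above; the
   numbers are illustrative, not taken from a particular target.  */
size_t n = 512 + 1;                     /* objects plus the sentinel bit  */
size_t bitmap_bytes = BITMAP_SIZE (n);  /* CEIL (513, 32) * 4 == 68 bytes */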
343/* Skip garbage collection if the current allocation is not at least
344 this factor times the allocation at the end of the last collection.
345 In other words, total allocation must expand by (this factor minus
346 one) before collection is performed. */
347#define GGC_MIN_EXPAND_FOR_GC (1.3)
348
349/* Bound `allocated_last_gc' to 4MB, to prevent the memory expansion
350 test from triggering too often when the heap is small. */
351#define GGC_MIN_LAST_ALLOCATED (4 * 1024 * 1024)
352
353/* Allocate pages in chunks of this size, to throttle calls to memory
354 allocation routines. The first page is used, the rest go onto the
355 free list. This cannot be larger than HOST_BITS_PER_INT for the
356 in_use bitmask for page_group. */
357#define GGC_QUIRE_SIZE 16
392/* Allocate pages in chunks of this size, to throttle calls to memory
393 allocation routines. The first page is used, the rest go onto the
394 free list. This cannot be larger than HOST_BITS_PER_INT for the
395 in_use bitmask for page_group. */
396#define GGC_QUIRE_SIZE 16
397
398/* Initial guess as to how many page table entries we might need. */
399#define INITIAL_PTE_COUNT 128
358
359static int ggc_allocated_p PARAMS ((const void *));
360static page_entry *lookup_page_table_entry PARAMS ((const void *));
361static void set_page_table_entry PARAMS ((void *, page_entry *));
362#ifdef USING_MMAP
363static char *alloc_anon PARAMS ((char *, size_t));
364#endif
365#ifdef USING_MALLOC_PAGE_GROUPS
366static size_t page_group_index PARAMS ((char *, char *));
367static void set_page_group_in_use PARAMS ((page_group *, char *));
368static void clear_page_group_in_use PARAMS ((page_group *, char *));
369#endif
370static struct page_entry * alloc_page PARAMS ((unsigned));
371static void free_page PARAMS ((struct page_entry *));
372static void release_pages PARAMS ((void));
373static void clear_marks PARAMS ((void));
374static void sweep_pages PARAMS ((void));
375static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
400
401static int ggc_allocated_p PARAMS ((const void *));
402static page_entry *lookup_page_table_entry PARAMS ((const void *));
403static void set_page_table_entry PARAMS ((void *, page_entry *));
404#ifdef USING_MMAP
405static char *alloc_anon PARAMS ((char *, size_t));
406#endif
407#ifdef USING_MALLOC_PAGE_GROUPS
408static size_t page_group_index PARAMS ((char *, char *));
409static void set_page_group_in_use PARAMS ((page_group *, char *));
410static void clear_page_group_in_use PARAMS ((page_group *, char *));
411#endif
412static struct page_entry * alloc_page PARAMS ((unsigned));
413static void free_page PARAMS ((struct page_entry *));
414static void release_pages PARAMS ((void));
415static void clear_marks PARAMS ((void));
416static void sweep_pages PARAMS ((void));
417static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
418static void compute_inverse PARAMS ((unsigned));
419static inline void adjust_depth PARAMS ((void));
376
420
377#ifdef GGC_POISON
421#ifdef ENABLE_GC_CHECKING
378static void poison_pages PARAMS ((void));
379#endif
380
381void debug_print_page_list PARAMS ((int));
422static void poison_pages PARAMS ((void));
423#endif
424
425void debug_print_page_list PARAMS ((int));
426static void push_depth PARAMS ((unsigned int));
427static void push_by_depth PARAMS ((page_entry *, unsigned long *));
382
428
383/* Returns non-zero if P was allocated in GC'able memory. */
429/* Push an entry onto G.depth. */
384
430
431inline static void
432push_depth (i)
433 unsigned int i;
434{
435 if (G.depth_in_use >= G.depth_max)
436 {
437 G.depth_max *= 2;
438 G.depth = (unsigned int *) xrealloc ((char *) G.depth,
439 G.depth_max * sizeof (unsigned int));
440 }
441 G.depth[G.depth_in_use++] = i;
442}
443
444/* Push an entry onto G.by_depth and G.save_in_use. */
445
446inline static void
447push_by_depth (p, s)
448 page_entry *p;
449 unsigned long *s;
450{
451 if (G.by_depth_in_use >= G.by_depth_max)
452 {
453 G.by_depth_max *= 2;
454 G.by_depth = (page_entry **) xrealloc ((char *) G.by_depth,
455 G.by_depth_max * sizeof (page_entry *));
456 G.save_in_use = (unsigned long **) xrealloc ((char *) G.save_in_use,
457 G.by_depth_max * sizeof (unsigned long *));
458 }
459 G.by_depth[G.by_depth_in_use] = p;
460 G.save_in_use[G.by_depth_in_use++] = s;
461}
462
463/* For the 3.3 release, we will avoid prefetch, as it isn't tested widely. */
464#define prefetch(X) ((void) X)
465
466#define save_in_use_p_i(__i) \
467 (G.save_in_use[__i])
468#define save_in_use_p(__p) \
469 (save_in_use_p_i (__p->index_by_depth))
470
471/* Returns nonzero if P was allocated in GC'able memory. */
472
385static inline int
386ggc_allocated_p (p)
387 const void *p;
388{
389 page_entry ***base;
390 size_t L1, L2;
391
392#if HOST_BITS_PER_PTR <= 32

--- 14 unchanged lines hidden (view full) ---

407
408 /* Extract the level 1 and 2 indices. */
409 L1 = LOOKUP_L1 (p);
410 L2 = LOOKUP_L2 (p);
411
412 return base[L1] && base[L1][L2];
413}
414
473static inline int
474ggc_allocated_p (p)
475 const void *p;
476{
477 page_entry ***base;
478 size_t L1, L2;
479
480#if HOST_BITS_PER_PTR <= 32

--- 14 unchanged lines hidden (view full) ---

495
496 /* Extract the level 1 and 2 indices. */
497 L1 = LOOKUP_L1 (p);
498 L2 = LOOKUP_L2 (p);
499
500 return base[L1] && base[L1][L2];
501}
502
415/* Traverse the page table and find the entry for a page.
503/* Traverse the page table and find the entry for a page.
416 Die (probably) if the object wasn't allocated via GC. */
417
418static inline page_entry *
419lookup_page_table_entry(p)
420 const void *p;
421{
422 page_entry ***base;
423 size_t L1, L2;

--- 96 unchanged lines hidden (view full) ---

520 {
521 perror ("virtual memory exhausted");
522 exit (FATAL_EXIT_CODE);
523 }
524
525 /* Remember that we allocated this memory. */
526 G.bytes_mapped += size;
527
504 Die (probably) if the object wasn't allocated via GC. */
505
506static inline page_entry *
507lookup_page_table_entry(p)
508 const void *p;
509{
510 page_entry ***base;
511 size_t L1, L2;

--- 96 unchanged lines hidden (view full) ---

608 {
609 perror ("virtual memory exhausted");
610 exit (FATAL_EXIT_CODE);
611 }
612
613 /* Remember that we allocated this memory. */
614 G.bytes_mapped += size;
615
616 /* Pretend we don't have access to the allocated pages. We'll enable
617 access to smaller pieces of the area in ggc_alloc. Discard the
618 handle to avoid handle leak. */
619 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
620
528 return page;
529}
530#endif
531#ifdef USING_MALLOC_PAGE_GROUPS
532/* Compute the index for this page into the page group. */
533
534static inline size_t
535page_group_index (allocation, page)

--- 178 unchanged lines hidden (view full) ---

714
715 entry->bytes = entry_size;
716 entry->page = page;
717 entry->context_depth = G.context_depth;
718 entry->order = order;
719 entry->num_free_objects = num_objects;
720 entry->next_bit_hint = 1;
721
621 return page;
622}
623#endif
624#ifdef USING_MALLOC_PAGE_GROUPS
625/* Compute the index for this page into the page group. */
626
627static inline size_t
628page_group_index (allocation, page)

--- 178 unchanged lines hidden (view full) ---

807
808 entry->bytes = entry_size;
809 entry->page = page;
810 entry->context_depth = G.context_depth;
811 entry->order = order;
812 entry->num_free_objects = num_objects;
813 entry->next_bit_hint = 1;
814
815 G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
816
722#ifdef USING_MALLOC_PAGE_GROUPS
723 entry->group = group;
724 set_page_group_in_use (group, page);
725#endif
726
727 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
728 increment the hint. */
729 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
730 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
731
732 set_page_table_entry (page, entry);
733
734 if (GGC_DEBUG_LEVEL >= 2)
817#ifdef USING_MALLOC_PAGE_GROUPS
818 entry->group = group;
819 set_page_group_in_use (group, page);
820#endif
821
822 /* Set the one-past-the-end in-use bit. This acts as a sentry as we
823 increment the hint. */
824 entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
825 = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
826
827 set_page_table_entry (page, entry);
828
829 if (GGC_DEBUG_LEVEL >= 2)
735 fprintf (G.debug_file,
736 "Allocating page at %p, object size=%ld, data %p-%p\n",
737 (PTR) entry, (long) OBJECT_SIZE (order), page,
830 fprintf (G.debug_file,
831 "Allocating page at %p, object size=%lu, data %p-%p\n",
832 (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
738 page + entry_size - 1);
739
740 return entry;
741}
742
833 page + entry_size - 1);
834
835 return entry;
836}
837
838/* Adjust the size of G.depth so that no index greater than the one
839 used by the top of the G.by_depth is used. */
840
841static inline void
842adjust_depth ()
843{
844 page_entry *top;
845
846 if (G.by_depth_in_use)
847 {
848 top = G.by_depth[G.by_depth_in_use-1];
849
 850 /* Peel back indices in depth that index into by_depth, so that
 851 as new elements are added to by_depth, we note the indices
 852 of those elements, if they are for new context depths. */
853 while (G.depth_in_use > (size_t)top->context_depth+1)
854 --G.depth_in_use;
855 }
856}
857
743/* For a page that is no longer needed, put it on the free page list. */
744
745static inline void
746free_page (entry)
747 page_entry *entry;
748{
749 if (GGC_DEBUG_LEVEL >= 2)
858/* For a page that is no longer needed, put it on the free page list. */
859
860static inline void
861free_page (entry)
862 page_entry *entry;
863{
864 if (GGC_DEBUG_LEVEL >= 2)
750 fprintf (G.debug_file,
865 fprintf (G.debug_file,
751 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
752 entry->page, entry->page + entry->bytes - 1);
753
866 "Deallocating page at %p, data %p-%p\n", (PTR) entry,
867 entry->page, entry->page + entry->bytes - 1);
868
869 /* Mark the page as inaccessible. Discard the handle to avoid handle
870 leak. */
871 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
872
754 set_page_table_entry (entry->page, NULL);
755
756#ifdef USING_MALLOC_PAGE_GROUPS
757 clear_page_group_in_use (entry->group, entry->page);
758#endif
759
873 set_page_table_entry (entry->page, NULL);
874
875#ifdef USING_MALLOC_PAGE_GROUPS
876 clear_page_group_in_use (entry->group, entry->page);
877#endif
878
879 if (G.by_depth_in_use > 1)
880 {
881 page_entry *top = G.by_depth[G.by_depth_in_use-1];
882
883 /* If they are at the same depth, put top element into freed
884 slot. */
885 if (entry->context_depth == top->context_depth)
886 {
887 int i = entry->index_by_depth;
888 G.by_depth[i] = top;
889 G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
890 top->index_by_depth = i;
891 }
892 else
893 {
894 /* We cannot free a page from a context deeper than the
895 current one. */
896 abort ();
897 }
898 }
899 --G.by_depth_in_use;
900
901 adjust_depth ();
902
760 entry->next = G.free_pages;
761 G.free_pages = entry;
762}
763
764/* Release the free page cache to the system. */
765
766static void
767release_pages ()

--- 44 unchanged lines hidden (view full) ---

812 pp = &p->next;
813
814 /* Remove all free page groups, and release the storage. */
815 gp = &G.page_groups;
816 while ((g = *gp) != NULL)
817 if (g->in_use == 0)
818 {
819 *gp = g->next;
903 entry->next = G.free_pages;
904 G.free_pages = entry;
905}
906
907/* Release the free page cache to the system. */
908
909static void
910release_pages ()

--- 44 unchanged lines hidden (view full) ---

955 pp = &p->next;
956
957 /* Remove all free page groups, and release the storage. */
958 gp = &G.page_groups;
959 while ((g = *gp) != NULL)
960 if (g->in_use == 0)
961 {
962 *gp = g->next;
820 G.bytes_mapped -= g->alloc_size;
963 G.bytes_mapped -= g->alloc_size;
821 free (g->allocation);
822 }
823 else
824 gp = &g->next;
825#endif
826}
827
828/* This table provides a fast way to determine ceil(log_2(size)) for
829 allocation requests. The minimum allocation size is eight bytes. */
830
964 free (g->allocation);
965 }
966 else
967 gp = &g->next;
968#endif
969}
970
971/* This table provides a fast way to determine ceil(log_2(size)) for
972 allocation requests. The minimum allocation size is eight bytes. */
973
831static unsigned char size_lookup[257] =
974static unsigned char size_lookup[257] =
832{
975{
833 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
834 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
835 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
836 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
837 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
838 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
976 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
977 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
978 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
979 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
980 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
839 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
981 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
840 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
982 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
983 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
841 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
842 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
843 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
844 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
845 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
846 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
847 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
848 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
849 8
850};
851
984 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
985 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
986 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
987 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
988 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
989 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
990 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
991 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
992 8
993};
994
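A few worked reads of the table above, checked against its contents as written (note that init_ggc later overwrites some entries so that sizes covered by the extra orders map there instead):

/* Illustrative checks of size_lookup as initialized above; the
   function name is hypothetical and not part of ggc-page.c.  */
#include <assert.h>

static void
example_size_lookup_checks (void)
{
  assert (size_lookup[8]   == 3);   /* 8 bytes   -> order 3 (8-byte objects)  */
  assert (size_lookup[24]  == 5);   /* 24 bytes  -> order 5 (32-byte objects) */
  assert (size_lookup[256] == 8);   /* 256 bytes -> order 8                   */
}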
852/* Allocate a chunk of memory of SIZE bytes. If ZERO is non-zero, the
995/* Allocate a chunk of memory of SIZE bytes. If ZERO is nonzero, the
853 memory is zeroed; otherwise, its contents are undefined. */
854
855void *
856ggc_alloc (size)
857 size_t size;
858{
859 unsigned order, word, bit, object_offset;
860 struct page_entry *entry;

--- 13 unchanged lines hidden (view full) ---

874 entry = G.pages[order];
875
876 /* If there is no page for this object size, or all pages in this
877 context are full, allocate a new page. */
878 if (entry == NULL || entry->num_free_objects == 0)
879 {
880 struct page_entry *new_entry;
881 new_entry = alloc_page (order);
996 memory is zeroed; otherwise, its contents are undefined. */
997
998void *
999ggc_alloc (size)
1000 size_t size;
1001{
1002 unsigned order, word, bit, object_offset;
1003 struct page_entry *entry;

--- 13 unchanged lines hidden (view full) ---

1017 entry = G.pages[order];
1018
1019 /* If there is no page for this object size, or all pages in this
1020 context are full, allocate a new page. */
1021 if (entry == NULL || entry->num_free_objects == 0)
1022 {
1023 struct page_entry *new_entry;
1024 new_entry = alloc_page (order);
882
1025
1026 new_entry->index_by_depth = G.by_depth_in_use;
1027 push_by_depth (new_entry, 0);
1028
1029 /* We can skip context depths, if we do, make sure we go all the
1030 way to the new depth. */
1031 while (new_entry->context_depth >= G.depth_in_use)
1032 push_depth (G.by_depth_in_use-1);
1033
883 /* If this is the only entry, it's also the tail. */
884 if (entry == NULL)
885 G.page_tails[order] = new_entry;
1034 /* If this is the only entry, it's also the tail. */
1035 if (entry == NULL)
1036 G.page_tails[order] = new_entry;
886
1037
887 /* Put new pages at the head of the page list. */
888 new_entry->next = entry;
889 entry = new_entry;
890 G.pages[order] = new_entry;
891
892 /* For a new page, we know the word and bit positions (in the
893 in_use bitmap) of the first available object -- they're zero. */
894 new_entry->next_bit_hint = 1;

--- 5 unchanged lines hidden (view full) ---

900 {
901 /* First try to use the hint left from the previous allocation
902 to locate a clear bit in the in-use bitmap. We've made sure
903 that the one-past-the-end bit is always set, so if the hint
904 has run over, this test will fail. */
905 unsigned hint = entry->next_bit_hint;
906 word = hint / HOST_BITS_PER_LONG;
907 bit = hint % HOST_BITS_PER_LONG;
1038 /* Put new pages at the head of the page list. */
1039 new_entry->next = entry;
1040 entry = new_entry;
1041 G.pages[order] = new_entry;
1042
1043 /* For a new page, we know the word and bit positions (in the
1044 in_use bitmap) of the first available object -- they're zero. */
1045 new_entry->next_bit_hint = 1;

--- 5 unchanged lines hidden (view full) ---

1051 {
1052 /* First try to use the hint left from the previous allocation
1053 to locate a clear bit in the in-use bitmap. We've made sure
1054 that the one-past-the-end bit is always set, so if the hint
1055 has run over, this test will fail. */
1056 unsigned hint = entry->next_bit_hint;
1057 word = hint / HOST_BITS_PER_LONG;
1058 bit = hint % HOST_BITS_PER_LONG;
908
1059
909 /* If the hint didn't work, scan the bitmap from the beginning. */
910 if ((entry->in_use_p[word] >> bit) & 1)
911 {
912 word = bit = 0;
913 while (~entry->in_use_p[word] == 0)
914 ++word;
915 while ((entry->in_use_p[word] >> bit) & 1)
916 ++bit;

--- 21 unchanged lines hidden (view full) ---

938 entry->next = NULL;
939 G.page_tails[order]->next = entry;
940 G.page_tails[order] = entry;
941 }
942
943 /* Calculate the object's address. */
944 result = entry->page + object_offset;
945
1060 /* If the hint didn't work, scan the bitmap from the beginning. */
1061 if ((entry->in_use_p[word] >> bit) & 1)
1062 {
1063 word = bit = 0;
1064 while (~entry->in_use_p[word] == 0)
1065 ++word;
1066 while ((entry->in_use_p[word] >> bit) & 1)
1067 ++bit;

--- 21 unchanged lines hidden (view full) ---

1089 entry->next = NULL;
1090 G.page_tails[order]->next = entry;
1091 G.page_tails[order] = entry;
1092 }
1093
1094 /* Calculate the object's address. */
1095 result = entry->page + object_offset;
1096
946#ifdef GGC_POISON
1097#ifdef ENABLE_GC_CHECKING
 1098 /* Keep poisoning the object by writing 0xaf, in an attempt to keep
 1099 exactly the same semantics in the presence of memory bugs, regardless
 1100 of ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
 1101 handle to avoid handle leak. */
1102 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
1103
947 /* `Poison' the entire allocated object, including any padding at
948 the end. */
949 memset (result, 0xaf, OBJECT_SIZE (order));
1104 /* `Poison' the entire allocated object, including any padding at
1105 the end. */
1106 memset (result, 0xaf, OBJECT_SIZE (order));
1107
 1108 /* Make the bytes after the end of the object inaccessible. Discard the
1109 handle to avoid handle leak. */
1110 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1111 OBJECT_SIZE (order) - size));
950#endif
951
1112#endif
1113
1114 /* Tell Valgrind that the memory is there, but its content isn't
1115 defined. The bytes at the end of the object are still marked
 1116 inaccessible. */
1117 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1118
952 /* Keep track of how many bytes are being allocated. This
953 information is used in deciding when to collect. */
954 G.allocated += OBJECT_SIZE (order);
955
956 if (GGC_DEBUG_LEVEL >= 3)
1119 /* Keep track of how many bytes are being allocated. This
1120 information is used in deciding when to collect. */
1121 G.allocated += OBJECT_SIZE (order);
1122
1123 if (GGC_DEBUG_LEVEL >= 3)
957 fprintf (G.debug_file,
958 "Allocating object, requested size=%ld, actual=%ld at %p on %p\n",
959 (long) size, (long) OBJECT_SIZE (order), result, (PTR) entry);
1124 fprintf (G.debug_file,
1125 "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1126 (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
1127 (PTR) entry);
960
961 return result;
962}
963
964/* If P is not marked, marks it and return false. Otherwise return true.
965 P must have been allocated by the GC allocator; it mustn't point to
966 static objects, stack variables, or memory allocated with malloc. */
967

--- 10 unchanged lines hidden (view full) ---

978 entry = lookup_page_table_entry (p);
979#ifdef ENABLE_CHECKING
980 if (entry == NULL)
981 abort ();
982#endif
983
984 /* Calculate the index of the object on the page; this is its bit
985 position in the in_use_p bitmap. */
1128
1129 return result;
1130}
1131
1132/* If P is not marked, marks it and return false. Otherwise return true.
1133 P must have been allocated by the GC allocator; it mustn't point to
1134 static objects, stack variables, or memory allocated with malloc. */
1135

--- 10 unchanged lines hidden (view full) ---

1146 entry = lookup_page_table_entry (p);
1147#ifdef ENABLE_CHECKING
1148 if (entry == NULL)
1149 abort ();
1150#endif
1151
1152 /* Calculate the index of the object on the page; this is its bit
1153 position in the in_use_p bitmap. */
986 bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
1154 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
987 word = bit / HOST_BITS_PER_LONG;
988 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1155 word = bit / HOST_BITS_PER_LONG;
1156 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
989
1157
990 /* If the bit was previously set, skip it. */
991 if (entry->in_use_p[word] & mask)
992 return 1;
993
994 /* Otherwise set it, and decrement the free object count. */
995 entry->in_use_p[word] |= mask;
996 entry->num_free_objects -= 1;
997
998 if (GGC_DEBUG_LEVEL >= 4)
999 fprintf (G.debug_file, "Marking %p\n", p);
1000
1001 return 0;
1002}
1003
1158 /* If the bit was previously set, skip it. */
1159 if (entry->in_use_p[word] & mask)
1160 return 1;
1161
1162 /* Otherwise set it, and decrement the free object count. */
1163 entry->in_use_p[word] |= mask;
1164 entry->num_free_objects -= 1;
1165
1166 if (GGC_DEBUG_LEVEL >= 4)
1167 fprintf (G.debug_file, "Marking %p\n", p);
1168
1169 return 0;
1170}
1171
1004/* Return 1 if P has been marked, zero otherwise.
1172/* Return 1 if P has been marked, zero otherwise.
1005 P must have been allocated by the GC allocator; it mustn't point to
1006 static objects, stack variables, or memory allocated with malloc. */
1007
1008int
1009ggc_marked_p (p)
1010 const void *p;
1011{
1012 page_entry *entry;

--- 5 unchanged lines hidden (view full) ---

1018 entry = lookup_page_table_entry (p);
1019#ifdef ENABLE_CHECKING
1020 if (entry == NULL)
1021 abort ();
1022#endif
1023
1024 /* Calculate the index of the object on the page; this is its bit
1025 position in the in_use_p bitmap. */
1173 P must have been allocated by the GC allocator; it mustn't point to
1174 static objects, stack variables, or memory allocated with malloc. */
1175
1176int
1177ggc_marked_p (p)
1178 const void *p;
1179{
1180 page_entry *entry;

--- 5 unchanged lines hidden (view full) ---

1186 entry = lookup_page_table_entry (p);
1187#ifdef ENABLE_CHECKING
1188 if (entry == NULL)
1189 abort ();
1190#endif
1191
1192 /* Calculate the index of the object on the page; this is its bit
1193 position in the in_use_p bitmap. */
1026 bit = (((const char *) p) - entry->page) / OBJECT_SIZE (entry->order);
1194 bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1027 word = bit / HOST_BITS_PER_LONG;
1028 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1195 word = bit / HOST_BITS_PER_LONG;
1196 mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1029
1197
1030 return (entry->in_use_p[word] & mask) != 0;
1031}
1032
1033/* Return the size of the gc-able object P. */
1034
1035size_t
1036ggc_get_size (p)
1037 const void *p;
1038{
1039 page_entry *pe = lookup_page_table_entry (p);
1040 return OBJECT_SIZE (pe->order);
1041}
1042
1198 return (entry->in_use_p[word] & mask) != 0;
1199}
1200
1201/* Return the size of the gc-able object P. */
1202
1203size_t
1204ggc_get_size (p)
1205 const void *p;
1206{
1207 page_entry *pe = lookup_page_table_entry (p);
1208 return OBJECT_SIZE (pe->order);
1209}
1210
1043/* Initialize the ggc-mmap allocator. */
1211/* Subroutine of init_ggc which computes the pair of numbers used to
1212 perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1044
1213
1214 This algorithm is taken from Granlund and Montgomery's paper
1215 "Division by Invariant Integers using Multiplication"
1216 (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1217 constants). */
1218
1219static void
1220compute_inverse (order)
1221 unsigned order;
1222{
1223 unsigned size, inv, e;
1224
1225 /* There can be only one object per "page" in a bucket for sizes
1226 larger than half a machine page; it will always have offset zero. */
1227 if (OBJECT_SIZE (order) > G.pagesize/2)
1228 {
1229 if (OBJECTS_PER_PAGE (order) != 1)
1230 abort ();
1231
1232 DIV_MULT (order) = 1;
1233 DIV_SHIFT (order) = 0;
1234 return;
1235 }
1236
1237 size = OBJECT_SIZE (order);
1238 e = 0;
1239 while (size % 2 == 0)
1240 {
1241 e++;
1242 size >>= 1;
1243 }
1244
1245 inv = size;
1246 while (inv * size != 1)
1247 inv = inv * (2 - inv*size);
1248
1249 DIV_MULT (order) = inv;
1250 DIV_SHIFT (order) = e;
1251}
1252
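The iteration above (inv = inv * (2 - inv*size)) is Newton's method modulo 2^32 and doubles the number of correct low-order bits per round. Below is a minimal standalone check, assuming unsigned int is 32 bits wide, that the resulting pair performs exact division for every in-page multiple of the object size; the helper is hypothetical and not part of ggc-page.c:

/* Hypothetical standalone sketch: recompute the (mult, shift) pair
   the way compute_inverse does and verify it against ordinary
   division, assuming 32-bit unsigned ints and 4096-byte pages.  */
#include <assert.h>

static void
example_check_inverse (unsigned int z)
{
  unsigned int size = z, e = 0, inv, k;

  while (size % 2 == 0)            /* strip the even part of Z */
    { e++; size >>= 1; }

  inv = size;                      /* Newton iteration, wrapping mod 2^32 */
  while (inv * size != 1)
    inv = inv * (2 - inv * size);

  for (k = 1; k * z < 4096; k++)   /* offsets that fit within a page */
    assert (((k * z * inv) >> e) == k);
}

/* e.g. example_check_inverse (24); example_check_inverse (56);  */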
1253/* Initialize the ggc-mmap allocator. */
1045void
1046init_ggc ()
1047{
1048 unsigned order;
1049
1050 G.pagesize = getpagesize();
1051 G.lg_pagesize = exact_log2 (G.pagesize);
1052
1053#ifdef HAVE_MMAP_DEV_ZERO
1054 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1055 if (G.dev_zero_fd == -1)
1254void
1255init_ggc ()
1256{
1257 unsigned order;
1258
1259 G.pagesize = getpagesize();
1260 G.lg_pagesize = exact_log2 (G.pagesize);
1261
1262#ifdef HAVE_MMAP_DEV_ZERO
1263 G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1264 if (G.dev_zero_fd == -1)
1056 abort ();
1265 fatal_io_error ("open /dev/zero");
1057#endif
1058
1059#if 0
1060 G.debug_file = fopen ("ggc-mmap.debug", "w");
1061#else
1062 G.debug_file = stdout;
1063#endif
1064
1266#endif
1267
1268#if 0
1269 G.debug_file = fopen ("ggc-mmap.debug", "w");
1270#else
1271 G.debug_file = stdout;
1272#endif
1273
1065 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1066
1067#ifdef USING_MMAP
1068 /* StunOS has an amazing off-by-one error for the first mmap allocation
1069 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1070 believe, is an unaligned page allocation, which would cause us to
1071 hork badly if we tried to use it. */
1072 {
1073 char *p = alloc_anon (NULL, G.pagesize);
1074 struct page_entry *e;

--- 24 unchanged lines hidden (view full) ---

1099 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1100
1101 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1102 so that we're sure of getting aligned memory. */
1103 s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
1104 object_size_table[order] = s;
1105 }
1106
1274#ifdef USING_MMAP
1275 /* StunOS has an amazing off-by-one error for the first mmap allocation
1276 after fiddling with RLIMIT_STACK. The result, as hard as it is to
1277 believe, is an unaligned page allocation, which would cause us to
1278 hork badly if we tried to use it. */
1279 {
1280 char *p = alloc_anon (NULL, G.pagesize);
1281 struct page_entry *e;

--- 24 unchanged lines hidden (view full) ---

1306 size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1307
1308 /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1309 so that we're sure of getting aligned memory. */
1310 s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
1311 object_size_table[order] = s;
1312 }
1313
1107 /* Initialize the objects-per-page table. */
1314 /* Initialize the objects-per-page and inverse tables. */
1108 for (order = 0; order < NUM_ORDERS; ++order)
1109 {
1110 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1111 if (objects_per_page_table[order] == 0)
1112 objects_per_page_table[order] = 1;
1315 for (order = 0; order < NUM_ORDERS; ++order)
1316 {
1317 objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1318 if (objects_per_page_table[order] == 0)
1319 objects_per_page_table[order] = 1;
1320 compute_inverse (order);
1113 }
1114
1115 /* Reset the size_lookup array to put appropriately sized objects in
1116 the special orders. All objects bigger than the previous power
1117 of two, but no greater than the special size, should go in the
1118 new order. */
1119 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1120 {
1121 int o;
1122 int i;
1123
1124 o = size_lookup[OBJECT_SIZE (order)];
1125 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1126 size_lookup[i] = order;
1127 }
1321 }
1322
1323 /* Reset the size_lookup array to put appropriately sized objects in
1324 the special orders. All objects bigger than the previous power
1325 of two, but no greater than the special size, should go in the
1326 new order. */
1327 for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1328 {
1329 int o;
1330 int i;
1331
1332 o = size_lookup[OBJECT_SIZE (order)];
1333 for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1334 size_lookup[i] = order;
1335 }
1336
1337 G.depth_in_use = 0;
1338 G.depth_max = 10;
1339 G.depth = (unsigned int *) xmalloc (G.depth_max * sizeof (unsigned int));
1340
1341 G.by_depth_in_use = 0;
1342 G.by_depth_max = INITIAL_PTE_COUNT;
1343 G.by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
1344 G.save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
1128}
1129
1130/* Increment the `GC context'. Objects allocated in an outer context
1131 are never freed, eliminating the need to register their roots. */
1132
1133void
1134ggc_push_context ()
1135{
1136 ++G.context_depth;
1137
1138 /* Die on wrap. */
1345}
1346
1347/* Increment the `GC context'. Objects allocated in an outer context
1348 are never freed, eliminating the need to register their roots. */
1349
1350void
1351ggc_push_context ()
1352{
1353 ++G.context_depth;
1354
1355 /* Die on wrap. */
1139 if (G.context_depth == 0)
1356 if (G.context_depth >= HOST_BITS_PER_LONG)
1140 abort ();
1141}
1142
1143/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1144 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1145
1146static void
1147ggc_recalculate_in_use_p (p)
1148 page_entry *p;
1149{
1150 unsigned int i;
1151 size_t num_objects;
1152
1357 abort ();
1358}
1359
1360/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1361 reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1362
1363static void
1364ggc_recalculate_in_use_p (p)
1365 page_entry *p;
1366{
1367 unsigned int i;
1368 size_t num_objects;
1369
1153 /* Because the past-the-end bit in in_use_p is always set, we
1370 /* Because the past-the-end bit in in_use_p is always set, we
1154 pretend there is one additional object. */
1155 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
1156
1157 /* Reset the free object count. */
1158 p->num_free_objects = num_objects;
1159
1160 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1371 pretend there is one additional object. */
1372 num_objects = OBJECTS_PER_PAGE (p->order) + 1;
1373
1374 /* Reset the free object count. */
1375 p->num_free_objects = num_objects;
1376
1377 /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1161 for (i = 0;
1378 for (i = 0;
1162 i < CEIL (BITMAP_SIZE (num_objects),
1163 sizeof (*p->in_use_p));
1164 ++i)
1165 {
1166 unsigned long j;
1167
1168 /* Something is in use if it is marked, or if it was in use in a
1169 context further down the context stack. */
1379 i < CEIL (BITMAP_SIZE (num_objects),
1380 sizeof (*p->in_use_p));
1381 ++i)
1382 {
1383 unsigned long j;
1384
1385 /* Something is in use if it is marked, or if it was in use in a
1386 context further down the context stack. */
1170 p->in_use_p[i] |= p->save_in_use_p[i];
1387 p->in_use_p[i] |= save_in_use_p (p)[i];
1171
1172 /* Decrement the free object count for every object allocated. */
1173 for (j = p->in_use_p[i]; j; j >>= 1)
1174 p->num_free_objects -= (j & 1);
1175 }
1176
1177 if (p->num_free_objects >= num_objects)
1178 abort ();
1179}
1180
1388
1389 /* Decrement the free object count for every object allocated. */
1390 for (j = p->in_use_p[i]; j; j >>= 1)
1391 p->num_free_objects -= (j & 1);
1392 }
1393
1394 if (p->num_free_objects >= num_objects)
1395 abort ();
1396}
1397
1181/* Decrement the `GC context'. All objects allocated since the
1398/* Decrement the `GC context'. All objects allocated since the
1182 previous ggc_push_context are migrated to the outer context. */
1183
1184void
1185ggc_pop_context ()
1186{
1399 previous ggc_push_context are migrated to the outer context. */
1400
1401void
1402ggc_pop_context ()
1403{
1187 unsigned order, depth;
1404 unsigned long omask;
1405 unsigned int depth, i, e;
1406#ifdef ENABLE_CHECKING
1407 unsigned int order;
1408#endif
1188
1189 depth = --G.context_depth;
1409
1410 depth = --G.context_depth;
1411 omask = (unsigned long)1 << (depth + 1);
1190
1412
1191 /* Any remaining pages in the popped context are lowered to the new
1192 current context; i.e. objects allocated in the popped context and
1193 left over are imported into the previous context. */
1413 if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
1414 return;
1415
1416 G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
1417 G.context_depth_allocations &= omask - 1;
1418 G.context_depth_collections &= omask - 1;
1419
 1420 /* The G.depth array is shortened so that the last index is the
1421 context_depth of the top element of by_depth. */
1422 if (depth+1 < G.depth_in_use)
1423 e = G.depth[depth+1];
1424 else
1425 e = G.by_depth_in_use;
1426
1427 /* We might not have any PTEs of depth depth. */
1428 if (depth < G.depth_in_use)
1429 {
1430
1431 /* First we go through all the pages at depth depth to
1432 recalculate the in use bits. */
1433 for (i = G.depth[depth]; i < e; ++i)
1434 {
1435 page_entry *p;
1436
1437#ifdef ENABLE_CHECKING
1438 p = G.by_depth[i];
1439
1440 /* Check that all of the pages really are at the depth that
1441 we expect. */
1442 if (p->context_depth != depth)
1443 abort ();
1444 if (p->index_by_depth != i)
1445 abort ();
1446#endif
1447
1448 prefetch (&save_in_use_p_i (i+8));
1449 prefetch (&save_in_use_p_i (i+16));
1450 if (save_in_use_p_i (i))
1451 {
1452 p = G.by_depth[i];
1453 ggc_recalculate_in_use_p (p);
1454 free (save_in_use_p_i (i));
1455 save_in_use_p_i (i) = 0;
1456 }
1457 }
1458 }
1459
1460 /* Then, we reset all page_entries with a depth greater than depth
1461 to be at depth. */
1462 for (i = e; i < G.by_depth_in_use; ++i)
1463 {
1464 page_entry *p = G.by_depth[i];
1465
1466 /* Check that all of the pages really are at the depth we
1467 expect. */
1468#ifdef ENABLE_CHECKING
1469 if (p->context_depth <= depth)
1470 abort ();
1471 if (p->index_by_depth != i)
1472 abort ();
1473#endif
1474 p->context_depth = depth;
1475 }
1476
1477 adjust_depth ();
1478
1479#ifdef ENABLE_CHECKING
1194 for (order = 2; order < NUM_ORDERS; order++)
1195 {
1196 page_entry *p;
1197
1198 for (p = G.pages[order]; p != NULL; p = p->next)
1199 {
1200 if (p->context_depth > depth)
1480 for (order = 2; order < NUM_ORDERS; order++)
1481 {
1482 page_entry *p;
1483
1484 for (p = G.pages[order]; p != NULL; p = p->next)
1485 {
1486 if (p->context_depth > depth)
1201 p->context_depth = depth;
1202
1203 /* If this page is now in the topmost context, and we'd
1204 saved its allocation state, restore it. */
1205 else if (p->context_depth == depth && p->save_in_use_p)
1206 {
1207 ggc_recalculate_in_use_p (p);
1208 free (p->save_in_use_p);
1209 p->save_in_use_p = 0;
1210 }
1487 abort ();
1488 else if (p->context_depth == depth && save_in_use_p (p))
1489 abort ();
1211 }
1212 }
1490 }
1491 }
1492#endif
1213}
1214
1215/* Unmark all objects. */
1216
1217static inline void
1218clear_marks ()
1219{
1220 unsigned order;

--- 12 unchanged lines hidden (view full) ---

1233 abort ();
1234#endif
1235
1236 /* Pages that aren't in the topmost context are not collected;
1237 nevertheless, we need their in-use bit vectors to store GC
1238 marks. So, back them up first. */
1239 if (p->context_depth < G.context_depth)
1240 {
1493}
1494
1495/* Unmark all objects. */
1496
1497static inline void
1498clear_marks ()
1499{
1500 unsigned order;

--- 12 unchanged lines hidden (view full) ---

1513 abort ();
1514#endif
1515
1516 /* Pages that aren't in the topmost context are not collected;
1517 nevertheless, we need their in-use bit vectors to store GC
1518 marks. So, back them up first. */
1519 if (p->context_depth < G.context_depth)
1520 {
1241 if (! p->save_in_use_p)
1242 p->save_in_use_p = xmalloc (bitmap_size);
1243 memcpy (p->save_in_use_p, p->in_use_p, bitmap_size);
1521 if (! save_in_use_p (p))
1522 save_in_use_p (p) = xmalloc (bitmap_size);
1523 memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1244 }
1245
 1246 /* Reset the number of free objects and clear the
1247 in-use bits. These will be adjusted by mark_obj. */
1248 p->num_free_objects = num_objects;
1249 memset (p->in_use_p, 0, bitmap_size);
1250
1251 /* Make sure the one-past-the-end bit is always set. */
1524 }
1525
 1526 /* Reset the number of free objects and clear the
1527 in-use bits. These will be adjusted by mark_obj. */
1528 p->num_free_objects = num_objects;
1529 memset (p->in_use_p, 0, bitmap_size);
1530
1531 /* Make sure the one-past-the-end bit is always set. */
1252 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1532 p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1253 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1254 }
1255 }
1256}
1257
1258/* Free all empty pages. Partially empty pages need no attention
1259 because the `mark' bit doubles as an `unused' bit. */
1260

--- 7 unchanged lines hidden (view full) ---

1268 /* The last page-entry to consider, regardless of entries
1269 placed at the end of the list. */
1270 page_entry * const last = G.page_tails[order];
1271
1272 size_t num_objects = OBJECTS_PER_PAGE (order);
1273 size_t live_objects;
1274 page_entry *p, *previous;
1275 int done;
1533 = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1534 }
1535 }
1536}
1537
1538/* Free all empty pages. Partially empty pages need no attention
1539 because the `mark' bit doubles as an `unused' bit. */
1540

--- 7 unchanged lines hidden (view full) ---

1548 /* The last page-entry to consider, regardless of entries
1549 placed at the end of the list. */
1550 page_entry * const last = G.page_tails[order];
1551
1552 size_t num_objects = OBJECTS_PER_PAGE (order);
1553 size_t live_objects;
1554 page_entry *p, *previous;
1555 int done;
1276
1556
1277 p = G.pages[order];
1278 if (p == NULL)
1279 continue;
1280
1281 previous = NULL;
1282 do
1283 {
1284 page_entry *next = p->next;

--- 61 unchanged lines hidden (view full) ---

1346 /* Are we moving the last element? */
1347 if (G.page_tails[order] == p)
1348 G.page_tails[order] = previous;
1349 p = previous;
1350 }
1351
1352 previous = p;
1353 p = next;
1557 p = G.pages[order];
1558 if (p == NULL)
1559 continue;
1560
1561 previous = NULL;
1562 do
1563 {
1564 page_entry *next = p->next;

--- 61 unchanged lines hidden (view full) ---

1626 /* Are we moving the last element? */
1627 if (G.page_tails[order] == p)
1628 G.page_tails[order] = previous;
1629 p = previous;
1630 }
1631
1632 previous = p;
1633 p = next;
1354 }
1634 }
1355 while (! done);
1356
1357 /* Now, restore the in_use_p vectors for any pages from contexts
1358 other than the current one. */
1359 for (p = G.pages[order]; p; p = p->next)
1360 if (p->context_depth != G.context_depth)
1361 ggc_recalculate_in_use_p (p);
1362 }
1363}
1364
1635 while (! done);
1636
1637 /* Now, restore the in_use_p vectors for any pages from contexts
1638 other than the current one. */
1639 for (p = G.pages[order]; p; p = p->next)
1640 if (p->context_depth != G.context_depth)
1641 ggc_recalculate_in_use_p (p);
1642 }
1643}
1644
1365#ifdef GGC_POISON
1645#ifdef ENABLE_GC_CHECKING
1366/* Clobber all free objects. */
1367
1368static inline void
1369poison_pages ()
1370{
1371 unsigned order;
1372
1373 for (order = 2; order < NUM_ORDERS; order++)

--- 14 unchanged lines hidden ---

1388 continue;
1389
1390 for (i = 0; i < num_objects; i++)
1391 {
1392 size_t word, bit;
1393 word = i / HOST_BITS_PER_LONG;
1394 bit = i % HOST_BITS_PER_LONG;
1395 if (((p->in_use_p[word] >> bit) & 1) == 0)
1646/* Clobber all free objects. */
1647
1648static inline void
1649poison_pages ()
1650{
1651 unsigned order;
1652
1653 for (order = 2; order < NUM_ORDERS; order++)

--- 14 unchanged lines hidden ---

1668 continue;
1669
1670 for (i = 0; i < num_objects; i++)
1671 {
1672 size_t word, bit;
1673 word = i / HOST_BITS_PER_LONG;
1674 bit = i % HOST_BITS_PER_LONG;
1675 if (((p->in_use_p[word] >> bit) & 1) == 0)
1396 memset (p->page + i * size, 0xa5, size);
1676 {
1677 char *object = p->page + i * size;
1678
1679 /* Keep poison-by-write when we expect to use Valgrind,
1680 so the exact same memory semantics is kept, in case
1681 there are memory errors. We override this request
1682 below. */
1683 VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
1684 memset (object, 0xa5, size);
1685
1686 /* Drop the handle to avoid handle leak. */
1687 VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
1688 }
1397 }
1398 }
1399 }
1400}
1401#endif
1402
1403/* Top level mark-and-sweep routine. */
1404
1405void
1406ggc_collect ()
1407{
1408 /* Avoid frequent unnecessary work by skipping collection if the
1409 total allocations haven't expanded much since the last
1410 collection. */
1689 }
1690 }
1691 }
1692}
1693#endif
1694
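/* Editor's note: a minimal, self-contained sketch of the poisoning
   pattern used in poison_pages above.  It assumes the same pre-3.x
   valgrind.h client-request macros that this file already uses; when
   ENABLE_VALGRIND_CHECKING is not defined, VALGRIND_DISCARD drops its
   argument and only the memset remains.  Illustrative only.  */

#include <string.h>

#ifdef ENABLE_VALGRIND_CHECKING
#include <valgrind.h>
#else
#define VALGRIND_DISCARD(x)
#endif

static void
poison_object (char *object, size_t size)
{
  /* Briefly allow the write so the poison store is legal under
     Memcheck...  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
  memset (object, 0xa5, size);

  /* ...then mark the object inaccessible again.  VALGRIND_DISCARD
     releases the handle each request returns, avoiding a handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
}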
1695/* Top level mark-and-sweep routine. */
1696
1697void
1698ggc_collect ()
1699{
1700 /* Avoid frequent unnecessary work by skipping collection if the
1701 total allocations haven't expanded much since the last
1702 collection. */
1411#ifndef GGC_ALWAYS_COLLECT
1412 if (G.allocated < GGC_MIN_EXPAND_FOR_GC * G.allocated_last_gc)
1703 float allocated_last_gc =
1704 MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1705
1706 float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1707
1708 if (G.allocated < allocated_last_gc + min_expand)
1413 return;
1709 return;
1414#endif
1415
1416 timevar_push (TV_GC);
1417 if (!quiet_flag)
1418 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1419
1420 /* Zero the total allocated bytes. This will be recalculated in the
1421 sweep phase. */
1422 G.allocated = 0;
1423
1710
1711 timevar_push (TV_GC);
1712 if (!quiet_flag)
1713 fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
1714
1715 /* Zero the total allocated bytes. This will be recalculated in the
1716 sweep phase. */
1717 G.allocated = 0;
1718
1424 /* Release the pages we freed the last time we collected, but didn't
1719 /* Release the pages we freed the last time we collected, but didn't
1425 reuse in the interim. */
1426 release_pages ();
1427
1720 reuse in the interim. */
1721 release_pages ();
1722
1723 /* Indicate that we've seen collections at this context depth. */
1724 G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
1725
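/* Editor's note (illustrative, not in the original source): the
   assignment above builds a mask with bits 0 .. context_depth set.
   For example, at context_depth 2 it computes
   ((unsigned long) 1 << 3) - 1 == 0x7, recording that a collection
   has now been seen at depths 0, 1 and 2.  */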
1428 clear_marks ();
1429 ggc_mark_roots ();
1726 clear_marks ();
1727 ggc_mark_roots ();
1430
1431#ifdef GGC_POISON
1728
1729#ifdef ENABLE_GC_CHECKING
1432 poison_pages ();
1433#endif
1434
1435 sweep_pages ();
1436
1437 G.allocated_last_gc = G.allocated;
1730 poison_pages ();
1731#endif
1732
1733 sweep_pages ();
1734
1735 G.allocated_last_gc = G.allocated;
1438 if (G.allocated_last_gc < GGC_MIN_LAST_ALLOCATED)
1439 G.allocated_last_gc = GGC_MIN_LAST_ALLOCATED;
1440
1441 timevar_pop (TV_GC);
1442
1443 if (!quiet_flag)
1444 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1445}
1446
1447/* Print allocation statistics. */

--- 8 unchanged lines hidden ---

1456ggc_print_statistics ()
1457{
1458 struct ggc_statistics stats;
1459 unsigned int i;
1460 size_t total_overhead = 0;
1461
1462 /* Clear the statistics. */
1463 memset (&stats, 0, sizeof (stats));
1736
1737 timevar_pop (TV_GC);
1738
1739 if (!quiet_flag)
1740 fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
1741}
1742
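/* Editor's note: the entry test at the top of ggc_collect can be read
   as "collect only once the heap has grown by ggc-min-expand percent
   over a floor of ggc-min-heapsize kilobytes".  The sketch below
   restates that test; the parameter values 4096 and 30 are hypothetical
   stand-ins chosen for illustration, not the values supplied by the
   --param machinery.  */

#include <stddef.h>

#define MIN_HEAPSIZE_KB 4096   /* stand-in for PARAM_VALUE (GGC_MIN_HEAPSIZE) */
#define MIN_EXPAND_PCT  30     /* stand-in for PARAM_VALUE (GGC_MIN_EXPAND) */

static int
should_collect (size_t allocated, size_t allocated_last_gc)
{
  size_t base, min_expand;

  /* Never use a baseline smaller than the heap-size floor.  */
  base = allocated_last_gc;
  if (base < (size_t) MIN_HEAPSIZE_KB * 1024)
    base = (size_t) MIN_HEAPSIZE_KB * 1024;

  /* With the stand-in values: base is at least 4 MB, so a collection
     is triggered once roughly 4 MB + 1.2 MB has been allocated.  */
  min_expand = base * MIN_EXPAND_PCT / 100;

  return allocated >= base + min_expand;
}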
1743/* Print allocation statistics. */

--- 8 unchanged lines hidden (view full) ---

1752ggc_print_statistics ()
1753{
1754 struct ggc_statistics stats;
1755 unsigned int i;
1756 size_t total_overhead = 0;
1757
1758 /* Clear the statistics. */
1759 memset (&stats, 0, sizeof (stats));
1464
1760
1465 /* Make sure collection will really occur. */
1466 G.allocated_last_gc = 0;
1467
1468 /* Collect and print the statistics common across collectors. */
1469 ggc_print_common_statistics (stderr, &stats);
1470
1471 /* Release free pages so that we will not count the bytes allocated
1472 there as part of the total allocated memory. */
1473 release_pages ();
1474
1761 /* Make sure collection will really occur. */
1762 G.allocated_last_gc = 0;
1763
1764 /* Collect and print the statistics common across collectors. */
1765 ggc_print_common_statistics (stderr, &stats);
1766
1767 /* Release free pages so that we will not count the bytes allocated
1768 there as part of the total allocated memory. */
1769 release_pages ();
1770
1475 /* Collect some information about the various sizes of
1771 /* Collect some information about the various sizes of
1476 allocation. */
1477 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1478 "Size", "Allocated", "Used", "Overhead");
1479 for (i = 0; i < NUM_ORDERS; ++i)
1480 {
1481 page_entry *p;
1482 size_t allocated;
1483 size_t in_use;

--- 6 unchanged lines hidden ---

1490 overhead = allocated = in_use = 0;
1491
1492 /* Figure out the total number of bytes allocated for objects of
1493 this size, and how many of them are actually in use. Also figure
1494 out how much memory the page table is using. */
1495 for (p = G.pages[i]; p; p = p->next)
1496 {
1497 allocated += p->bytes;
1772 allocation. */
1773 fprintf (stderr, "\n%-5s %10s %10s %10s\n",
1774 "Size", "Allocated", "Used", "Overhead");
1775 for (i = 0; i < NUM_ORDERS; ++i)
1776 {
1777 page_entry *p;
1778 size_t allocated;
1779 size_t in_use;

--- 6 unchanged lines hidden ---

1786 overhead = allocated = in_use = 0;
1787
1788 /* Figure out the total number of bytes allocated for objects of
1789 this size, and how many of them are actually in use. Also figure
1790 out how much memory the page table is using. */
1791 for (p = G.pages[i]; p; p = p->next)
1792 {
1793 allocated += p->bytes;
1498 in_use +=
1794 in_use +=
1499 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);
1500
1501 overhead += (sizeof (page_entry) - sizeof (long)
1502 + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
1503 }
1795 (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);
1796
1797 overhead += (sizeof (page_entry) - sizeof (long)
1798 + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
1799 }
1504 fprintf (stderr, "%-5d %10ld%c %10ld%c %10ld%c\n", OBJECT_SIZE (i),
1800 fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
1801 (unsigned long) OBJECT_SIZE (i),
1505 SCALE (allocated), LABEL (allocated),
1506 SCALE (in_use), LABEL (in_use),
1507 SCALE (overhead), LABEL (overhead));
1508 total_overhead += overhead;
1509 }
1802 SCALE (allocated), LABEL (allocated),
1803 SCALE (in_use), LABEL (in_use),
1804 SCALE (overhead), LABEL (overhead));
1805 total_overhead += overhead;
1806 }
1510 fprintf (stderr, "%-5s %10ld%c %10ld%c %10ld%c\n", "Total",
1807 fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
1511 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1512 SCALE (G.allocated), LABEL(G.allocated),
1513 SCALE (total_overhead), LABEL (total_overhead));
1514}
1808 SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
1809 SCALE (G.allocated), LABEL(G.allocated),
1810 SCALE (total_overhead), LABEL (total_overhead));
1811}