1/* "Bag-of-pages" garbage collector for the GNU compiler.
2   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
3   Free Software Foundation, Inc.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 2, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING.  If not, write to the Free
19Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
2002110-1301, USA.  */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "tree.h"
27#include "rtl.h"
28#include "tm_p.h"
29#include "toplev.h"
30#include "flags.h"
31#include "ggc.h"
32#include "timevar.h"
33#include "params.h"
34#include "tree-flow.h"
35#ifdef ENABLE_VALGRIND_CHECKING
36# ifdef HAVE_VALGRIND_MEMCHECK_H
37#  include <valgrind/memcheck.h>
38# elif defined HAVE_MEMCHECK_H
39#  include <memcheck.h>
40# else
41#  include <valgrind.h>
42# endif
43#else
44/* Avoid #ifdef:s when we can help it.  */
45#define VALGRIND_DISCARD(x)
46#endif
47
48/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
49   file open.  Prefer either to valloc.  */
50#ifdef HAVE_MMAP_ANON
51# undef HAVE_MMAP_DEV_ZERO
52
53# include <sys/mman.h>
54# ifndef MAP_FAILED
55#  define MAP_FAILED -1
56# endif
57# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
58#  define MAP_ANONYMOUS MAP_ANON
59# endif
60# define USING_MMAP
61
62#endif
63
64#ifdef HAVE_MMAP_DEV_ZERO
65
66# include <sys/mman.h>
67# ifndef MAP_FAILED
68#  define MAP_FAILED -1
69# endif
70# define USING_MMAP
71
72#endif
73
74#ifndef USING_MMAP
75#define USING_MALLOC_PAGE_GROUPS
76#endif
77
78/* Strategy:
79
   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at eight bytes, plus a
   few extra sizes taken from extra_order_size_table below.  The size
   of an allocation request is rounded up to the next such size (the
   `order'), and satisfied from the appropriate page.
85
86   Each page is recorded in a page-entry, which also maintains an
87   in-use bitmap of object positions on the page.  This allows the
88   allocation state of a particular object to be flipped without
89   touching the page itself.
90
91   Each page-entry also has a context depth, which is used to track
92   pushing and popping of allocation contexts.  Only objects allocated
93   in the current (highest-numbered) context may be collected.
94
   Page entries are arranged in an array of doubly-linked lists.  The
   array is indexed by the base-2 log (the `order') of the allocation
   size of the pages on it; i.e. all pages on a list allocate objects
   of the same size.
98   Pages are ordered on the list such that all non-full pages precede
99   all full pages, with non-full pages arranged in order of decreasing
100   context depth.
101
102   Empty pages (of all orders) are kept on a single page cache list,
103   and are considered first when new pages are required; they are
104   deallocated at the start of the next collection if they haven't
105   been recycled by then.  */
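
/* As an illustrative sketch (using the default tables defined below):
   a request for 20 bytes is rounded up to 32 bytes, i.e. order 5.  If
   the page at the head of the order-5 list still has a free object,
   the corresponding bit in that page's in-use bitmap is set and the
   object's address is computed from the bit position; otherwise a
   fresh page is taken from the free-page cache (or from the OS) and
   linked in at the head of the list.  */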
106
107/* Define GGC_DEBUG_LEVEL to print debugging information.
108     0: No debugging output.
109     1: GC statistics only.
110     2: Page-entry allocations/deallocations as well.
111     3: Object allocations as well.
112     4: Object marks as well.  */
113#define GGC_DEBUG_LEVEL (0)
114
115#ifndef HOST_BITS_PER_PTR
116#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
117#endif
118
119
120/* A two-level tree is used to look up the page-entry for a given
121   pointer.  Two chunks of the pointer's bits are extracted to index
122   the first and second levels of the tree, as follows:
123
124				   HOST_PAGE_SIZE_BITS
125			   32		|      |
126       msb +----------------+----+------+------+ lsb
127			    |    |      |
128			 PAGE_L1_BITS   |
129				 |      |
130			       PAGE_L2_BITS
131
132   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
133   pages are aligned on system page boundaries.  The next most
134   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
135   index values in the lookup table, respectively.
136
137   For 32-bit architectures and the settings below, there are no
138   leftover bits.  For architectures with wider pointers, the lookup
139   tree points to a list of pages, which must be scanned to find the
140   correct one.  */
141
142#define PAGE_L1_BITS	(8)
143#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
144#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
145#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)
146
147#define LOOKUP_L1(p) \
148  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
149
150#define LOOKUP_L2(p) \
151  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
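
/* Worked example (a sketch, assuming a 4096-byte system page so that
   G.lg_pagesize == 12, and 32-bit pointers):

     PAGE_L2_BITS == 32 - 8 - 12 == 12
     LOOKUP_L1 (0x12345678) == 0x12
     LOOKUP_L2 (0x12345678) == 0x345

   so the page entry for that address is found at G.lookup[0x12][0x345].  */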
152
153/* The number of objects per allocation page, for objects on a page of
154   the indicated ORDER.  */
155#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
156
157/* The number of objects in P.  */
158#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
159
160/* The size of an object on a page of the indicated ORDER.  */
161#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
162
/* For speed, we avoid doing a general integer divide when converting
   a byte offset within a page into a bit position in the allocation
   bitmap.  Instead, we precalculate numbers M, S such that
   (O * M) >> S == O / Z (modulo 2^32), for any offset O within the
   page which is evenly divisible by the object size Z.  */
167#define DIV_MULT(ORDER) inverse_table[ORDER].mult
168#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
169#define OFFSET_TO_BIT(OFFSET, ORDER) \
170  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
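
/* For example (a sketch, assuming a 32-bit size_t): for an order whose
   object size is 24, compute_inverse below stores shift == 3 (24 == 8 * 3)
   and mult == 0xAAAAAAAB, the multiplicative inverse of 3 modulo 2^32.
   Then for the third object on such a page, at offset 48:

     OFFSET_TO_BIT (48, ORDER)
       == ((48 * 0xAAAAAAAB) mod 2^32) >> 3
       == 16 >> 3
       == 2 == 48 / 24.  */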
171
172/* The number of extra orders, not corresponding to power-of-two sized
173   objects.  */
174
175#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
176
177#define RTL_SIZE(NSLOTS) \
178  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
179
180#define TREE_EXP_SIZE(OPS) \
181  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
182
183/* The Ith entry is the maximum size of an object to be stored in the
184   Ith extra order.  Adding a new entry to this array is the *only*
185   thing you need to do to add a new special allocation size.  */
186
187static const size_t extra_order_size_table[] = {
188  sizeof (struct stmt_ann_d),
189  sizeof (struct var_ann_d),
190  sizeof (struct tree_decl_non_common),
191  sizeof (struct tree_field_decl),
192  sizeof (struct tree_parm_decl),
193  sizeof (struct tree_var_decl),
194  sizeof (struct tree_list),
195  sizeof (struct tree_ssa_name),
196  sizeof (struct function),
197  sizeof (struct basic_block_def),
198  sizeof (bitmap_element),
199  /* PHI nodes with one to three arguments are already covered by the
200     above sizes.  */
201  sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
202  TREE_EXP_SIZE (2),
203  RTL_SIZE (2),			/* MEM, PLUS, etc.  */
204  RTL_SIZE (9),			/* INSN */
205};
206
207/* The total number of orders.  */
208
209#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
210
211/* We use this structure to determine the alignment required for
212   allocations.  For power-of-two sized allocations, that's not a
213   problem, but it does matter for odd-sized allocations.  */
214
215struct max_alignment {
216  char c;
217  union {
218    HOST_WIDEST_INT i;
219    long double d;
220  } u;
221};
222
223/* The biggest alignment required.  */
224
225#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
226
227/* Compute the smallest nonnegative number which when added to X gives
228   a multiple of F.  */
229
230#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
231
232/* Compute the smallest multiple of F that is >= X.  */
233
234#define ROUND_UP(x, f) (CEIL (x, f) * (f))
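
/* For instance, ROUND_UP_VALUE (20, 8) == 4 and ROUND_UP (20, 8) == 24;
   init_ggc below uses ROUND_UP (s, MAX_ALIGNMENT) to pad the odd-sized
   entries of extra_order_size_table up to a properly aligned size.  */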
235
/* The Ith entry is the number of objects on a page of order I.  */
237
238static unsigned objects_per_page_table[NUM_ORDERS];
239
240/* The Ith entry is the size of an object on a page of order I.  */
241
242static size_t object_size_table[NUM_ORDERS];
243
244/* The Ith entry is a pair of numbers (mult, shift) such that
245   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
246   for all k evenly divisible by OBJECT_SIZE(I).  */
247
248static struct
249{
250  size_t mult;
251  unsigned int shift;
252}
253inverse_table[NUM_ORDERS];
254
255/* A page_entry records the status of an allocation page.  This
256   structure is dynamically sized to fit the bitmap in_use_p.  */
257typedef struct page_entry
258{
259  /* The next page-entry with objects of the same size, or NULL if
260     this is the last page-entry.  */
261  struct page_entry *next;
262
263  /* The previous page-entry with objects of the same size, or NULL if
264     this is the first page-entry.   The PREV pointer exists solely to
265     keep the cost of ggc_free manageable.  */
266  struct page_entry *prev;
267
268  /* The number of bytes allocated.  (This will always be a multiple
269     of the host system page size.)  */
270  size_t bytes;
271
272  /* The address at which the memory is allocated.  */
273  char *page;
274
275#ifdef USING_MALLOC_PAGE_GROUPS
276  /* Back pointer to the page group this page came from.  */
277  struct page_group *group;
278#endif
279
  /* This is the index in the by_depth array where this page entry
     can be found.  */
282  unsigned long index_by_depth;
283
284  /* Context depth of this page.  */
285  unsigned short context_depth;
286
287  /* The number of free objects remaining on this page.  */
288  unsigned short num_free_objects;
289
290  /* A likely candidate for the bit position of a free object for the
291     next allocation from this page.  */
292  unsigned short next_bit_hint;
293
  /* The lg of the size of objects allocated from this page.  */
295  unsigned char order;
296
297  /* A bit vector indicating whether or not objects are in use.  The
298     Nth bit is one if the Nth object on this page is allocated.  This
299     array is dynamically sized.  */
300  unsigned long in_use_p[1];
301} page_entry;
302
303#ifdef USING_MALLOC_PAGE_GROUPS
304/* A page_group describes a large allocation from malloc, from which
305   we parcel out aligned pages.  */
306typedef struct page_group
307{
308  /* A linked list of all extant page groups.  */
309  struct page_group *next;
310
311  /* The address we received from malloc.  */
312  char *allocation;
313
314  /* The size of the block.  */
315  size_t alloc_size;
316
317  /* A bitmask of pages in use.  */
318  unsigned int in_use;
319} page_group;
320#endif
321
322#if HOST_BITS_PER_PTR <= 32
323
324/* On 32-bit hosts, we use a two level page table, as pictured above.  */
325typedef page_entry **page_table[PAGE_L1_SIZE];
326
327#else
328
329/* On 64-bit hosts, we use the same two level page tables plus a linked
330   list that disambiguates the top 32-bits.  There will almost always be
331   exactly one entry in the list.  */
332typedef struct page_table_chain
333{
334  struct page_table_chain *next;
335  size_t high_bits;
336  page_entry **table[PAGE_L1_SIZE];
337} *page_table;
338
339#endif
340
341/* The rest of the global variables.  */
342static struct globals
343{
344  /* The Nth element in this array is a page with objects of size 2^N.
345     If there are any pages with free objects, they will be at the
346     head of the list.  NULL if there are no page-entries for this
347     object size.  */
348  page_entry *pages[NUM_ORDERS];
349
350  /* The Nth element in this array is the last page with objects of
351     size 2^N.  NULL if there are no page-entries for this object
352     size.  */
353  page_entry *page_tails[NUM_ORDERS];
354
355  /* Lookup table for associating allocation pages with object addresses.  */
356  page_table lookup;
357
358  /* The system's page size.  */
359  size_t pagesize;
360  size_t lg_pagesize;
361
362  /* Bytes currently allocated.  */
363  size_t allocated;
364
  /* Bytes allocated at the end of the last collection.  */
366  size_t allocated_last_gc;
367
368  /* Total amount of memory mapped.  */
369  size_t bytes_mapped;
370
371  /* Bit N set if any allocations have been done at context depth N.  */
372  unsigned long context_depth_allocations;
373
374  /* Bit N set if any collections have been done at context depth N.  */
375  unsigned long context_depth_collections;
376
377  /* The current depth in the context stack.  */
378  unsigned short context_depth;
379
380  /* A file descriptor open to /dev/zero for reading.  */
381#if defined (HAVE_MMAP_DEV_ZERO)
382  int dev_zero_fd;
383#endif
384
385  /* A cache of free system pages.  */
386  page_entry *free_pages;
387
388#ifdef USING_MALLOC_PAGE_GROUPS
389  page_group *page_groups;
390#endif
391
  /* The stream used for debugging output.  */
393  FILE *debug_file;
394
395  /* Current number of elements in use in depth below.  */
396  unsigned int depth_in_use;
397
398  /* Maximum number of elements that can be used before resizing.  */
399  unsigned int depth_max;
400
  /* Each element of this array is an index into by_depth where the
     given depth starts.  This array is indexed by the depth we are
     interested in.  */
404  unsigned int *depth;
405
406  /* Current number of elements in use in by_depth below.  */
407  unsigned int by_depth_in_use;
408
409  /* Maximum number of elements that can be used before resizing.  */
410  unsigned int by_depth_max;
411
  /* Each element of this array is a pointer to a page_entry; all
     page_entries can be found in here, ordered by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
417  page_entry **by_depth;
418
419  /* Each element is a pointer to the saved in_use_p bits, if any,
420     zero otherwise.  We allocate them all together, to enable a
421     better runtime data access pattern.  */
422  unsigned long **save_in_use;
423
424#ifdef ENABLE_GC_ALWAYS_COLLECT
425  /* List of free objects to be verified as actually free on the
426     next collection.  */
427  struct free_object
428  {
429    void *object;
430    struct free_object *next;
431  } *free_object_list;
432#endif
433
434#ifdef GATHER_STATISTICS
435  struct
436  {
437    /* Total memory allocated with ggc_alloc.  */
438    unsigned long long total_allocated;
439    /* Total overhead for memory to be allocated with ggc_alloc.  */
440    unsigned long long total_overhead;
441
    /* Total allocations and overhead for sizes of at most 32, 64 and
       128 bytes.  These sizes are interesting because they are typical
       cache line sizes.  */
445
446    unsigned long long total_allocated_under32;
447    unsigned long long total_overhead_under32;
448
449    unsigned long long total_allocated_under64;
450    unsigned long long total_overhead_under64;
451
452    unsigned long long total_allocated_under128;
453    unsigned long long total_overhead_under128;
454
455    /* The allocations for each of the allocation orders.  */
456    unsigned long long total_allocated_per_order[NUM_ORDERS];
457
458    /* The overhead for each of the allocation orders.  */
459    unsigned long long total_overhead_per_order[NUM_ORDERS];
460  } stats;
461#endif
462} G;
463
464/* The size in bytes required to maintain a bitmap for the objects
465   on a page-entry.  */
466#define BITMAP_SIZE(Num_objects) \
467  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
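
/* For example (a sketch assuming 4096-byte pages and 32-bit longs), a
   page of 32-byte objects holds 128 objects; alloc_page asks for
   BITMAP_SIZE (128 + 1) == CEIL (129, 32) * 4 == 20 bytes, the extra
   bit being the one-past-the-end sentinel set up in alloc_page.  */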
468
469/* Allocate pages in chunks of this size, to throttle calls to memory
470   allocation routines.  The first page is used, the rest go onto the
471   free list.  This cannot be larger than HOST_BITS_PER_INT for the
472   in_use bitmask for page_group.  Hosts that need a different value
473   can override this by defining GGC_QUIRE_SIZE explicitly.  */
474#ifndef GGC_QUIRE_SIZE
475# ifdef USING_MMAP
476#  define GGC_QUIRE_SIZE 256
477# else
478#  define GGC_QUIRE_SIZE 16
479# endif
480#endif
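
/* For example, with the mmap default above and a 4096-byte system page,
   a request for a single fresh page maps one megabyte at a time; the
   first page is used and the remaining 255 go onto the free-page list
   (see alloc_page).  */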
481
482/* Initial guess as to how many page table entries we might need.  */
483#define INITIAL_PTE_COUNT 128
484
485static int ggc_allocated_p (const void *);
486static page_entry *lookup_page_table_entry (const void *);
487static void set_page_table_entry (void *, page_entry *);
488#ifdef USING_MMAP
489static char *alloc_anon (char *, size_t);
490#endif
491#ifdef USING_MALLOC_PAGE_GROUPS
492static size_t page_group_index (char *, char *);
493static void set_page_group_in_use (page_group *, char *);
494static void clear_page_group_in_use (page_group *, char *);
495#endif
496static struct page_entry * alloc_page (unsigned);
497static void free_page (struct page_entry *);
498static void release_pages (void);
499static void clear_marks (void);
500static void sweep_pages (void);
501static void ggc_recalculate_in_use_p (page_entry *);
502static void compute_inverse (unsigned);
503static inline void adjust_depth (void);
504static void move_ptes_to_front (int, int);
505
506void debug_print_page_list (int);
507static void push_depth (unsigned int);
508static void push_by_depth (page_entry *, unsigned long *);
509
510/* Push an entry onto G.depth.  */
511
512inline static void
513push_depth (unsigned int i)
514{
515  if (G.depth_in_use >= G.depth_max)
516    {
517      G.depth_max *= 2;
518      G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
519    }
520  G.depth[G.depth_in_use++] = i;
521}
522
523/* Push an entry onto G.by_depth and G.save_in_use.  */
524
525inline static void
526push_by_depth (page_entry *p, unsigned long *s)
527{
528  if (G.by_depth_in_use >= G.by_depth_max)
529    {
530      G.by_depth_max *= 2;
531      G.by_depth = xrealloc (G.by_depth,
532			     G.by_depth_max * sizeof (page_entry *));
533      G.save_in_use = xrealloc (G.save_in_use,
534				G.by_depth_max * sizeof (unsigned long *));
535    }
536  G.by_depth[G.by_depth_in_use] = p;
537  G.save_in_use[G.by_depth_in_use++] = s;
538}
539
540#if (GCC_VERSION < 3001)
541#define prefetch(X) ((void) X)
542#else
543#define prefetch(X) __builtin_prefetch (X)
544#endif
545
546#define save_in_use_p_i(__i) \
547  (G.save_in_use[__i])
548#define save_in_use_p(__p) \
549  (save_in_use_p_i (__p->index_by_depth))
550
551/* Returns nonzero if P was allocated in GC'able memory.  */
552
553static inline int
554ggc_allocated_p (const void *p)
555{
556  page_entry ***base;
557  size_t L1, L2;
558
559#if HOST_BITS_PER_PTR <= 32
560  base = &G.lookup[0];
561#else
562  page_table table = G.lookup;
563  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
564  while (1)
565    {
566      if (table == NULL)
567	return 0;
568      if (table->high_bits == high_bits)
569	break;
570      table = table->next;
571    }
572  base = &table->table[0];
573#endif
574
575  /* Extract the level 1 and 2 indices.  */
576  L1 = LOOKUP_L1 (p);
577  L2 = LOOKUP_L2 (p);
578
579  return base[L1] && base[L1][L2];
580}
581
582/* Traverse the page table and find the entry for a page.
583   Die (probably) if the object wasn't allocated via GC.  */
584
585static inline page_entry *
586lookup_page_table_entry (const void *p)
587{
588  page_entry ***base;
589  size_t L1, L2;
590
591#if HOST_BITS_PER_PTR <= 32
592  base = &G.lookup[0];
593#else
594  page_table table = G.lookup;
595  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
596  while (table->high_bits != high_bits)
597    table = table->next;
598  base = &table->table[0];
599#endif
600
601  /* Extract the level 1 and 2 indices.  */
602  L1 = LOOKUP_L1 (p);
603  L2 = LOOKUP_L2 (p);
604
605  return base[L1][L2];
606}
607
608/* Set the page table entry for a page.  */
609
610static void
611set_page_table_entry (void *p, page_entry *entry)
612{
613  page_entry ***base;
614  size_t L1, L2;
615
616#if HOST_BITS_PER_PTR <= 32
617  base = &G.lookup[0];
618#else
619  page_table table;
620  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
621  for (table = G.lookup; table; table = table->next)
622    if (table->high_bits == high_bits)
623      goto found;
624
625  /* Not found -- allocate a new table.  */
626  table = xcalloc (1, sizeof(*table));
627  table->next = G.lookup;
628  table->high_bits = high_bits;
629  G.lookup = table;
630found:
631  base = &table->table[0];
632#endif
633
634  /* Extract the level 1 and 2 indices.  */
635  L1 = LOOKUP_L1 (p);
636  L2 = LOOKUP_L2 (p);
637
638  if (base[L1] == NULL)
639    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
640
641  base[L1][L2] = entry;
642}
643
644/* Prints the page-entry for object size ORDER, for debugging.  */
645
646void
647debug_print_page_list (int order)
648{
649  page_entry *p;
650  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
651	  (void *) G.page_tails[order]);
652  p = G.pages[order];
653  while (p != NULL)
654    {
655      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
656	      p->num_free_objects);
657      p = p->next;
658    }
659  printf ("NULL\n");
660  fflush (stdout);
661}
662
663#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* macros is defined.  */
667
668static inline char *
669alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
670{
671#ifdef HAVE_MMAP_ANON
672  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
673		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
674#endif
675#ifdef HAVE_MMAP_DEV_ZERO
676  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
677		     MAP_PRIVATE, G.dev_zero_fd, 0);
678#endif
679
680  if (page == (char *) MAP_FAILED)
681    {
682      perror ("virtual memory exhausted");
683      exit (FATAL_EXIT_CODE);
684    }
685
686  /* Remember that we allocated this memory.  */
687  G.bytes_mapped += size;
688
689  /* Pretend we don't have access to the allocated pages.  We'll enable
690     access to smaller pieces of the area in ggc_alloc.  Discard the
691     handle to avoid handle leak.  */
692  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
693
694  return page;
695}
696#endif
697#ifdef USING_MALLOC_PAGE_GROUPS
698/* Compute the index for this page into the page group.  */
699
700static inline size_t
701page_group_index (char *allocation, char *page)
702{
703  return (size_t) (page - allocation) >> G.lg_pagesize;
704}
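
/* For instance, with 4096-byte pages, the page that starts 8192 bytes
   into the group's allocation has index 2, and so is tracked by bit
   (1 << 2) of the group's in_use mask.  */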
705
706/* Set and clear the in_use bit for this page in the page group.  */
707
708static inline void
709set_page_group_in_use (page_group *group, char *page)
710{
711  group->in_use |= 1 << page_group_index (group->allocation, page);
712}
713
714static inline void
715clear_page_group_in_use (page_group *group, char *page)
716{
717  group->in_use &= ~(1 << page_group_index (group->allocation, page));
718}
719#endif
720
721/* Allocate a new page for allocating objects of size 2^ORDER,
722   and return an entry for it.  The entry is not added to the
723   appropriate page_table list.  */
724
725static inline struct page_entry *
726alloc_page (unsigned order)
727{
728  struct page_entry *entry, *p, **pp;
729  char *page;
730  size_t num_objects;
731  size_t bitmap_size;
732  size_t page_entry_size;
733  size_t entry_size;
734#ifdef USING_MALLOC_PAGE_GROUPS
735  page_group *group;
736#endif
737
738  num_objects = OBJECTS_PER_PAGE (order);
739  bitmap_size = BITMAP_SIZE (num_objects + 1);
740  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
741  entry_size = num_objects * OBJECT_SIZE (order);
742  if (entry_size < G.pagesize)
743    entry_size = G.pagesize;
744
745  entry = NULL;
746  page = NULL;
747
748  /* Check the list of free pages for one we can use.  */
749  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
750    if (p->bytes == entry_size)
751      break;
752
753  if (p != NULL)
754    {
755      /* Recycle the allocated memory from this page ...  */
756      *pp = p->next;
757      page = p->page;
758
759#ifdef USING_MALLOC_PAGE_GROUPS
760      group = p->group;
761#endif
762
763      /* ... and, if possible, the page entry itself.  */
764      if (p->order == order)
765	{
766	  entry = p;
767	  memset (entry, 0, page_entry_size);
768	}
769      else
770	free (p);
771    }
772#ifdef USING_MMAP
773  else if (entry_size == G.pagesize)
774    {
775      /* We want just one page.  Allocate a bunch of them and put the
776	 extras on the freelist.  (Can only do this optimization with
777	 mmap for backing store.)  */
778      struct page_entry *e, *f = G.free_pages;
779      int i;
780
781      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
782
783      /* This loop counts down so that the chain will be in ascending
784	 memory order.  */
785      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
786	{
787	  e = xcalloc (1, page_entry_size);
788	  e->order = order;
789	  e->bytes = G.pagesize;
790	  e->page = page + (i << G.lg_pagesize);
791	  e->next = f;
792	  f = e;
793	}
794
795      G.free_pages = f;
796    }
797  else
798    page = alloc_anon (NULL, entry_size);
799#endif
800#ifdef USING_MALLOC_PAGE_GROUPS
801  else
802    {
803      /* Allocate a large block of memory and serve out the aligned
804	 pages therein.  This results in much less memory wastage
805	 than the traditional implementation of valloc.  */
806
807      char *allocation, *a, *enda;
808      size_t alloc_size, head_slop, tail_slop;
809      int multiple_pages = (entry_size == G.pagesize);
810
811      if (multiple_pages)
812	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
813      else
814	alloc_size = entry_size + G.pagesize - 1;
815      allocation = xmalloc (alloc_size);
816
817      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
818      head_slop = page - allocation;
819      if (multiple_pages)
820	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
821      else
822	tail_slop = alloc_size - entry_size - head_slop;
823      enda = allocation + alloc_size - tail_slop;
824
825      /* We allocated N pages, which are likely not aligned, leaving
826	 us with N-1 usable pages.  We plan to place the page_group
827	 structure somewhere in the slop.  */
828      if (head_slop >= sizeof (page_group))
829	group = (page_group *)page - 1;
830      else
831	{
832	  /* We magically got an aligned allocation.  Too bad, we have
833	     to waste a page anyway.  */
834	  if (tail_slop == 0)
835	    {
836	      enda -= G.pagesize;
837	      tail_slop += G.pagesize;
838	    }
839	  gcc_assert (tail_slop >= sizeof (page_group));
840	  group = (page_group *)enda;
841	  tail_slop -= sizeof (page_group);
842	}
843
844      /* Remember that we allocated this memory.  */
845      group->next = G.page_groups;
846      group->allocation = allocation;
847      group->alloc_size = alloc_size;
848      group->in_use = 0;
849      G.page_groups = group;
850      G.bytes_mapped += alloc_size;
851
852      /* If we allocated multiple pages, put the rest on the free list.  */
853      if (multiple_pages)
854	{
855	  struct page_entry *e, *f = G.free_pages;
856	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
857	    {
858	      e = xcalloc (1, page_entry_size);
859	      e->order = order;
860	      e->bytes = G.pagesize;
861	      e->page = a;
862	      e->group = group;
863	      e->next = f;
864	      f = e;
865	    }
866	  G.free_pages = f;
867	}
868    }
869#endif
870
871  if (entry == NULL)
872    entry = xcalloc (1, page_entry_size);
873
874  entry->bytes = entry_size;
875  entry->page = page;
876  entry->context_depth = G.context_depth;
877  entry->order = order;
878  entry->num_free_objects = num_objects;
879  entry->next_bit_hint = 1;
880
881  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
882
883#ifdef USING_MALLOC_PAGE_GROUPS
884  entry->group = group;
885  set_page_group_in_use (group, page);
886#endif
887
888  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
889     increment the hint.  */
890  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
891    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
892
893  set_page_table_entry (page, entry);
894
895  if (GGC_DEBUG_LEVEL >= 2)
896    fprintf (G.debug_file,
897	     "Allocating page at %p, object size=%lu, data %p-%p\n",
898	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
899	     page + entry_size - 1);
900
901  return entry;
902}
903
/* Adjust the size of G.depth so that no index greater than the one
   used by the top element of G.by_depth is used.  */
906
907static inline void
908adjust_depth (void)
909{
910  page_entry *top;
911
912  if (G.by_depth_in_use)
913    {
914      top = G.by_depth[G.by_depth_in_use-1];
915
916      /* Peel back indices in depth that index into by_depth, so that
917	 as new elements are added to by_depth, we note the indices
918	 of those elements, if they are for new context depths.  */
919      while (G.depth_in_use > (size_t)top->context_depth+1)
920	--G.depth_in_use;
921    }
922}
923
924/* For a page that is no longer needed, put it on the free page list.  */
925
926static void
927free_page (page_entry *entry)
928{
929  if (GGC_DEBUG_LEVEL >= 2)
930    fprintf (G.debug_file,
931	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
932	     entry->page, entry->page + entry->bytes - 1);
933
934  /* Mark the page as inaccessible.  Discard the handle to avoid handle
935     leak.  */
936  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
937
938  set_page_table_entry (entry->page, NULL);
939
940#ifdef USING_MALLOC_PAGE_GROUPS
941  clear_page_group_in_use (entry->group, entry->page);
942#endif
943
944  if (G.by_depth_in_use > 1)
945    {
946      page_entry *top = G.by_depth[G.by_depth_in_use-1];
947      int i = entry->index_by_depth;
948
949      /* We cannot free a page from a context deeper than the current
950	 one.  */
951      gcc_assert (entry->context_depth == top->context_depth);
952
953      /* Put top element into freed slot.  */
954      G.by_depth[i] = top;
955      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
956      top->index_by_depth = i;
957    }
958  --G.by_depth_in_use;
959
960  adjust_depth ();
961
962  entry->next = G.free_pages;
963  G.free_pages = entry;
964}
965
966/* Release the free page cache to the system.  */
967
968static void
969release_pages (void)
970{
971#ifdef USING_MMAP
972  page_entry *p, *next;
973  char *start;
974  size_t len;
975
976  /* Gather up adjacent pages so they are unmapped together.  */
977  p = G.free_pages;
978
979  while (p)
980    {
981      start = p->page;
982      next = p->next;
983      len = p->bytes;
984      free (p);
985      p = next;
986
987      while (p && p->page == start + len)
988	{
989	  next = p->next;
990	  len += p->bytes;
991	  free (p);
992	  p = next;
993	}
994
995      munmap (start, len);
996      G.bytes_mapped -= len;
997    }
998
999  G.free_pages = NULL;
1000#endif
1001#ifdef USING_MALLOC_PAGE_GROUPS
1002  page_entry **pp, *p;
1003  page_group **gp, *g;
1004
1005  /* Remove all pages from free page groups from the list.  */
1006  pp = &G.free_pages;
1007  while ((p = *pp) != NULL)
1008    if (p->group->in_use == 0)
1009      {
1010	*pp = p->next;
1011	free (p);
1012      }
1013    else
1014      pp = &p->next;
1015
1016  /* Remove all free page groups, and release the storage.  */
1017  gp = &G.page_groups;
1018  while ((g = *gp) != NULL)
1019    if (g->in_use == 0)
1020      {
1021	*gp = g->next;
1022	G.bytes_mapped -= g->alloc_size;
1023	free (g->allocation);
1024      }
1025    else
1026      gp = &g->next;
1027#endif
1028}
1029
1030/* This table provides a fast way to determine ceil(log_2(size)) for
1031   allocation requests.  The minimum allocation size is eight bytes.  */
1032#define NUM_SIZE_LOOKUP 512
1033static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1034{
1035  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1036  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1037  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1038  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1039  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1040  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1041  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1042  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1043  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1044  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1045  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1046  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1047  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1048  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1049  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1050  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1051  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1052  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1053  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1054  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1055  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1056  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1057  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1058  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1059  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1060  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1061  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1062  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1063  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1064  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1065  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1066  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1067};
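
/* For example, size_lookup[20] is 5, so a 20-byte request is served
   from a page of 32-byte objects -- at least until init_ggc remaps
   some entries of this table to point at the extra orders.  */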
1068
1069/* Typed allocation function.  Does nothing special in this collector.  */
1070
1071void *
1072ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1073		      MEM_STAT_DECL)
1074{
1075  return ggc_alloc_stat (size PASS_MEM_STAT);
1076}
1077
1078/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1079
1080void *
1081ggc_alloc_stat (size_t size MEM_STAT_DECL)
1082{
1083  size_t order, word, bit, object_offset, object_size;
1084  struct page_entry *entry;
1085  void *result;
1086
1087  if (size < NUM_SIZE_LOOKUP)
1088    {
1089      order = size_lookup[size];
1090      object_size = OBJECT_SIZE (order);
1091    }
1092  else
1093    {
1094      order = 10;
1095      while (size > (object_size = OBJECT_SIZE (order)))
1096	order++;
1097    }
1098
1099  /* If there are non-full pages for this size allocation, they are at
1100     the head of the list.  */
1101  entry = G.pages[order];
1102
1103  /* If there is no page for this object size, or all pages in this
1104     context are full, allocate a new page.  */
1105  if (entry == NULL || entry->num_free_objects == 0)
1106    {
1107      struct page_entry *new_entry;
1108      new_entry = alloc_page (order);
1109
1110      new_entry->index_by_depth = G.by_depth_in_use;
1111      push_by_depth (new_entry, 0);
1112
      /* We can skip context depths; if we do, make sure we go all the
	 way to the new depth.  */
1115      while (new_entry->context_depth >= G.depth_in_use)
1116	push_depth (G.by_depth_in_use-1);
1117
1118      /* If this is the only entry, it's also the tail.  If it is not
1119	 the only entry, then we must update the PREV pointer of the
1120	 ENTRY (G.pages[order]) to point to our new page entry.  */
1121      if (entry == NULL)
1122	G.page_tails[order] = new_entry;
1123      else
1124	entry->prev = new_entry;
1125
      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL PREV pointer.  */
1128      new_entry->next = entry;
1129      new_entry->prev = NULL;
1130      entry = new_entry;
1131      G.pages[order] = new_entry;
1132
1133      /* For a new page, we know the word and bit positions (in the
1134	 in_use bitmap) of the first available object -- they're zero.  */
1135      new_entry->next_bit_hint = 1;
1136      word = 0;
1137      bit = 0;
1138      object_offset = 0;
1139    }
1140  else
1141    {
1142      /* First try to use the hint left from the previous allocation
1143	 to locate a clear bit in the in-use bitmap.  We've made sure
1144	 that the one-past-the-end bit is always set, so if the hint
1145	 has run over, this test will fail.  */
1146      unsigned hint = entry->next_bit_hint;
1147      word = hint / HOST_BITS_PER_LONG;
1148      bit = hint % HOST_BITS_PER_LONG;
1149
1150      /* If the hint didn't work, scan the bitmap from the beginning.  */
1151      if ((entry->in_use_p[word] >> bit) & 1)
1152	{
1153	  word = bit = 0;
1154	  while (~entry->in_use_p[word] == 0)
1155	    ++word;
1156
1157#if GCC_VERSION >= 3004
1158	  bit = __builtin_ctzl (~entry->in_use_p[word]);
1159#else
1160	  while ((entry->in_use_p[word] >> bit) & 1)
1161	    ++bit;
1162#endif
1163
1164	  hint = word * HOST_BITS_PER_LONG + bit;
1165	}
1166
1167      /* Next time, try the next bit.  */
1168      entry->next_bit_hint = hint + 1;
1169
1170      object_offset = hint * object_size;
1171    }
1172
1173  /* Set the in-use bit.  */
1174  entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1175
1176  /* Keep a running total of the number of free objects.  If this page
1177     fills up, we may have to move it to the end of the list if the
1178     next page isn't full.  If the next page is full, all subsequent
1179     pages are full, so there's no need to move it.  */
1180  if (--entry->num_free_objects == 0
1181      && entry->next != NULL
1182      && entry->next->num_free_objects > 0)
1183    {
1184      /* We have a new head for the list.  */
1185      G.pages[order] = entry->next;
1186
1187      /* We are moving ENTRY to the end of the page table list.
1188	 The new page at the head of the list will have NULL in
1189	 its PREV field and ENTRY will have NULL in its NEXT field.  */
1190      entry->next->prev = NULL;
1191      entry->next = NULL;
1192
1193      /* Append ENTRY to the tail of the list.  */
1194      entry->prev = G.page_tails[order];
1195      G.page_tails[order]->next = entry;
1196      G.page_tails[order] = entry;
1197    }
1198
1199  /* Calculate the object's address.  */
1200  result = entry->page + object_offset;
1201#ifdef GATHER_STATISTICS
1202  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1203		       result PASS_MEM_STAT);
1204#endif
1205
1206#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning the object by writing 0xaf, so that we keep exactly
     the same semantics in the presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Discard
     the handle to avoid a handle leak.  */
1211  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
1212
1213  /* `Poison' the entire allocated object, including any padding at
1214     the end.  */
1215  memset (result, 0xaf, object_size);
1216
  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid a handle leak.  */
1219  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1220					    object_size - size));
1221#endif
1222
1223  /* Tell Valgrind that the memory is there, but its content isn't
1224     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
1226  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1227
1228  /* Keep track of how many bytes are being allocated.  This
1229     information is used in deciding when to collect.  */
1230  G.allocated += object_size;
1231
1232  /* For timevar statistics.  */
1233  timevar_ggc_mem_total += object_size;
1234
1235#ifdef GATHER_STATISTICS
1236  {
1237    size_t overhead = object_size - size;
1238
1239    G.stats.total_overhead += overhead;
1240    G.stats.total_allocated += object_size;
1241    G.stats.total_overhead_per_order[order] += overhead;
1242    G.stats.total_allocated_per_order[order] += object_size;
1243
1244    if (size <= 32)
1245      {
1246	G.stats.total_overhead_under32 += overhead;
1247	G.stats.total_allocated_under32 += object_size;
1248      }
1249    if (size <= 64)
1250      {
1251	G.stats.total_overhead_under64 += overhead;
1252	G.stats.total_allocated_under64 += object_size;
1253      }
1254    if (size <= 128)
1255      {
1256	G.stats.total_overhead_under128 += overhead;
1257	G.stats.total_allocated_under128 += object_size;
1258      }
1259  }
1260#endif
1261
1262  if (GGC_DEBUG_LEVEL >= 3)
1263    fprintf (G.debug_file,
1264	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1265	     (unsigned long) size, (unsigned long) object_size, result,
1266	     (void *) entry);
1267
1268  return result;
1269}
1270
/* If P is not marked, mark it and return false.  Otherwise return true.
1272   P must have been allocated by the GC allocator; it mustn't point to
1273   static objects, stack variables, or memory allocated with malloc.  */
1274
1275int
1276ggc_set_mark (const void *p)
1277{
1278  page_entry *entry;
1279  unsigned bit, word;
1280  unsigned long mask;
1281
  /* Look up the page on which the object is allocated.  If the object
1283     wasn't allocated by the collector, we'll probably die.  */
1284  entry = lookup_page_table_entry (p);
1285  gcc_assert (entry);
1286
1287  /* Calculate the index of the object on the page; this is its bit
1288     position in the in_use_p bitmap.  */
1289  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1290  word = bit / HOST_BITS_PER_LONG;
1291  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1292
1293  /* If the bit was previously set, skip it.  */
1294  if (entry->in_use_p[word] & mask)
1295    return 1;
1296
1297  /* Otherwise set it, and decrement the free object count.  */
1298  entry->in_use_p[word] |= mask;
1299  entry->num_free_objects -= 1;
1300
1301  if (GGC_DEBUG_LEVEL >= 4)
1302    fprintf (G.debug_file, "Marking %p\n", p);
1303
1304  return 0;
1305}
1306
1307/* Return 1 if P has been marked, zero otherwise.
1308   P must have been allocated by the GC allocator; it mustn't point to
1309   static objects, stack variables, or memory allocated with malloc.  */
1310
1311int
1312ggc_marked_p (const void *p)
1313{
1314  page_entry *entry;
1315  unsigned bit, word;
1316  unsigned long mask;
1317
  /* Look up the page on which the object is allocated.  If the object
1319     wasn't allocated by the collector, we'll probably die.  */
1320  entry = lookup_page_table_entry (p);
1321  gcc_assert (entry);
1322
1323  /* Calculate the index of the object on the page; this is its bit
1324     position in the in_use_p bitmap.  */
1325  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1326  word = bit / HOST_BITS_PER_LONG;
1327  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1328
1329  return (entry->in_use_p[word] & mask) != 0;
1330}
1331
1332/* Return the size of the gc-able object P.  */
1333
1334size_t
1335ggc_get_size (const void *p)
1336{
1337  page_entry *pe = lookup_page_table_entry (p);
1338  return OBJECT_SIZE (pe->order);
1339}
1340
1341/* Release the memory for object P.  */
1342
1343void
1344ggc_free (void *p)
1345{
1346  page_entry *pe = lookup_page_table_entry (p);
1347  size_t order = pe->order;
1348  size_t size = OBJECT_SIZE (order);
1349
1350#ifdef GATHER_STATISTICS
1351  ggc_free_overhead (p);
1352#endif
1353
1354  if (GGC_DEBUG_LEVEL >= 3)
1355    fprintf (G.debug_file,
1356	     "Freeing object, actual size=%lu, at %p on %p\n",
1357	     (unsigned long) size, p, (void *) pe);
1358
1359#ifdef ENABLE_GC_CHECKING
1360  /* Poison the data, to indicate the data is garbage.  */
1361  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
1362  memset (p, 0xa5, size);
1363#endif
1364  /* Let valgrind know the object is free.  */
1365  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
1366
1367#ifdef ENABLE_GC_ALWAYS_COLLECT
1368  /* In the completely-anal-checking mode, we do *not* immediately free
1369     the data, but instead verify that the data is *actually* not
1370     reachable the next time we collect.  */
1371  {
1372    struct free_object *fo = XNEW (struct free_object);
1373    fo->object = p;
1374    fo->next = G.free_object_list;
1375    G.free_object_list = fo;
1376  }
1377#else
1378  {
1379    unsigned int bit_offset, word, bit;
1380
1381    G.allocated -= size;
1382
1383    /* Mark the object not-in-use.  */
1384    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1385    word = bit_offset / HOST_BITS_PER_LONG;
1386    bit = bit_offset % HOST_BITS_PER_LONG;
1387    pe->in_use_p[word] &= ~(1UL << bit);
1388
1389    if (pe->num_free_objects++ == 0)
1390      {
1391	page_entry *p, *q;
1392
1393	/* If the page is completely full, then it's supposed to
1394	   be after all pages that aren't.  Since we've freed one
1395	   object from a page that was full, we need to move the
1396	   page to the head of the list.
1397
1398	   PE is the node we want to move.  Q is the previous node
1399	   and P is the next node in the list.  */
1400	q = pe->prev;
1401	if (q && q->num_free_objects == 0)
1402	  {
1403	    p = pe->next;
1404
1405	    q->next = p;
1406
1407	    /* If PE was at the end of the list, then Q becomes the
1408	       new end of the list.  If PE was not the end of the
1409	       list, then we need to update the PREV field for P.  */
1410	    if (!p)
1411	      G.page_tails[order] = q;
1412	    else
1413	      p->prev = q;
1414
1415	    /* Move PE to the head of the list.  */
1416	    pe->next = G.pages[order];
1417	    pe->prev = NULL;
1418	    G.pages[order]->prev = pe;
1419	    G.pages[order] = pe;
1420	  }
1421
1422	/* Reset the hint bit to point to the only free object.  */
1423	pe->next_bit_hint = bit_offset;
1424      }
1425  }
1426#endif
1427}
1428
1429/* Subroutine of init_ggc which computes the pair of numbers used to
1430   perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1431
1432   This algorithm is taken from Granlund and Montgomery's paper
1433   "Division by Invariant Integers using Multiplication"
1434   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1435   constants).  */
1436
1437static void
1438compute_inverse (unsigned order)
1439{
1440  size_t size, inv;
1441  unsigned int e;
1442
1443  size = OBJECT_SIZE (order);
1444  e = 0;
1445  while (size % 2 == 0)
1446    {
1447      e++;
1448      size >>= 1;
1449    }
1450
1451  inv = size;
1452  while (inv * size != 1)
1453    inv = inv * (2 - inv*size);
1454
1455  DIV_MULT (order) = inv;
1456  DIV_SHIFT (order) = e;
1457}
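
/* As an illustration (assuming a 32-bit size_t): for an object size of
   24 we get e == 3 and a reduced, odd size of 3.  The loop above is a
   Newton-Raphson iteration that doubles the number of correct low-order
   bits of INV on each step, converging to 0xAAAAAAAB, since
   3 * 0xAAAAAAAB == 0x200000001 == 1 (mod 2^32).  */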
1458
1459/* Initialize the ggc-mmap allocator.  */
1460void
1461init_ggc (void)
1462{
1463  unsigned order;
1464
1465  G.pagesize = getpagesize();
1466  G.lg_pagesize = exact_log2 (G.pagesize);
1467
1468#ifdef HAVE_MMAP_DEV_ZERO
1469  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1470  if (G.dev_zero_fd == -1)
1471    internal_error ("open /dev/zero: %m");
1472#endif
1473
1474#if 0
1475  G.debug_file = fopen ("ggc-mmap.debug", "w");
1476#else
1477  G.debug_file = stdout;
1478#endif
1479
1480#ifdef USING_MMAP
1481  /* StunOS has an amazing off-by-one error for the first mmap allocation
1482     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1483     believe, is an unaligned page allocation, which would cause us to
1484     hork badly if we tried to use it.  */
1485  {
1486    char *p = alloc_anon (NULL, G.pagesize);
1487    struct page_entry *e;
1488    if ((size_t)p & (G.pagesize - 1))
1489      {
1490	/* How losing.  Discard this one and try another.  If we still
1491	   can't get something useful, give up.  */
1492
1493	p = alloc_anon (NULL, G.pagesize);
1494	gcc_assert (!((size_t)p & (G.pagesize - 1)));
1495      }
1496
1497    /* We have a good page, might as well hold onto it...  */
1498    e = XCNEW (struct page_entry);
1499    e->bytes = G.pagesize;
1500    e->page = p;
1501    e->next = G.free_pages;
1502    G.free_pages = e;
1503  }
1504#endif
1505
1506  /* Initialize the object size table.  */
1507  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1508    object_size_table[order] = (size_t) 1 << order;
1509  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1510    {
1511      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1512
1513      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1514	 so that we're sure of getting aligned memory.  */
1515      s = ROUND_UP (s, MAX_ALIGNMENT);
1516      object_size_table[order] = s;
1517    }
1518
1519  /* Initialize the objects-per-page and inverse tables.  */
1520  for (order = 0; order < NUM_ORDERS; ++order)
1521    {
1522      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1523      if (objects_per_page_table[order] == 0)
1524	objects_per_page_table[order] = 1;
1525      compute_inverse (order);
1526    }
1527
1528  /* Reset the size_lookup array to put appropriately sized objects in
1529     the special orders.  All objects bigger than the previous power
1530     of two, but no greater than the special size, should go in the
1531     new order.  */
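  /* For instance, if some extra order has an object size of 72 (a
     hypothetical value), the loop below rewrites size_lookup[65]
     through size_lookup[72] from 7 (128-byte objects) to that order,
     so such requests waste at most 7 bytes instead of up to 63.  */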
1532  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1533    {
1534      int o;
1535      int i;
1536
1537      i = OBJECT_SIZE (order);
1538      if (i >= NUM_SIZE_LOOKUP)
1539	continue;
1540
      for (o = size_lookup[i]; o == size_lookup[i]; --i)
1542	size_lookup[i] = order;
1543    }
1544
1545  G.depth_in_use = 0;
1546  G.depth_max = 10;
1547  G.depth = XNEWVEC (unsigned int, G.depth_max);
1548
1549  G.by_depth_in_use = 0;
1550  G.by_depth_max = INITIAL_PTE_COUNT;
1551  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1552  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1553}
1554
1555/* Start a new GGC zone.  */
1556
1557struct alloc_zone *
1558new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
1559{
1560  return NULL;
1561}
1562
1563/* Destroy a GGC zone.  */
1564void
1565destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
1566{
1567}
1568
1569/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1570   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1571
1572static void
1573ggc_recalculate_in_use_p (page_entry *p)
1574{
1575  unsigned int i;
1576  size_t num_objects;
1577
1578  /* Because the past-the-end bit in in_use_p is always set, we
1579     pretend there is one additional object.  */
1580  num_objects = OBJECTS_IN_PAGE (p) + 1;
1581
1582  /* Reset the free object count.  */
1583  p->num_free_objects = num_objects;
1584
1585  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1586  for (i = 0;
1587       i < CEIL (BITMAP_SIZE (num_objects),
1588		 sizeof (*p->in_use_p));
1589       ++i)
1590    {
1591      unsigned long j;
1592
1593      /* Something is in use if it is marked, or if it was in use in a
1594	 context further down the context stack.  */
1595      p->in_use_p[i] |= save_in_use_p (p)[i];
1596
1597      /* Decrement the free object count for every object allocated.  */
1598      for (j = p->in_use_p[i]; j; j >>= 1)
1599	p->num_free_objects -= (j & 1);
1600    }
1601
1602  gcc_assert (p->num_free_objects < num_objects);
1603}
1604
1605/* Unmark all objects.  */
1606
1607static void
1608clear_marks (void)
1609{
1610  unsigned order;
1611
1612  for (order = 2; order < NUM_ORDERS; order++)
1613    {
1614      page_entry *p;
1615
1616      for (p = G.pages[order]; p != NULL; p = p->next)
1617	{
1618	  size_t num_objects = OBJECTS_IN_PAGE (p);
1619	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1620
1621	  /* The data should be page-aligned.  */
1622	  gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1623
1624	  /* Pages that aren't in the topmost context are not collected;
1625	     nevertheless, we need their in-use bit vectors to store GC
1626	     marks.  So, back them up first.  */
1627	  if (p->context_depth < G.context_depth)
1628	    {
1629	      if (! save_in_use_p (p))
1630		save_in_use_p (p) = xmalloc (bitmap_size);
1631	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1632	    }
1633
	  /* Reset the number of free objects and clear the
	     in-use bits.  These will be adjusted by mark_obj.  */
1636	  p->num_free_objects = num_objects;
1637	  memset (p->in_use_p, 0, bitmap_size);
1638
1639	  /* Make sure the one-past-the-end bit is always set.  */
1640	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1641	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1642	}
1643    }
1644}
1645
1646/* Free all empty pages.  Partially empty pages need no attention
1647   because the `mark' bit doubles as an `unused' bit.  */
1648
1649static void
1650sweep_pages (void)
1651{
1652  unsigned order;
1653
1654  for (order = 2; order < NUM_ORDERS; order++)
1655    {
1656      /* The last page-entry to consider, regardless of entries
1657	 placed at the end of the list.  */
1658      page_entry * const last = G.page_tails[order];
1659
      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  num_objects = OBJECTS_IN_PAGE (p);

	  /* Add all live objects on this page to the count of
             allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      /* If P was the first page in the list, then NEXT
		 becomes the new first page in the list, otherwise
		 splice P out of the forward pointers.  */
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Splice P out of the back pointers too.  */
	      if (next)
		next->prev = previous;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  p->prev = G.page_tails[order];
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;

		  /* And update the backpointer in NEXT if necessary.  */
		  if (next)
		    next->prev = previous;

		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;

	      /* Update the backchain in the next node if it exists.  */
	      if (p->next)
		p->next->prev = previous;

	      /* Move P to the head of the list.  */
	      p->next = G.pages[order];
	      p->prev = NULL;
	      G.pages[order]->prev = p;

	      /* Update the head pointer.  */
	      G.pages[order] = p;

	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
	        G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

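  /* Object sizes start at four bytes (order 2), so orders 0 and 1
     never have any pages; start the scan at order 2.  */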
  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t num_objects;
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  num_objects = OBJECTS_IN_PAGE (p);
	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so that exactly the same memory semantics are kept in
		     case there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

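  /* For example, assuming the baseline parameter defaults of
     ggc-min-heapsize=4096 (kB) and ggc-min-expand=30 (%), both of
     which may be retuned at startup based on available RAM,
     collection is skipped until allocations have grown by roughly
     1.2MB beyond what survived the previous collection.  */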
  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL(G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                        %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                       %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          fprintf (stderr, "Total Overhead  page size %7lu:     %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu:     %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}

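/* Support for precompiled headers (PCH).  The PCH driver in
   ggc-common.c is expected to invoke the hooks below roughly in the
   following order when writing a PCH (a sketch of the calling
   sequence, not a literal transcript of the caller):

     d = init_ggc_pch ();
     for each object:  ggc_pch_count_object (d, x, size, ...);
     total = ggc_pch_total_size (d);
     ggc_pch_this_base (d, base);    with BASE the address at which
                                     the PCH data will later be mapped
     for each object:  newx = ggc_pch_alloc_object (d, x, size, ...);
     ggc_pch_prepare_write (d, f);
     for each object:  ggc_pch_write_object (d, f, x, newx, size, ...);
     ggc_pch_finish (d, f);

   On the read side, ggc_pch_read recreates page-table entries for the
   pages that have been mapped back in.  */

/* Private state for writing a PCH: the per-order object counts that
   are saved to disk, plus the running base address and the number of
   objects already written for each order.  */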
struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

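/* Return a fresh, zero-initialized ggc_pch_data structure.  */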
struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

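/* Record that an object of SIZE bytes will be written to the PCH, by
   bumping the object count for its allocation order.  */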
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}

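/* Return the total number of bytes the PCH objects will occupy, with
   each order's region rounded up to a whole number of pages.  */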
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

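/* BASE is the address at which the PCH objects will be mapped when
   the PCH is read back in; record the start of each order's region
   within it.  */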
void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}


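/* Return the address that an object of SIZE bytes will have once the
   PCH is mapped at the base recorded by ggc_pch_this_base.  */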
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

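/* Called just before the PCH objects are written out to F; this
   collector needs no preparation.  */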
void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

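/* Write the SIZE bytes of object X to F, padding up to the full
   OBJECT_SIZE of its order, and on to the next page boundary once the
   last object of that order has been written.  */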
void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
     object out to OBJECT_SIZE(order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE(order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof(emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

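/* Write the table of per-order object counts to F and release D.  */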
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Move the PCH page-table entries just added to the end of by_depth
   to the front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we move the new entries to the front of the varrays by
     building new arrays laid out as [new entries][old entries].  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot only if we have old PTEs, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

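/* Read the PCH allocator data from F.  ADDR is the address at which
   the PCH object data has been mapped.  Recreate page-table entries
   for the mapped pages and mark every object on them as in use.  */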
void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
			   - sizeof (long)
			   + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

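      /* Mark all objects on the page as in use, including the extra
	 bit one past the last object.  */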
      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays; later, we will move the new information
	 to the front of the varrays, as the PCH page tables are at
	 context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}