1/* "Bag-of-pages" garbage collector for the GNU compiler.
2   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
3   Free Software Foundation, Inc.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 2, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING.  If not, write to the Free
19Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "tree.h"
27#include "rtl.h"
28#include "tm_p.h"
29#include "toplev.h"
30#include "flags.h"
31#include "ggc.h"
32#include "timevar.h"
33#include "params.h"
34#include "tree-flow.h"
35#ifdef ENABLE_VALGRIND_CHECKING
36# ifdef HAVE_VALGRIND_MEMCHECK_H
37#  include <valgrind/memcheck.h>
38# elif defined HAVE_MEMCHECK_H
39#  include <memcheck.h>
40# else
41#  include <valgrind.h>
42# endif
43#else
44/* Avoid #ifdef:s when we can help it.  */
45#define VALGRIND_DISCARD(x)
46#endif
47
48/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
49   file open.  Prefer either to valloc.  */
50#ifdef HAVE_MMAP_ANON
51# undef HAVE_MMAP_DEV_ZERO
52
53# include <sys/mman.h>
54# ifndef MAP_FAILED
55#  define MAP_FAILED -1
56# endif
57# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
58#  define MAP_ANONYMOUS MAP_ANON
59# endif
60# define USING_MMAP
61
62#endif
63
64#ifdef HAVE_MMAP_DEV_ZERO
65
66# include <sys/mman.h>
67# ifndef MAP_FAILED
68#  define MAP_FAILED -1
69# endif
70# define USING_MMAP
71
72#endif
73
74#ifndef USING_MMAP
75#define USING_MALLOC_PAGE_GROUPS
76#endif
77
78/* Strategy:
79
80   This garbage-collecting allocator allocates objects on one of a set
81   of pages.  Each page can allocate objects of a single size only;
82   available sizes are powers of two starting at four bytes.  The size
83   of an allocation request is rounded up to the next power of two
84   (`order'), and satisfied from the appropriate page.
85
86   Each page is recorded in a page-entry, which also maintains an
87   in-use bitmap of object positions on the page.  This allows the
88   allocation state of a particular object to be flipped without
89   touching the page itself.
90
91   Each page-entry also has a context depth, which is used to track
92   pushing and popping of allocation contexts.  Only objects allocated
93   in the current (highest-numbered) context may be collected.
94
95   Page entries are arranged in an array of singly-linked lists.  The
96   array is indexed by the allocation size, in bits, of the pages on
97   it; i.e. all pages on a list allocate objects of the same size.
98   Pages are ordered on the list such that all non-full pages precede
99   all full pages, with non-full pages arranged in order of decreasing
100   context depth.
101
102   Empty pages (of all orders) are kept on a single page cache list,
103   and are considered first when new pages are required; they are
104   deallocated at the start of the next collection if they haven't
105   been recycled by then.  */
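
/* Illustrative sketch only (it ignores the extra, non-power-of-two
   orders added further down): the rounding described above amounts to

     unsigned order = 0;
     while (((size_t) 1 << order) < request_size)
       order++;

   so a 20-byte request is rounded up to 32 bytes (order 5) and served
   from a page on the order-5 list.  The allocator itself uses the
   size_lookup table and OBJECT_SIZE rather than this loop;
   request_size here is just a placeholder name.  */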
106
107/* Define GGC_DEBUG_LEVEL to print debugging information.
108     0: No debugging output.
109     1: GC statistics only.
110     2: Page-entry allocations/deallocations as well.
111     3: Object allocations as well.
112     4: Object marks as well.  */
113#define GGC_DEBUG_LEVEL (0)
114
115#ifndef HOST_BITS_PER_PTR
116#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
117#endif
118
119
/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */
141
142#define PAGE_L1_BITS	(8)
143#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
144#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
145#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)
146
147#define LOOKUP_L1(p) \
148  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
149
150#define LOOKUP_L2(p) \
151  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
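
/* Worked example, assuming 4096-byte system pages (G.lg_pagesize == 12)
   on a 32-bit host: PAGE_L2_BITS is 32 - 8 - 12 == 12, and for the
   address 0xdeadb000

     LOOKUP_L1 (0xdeadb000) == 0xde    (bits 31-24)
     LOOKUP_L2 (0xdeadb000) == 0xadb   (bits 23-12)

   while the low 12 bits give the offset of the object within its
   page.  */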
152
153/* The number of objects per allocation page, for objects on a page of
154   the indicated ORDER.  */
155#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
156
157/* The number of objects in P.  */
158#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
159
160/* The size of an object on a page of the indicated ORDER.  */
161#define OBJECT_SIZE(ORDER) object_size_table[ORDER]
162
163/* For speed, we avoid doing a general integer divide to locate the
164   offset in the allocation bitmap, by precalculating numbers M, S
165   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
166   within the page which is evenly divisible by the object size Z.  */
167#define DIV_MULT(ORDER) inverse_table[ORDER].mult
168#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
169#define OFFSET_TO_BIT(OFFSET, ORDER) \
170  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
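
/* Worked example, for a hypothetical 24-byte object size Z == 8 * 3 and
   32-bit arithmetic as in the comment above: compute_inverse below
   yields DIV_SHIFT == 3 and DIV_MULT == 0xAAAAAAAB (the inverse of 3
   modulo 2^32).  Then for the offset O == 48,

     (48 * 0xAAAAAAAB) mod 2^32 == 16,  and  16 >> 3 == 2 == 48 / 24.  */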
171
172/* The number of extra orders, not corresponding to power-of-two sized
173   objects.  */
174
175#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
176
177#define RTL_SIZE(NSLOTS) \
178  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
179
180#define TREE_EXP_SIZE(OPS) \
181  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
182
183/* The Ith entry is the maximum size of an object to be stored in the
184   Ith extra order.  Adding a new entry to this array is the *only*
185   thing you need to do to add a new special allocation size.  */
186
187static const size_t extra_order_size_table[] = {
188  sizeof (struct stmt_ann_d),
189  sizeof (struct tree_decl_non_common),
190  sizeof (struct tree_field_decl),
191  sizeof (struct tree_parm_decl),
192  sizeof (struct tree_var_decl),
193  sizeof (struct tree_list),
194  TREE_EXP_SIZE (2),
195  RTL_SIZE (2),			/* MEM, PLUS, etc.  */
196  RTL_SIZE (9),			/* INSN */
197};
198
199/* The total number of orders.  */
200
201#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
202
203/* We use this structure to determine the alignment required for
204   allocations.  For power-of-two sized allocations, that's not a
205   problem, but it does matter for odd-sized allocations.  */
206
207struct max_alignment {
208  char c;
209  union {
210    HOST_WIDEST_INT i;
211    long double d;
212  } u;
213};
214
215/* The biggest alignment required.  */
216
217#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
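
/* For example, on a host where the most strictly aligned member of the
   union needs 8-byte alignment, the char is followed by 7 bytes of
   padding and offsetof (struct max_alignment, u), i.e. MAX_ALIGNMENT,
   is 8.  The exact value is host-dependent.  */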
218
219/* Compute the smallest nonnegative number which when added to X gives
220   a multiple of F.  */
221
222#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
223
224/* Compute the smallest multiple of F that is >= X.  */
225
226#define ROUND_UP(x, f) (CEIL (x, f) * (f))
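
/* For example, ROUND_UP_VALUE (10, 8) == 7 - ((7 + 10) % 8) == 6, and
   ROUND_UP (10, 8) == CEIL (10, 8) * 8 == 16 == 10 + 6.  */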
227
/* The Ith entry is the number of objects on a page of order I.  */
229
230static unsigned objects_per_page_table[NUM_ORDERS];
231
232/* The Ith entry is the size of an object on a page of order I.  */
233
234static size_t object_size_table[NUM_ORDERS];
235
236/* The Ith entry is a pair of numbers (mult, shift) such that
237   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
238   for all k evenly divisible by OBJECT_SIZE(I).  */
239
240static struct
241{
242  size_t mult;
243  unsigned int shift;
244}
245inverse_table[NUM_ORDERS];
246
247/* A page_entry records the status of an allocation page.  This
248   structure is dynamically sized to fit the bitmap in_use_p.  */
249typedef struct page_entry
250{
251  /* The next page-entry with objects of the same size, or NULL if
252     this is the last page-entry.  */
253  struct page_entry *next;
254
255  /* The previous page-entry with objects of the same size, or NULL if
256     this is the first page-entry.   The PREV pointer exists solely to
257     keep the cost of ggc_free manageable.  */
258  struct page_entry *prev;
259
260  /* The number of bytes allocated.  (This will always be a multiple
261     of the host system page size.)  */
262  size_t bytes;
263
264  /* The address at which the memory is allocated.  */
265  char *page;
266
267#ifdef USING_MALLOC_PAGE_GROUPS
268  /* Back pointer to the page group this page came from.  */
269  struct page_group *group;
270#endif
271
  /* This is the index in the by_depth array where this page entry
     can be found.  */
274  unsigned long index_by_depth;
275
276  /* Context depth of this page.  */
277  unsigned short context_depth;
278
279  /* The number of free objects remaining on this page.  */
280  unsigned short num_free_objects;
281
282  /* A likely candidate for the bit position of a free object for the
283     next allocation from this page.  */
284  unsigned short next_bit_hint;
285
  /* The lg of the size of objects allocated from this page.  */
287  unsigned char order;
288
289  /* A bit vector indicating whether or not objects are in use.  The
290     Nth bit is one if the Nth object on this page is allocated.  This
291     array is dynamically sized.  */
292  unsigned long in_use_p[1];
293} page_entry;
294
295#ifdef USING_MALLOC_PAGE_GROUPS
296/* A page_group describes a large allocation from malloc, from which
297   we parcel out aligned pages.  */
298typedef struct page_group
299{
300  /* A linked list of all extant page groups.  */
301  struct page_group *next;
302
303  /* The address we received from malloc.  */
304  char *allocation;
305
306  /* The size of the block.  */
307  size_t alloc_size;
308
309  /* A bitmask of pages in use.  */
310  unsigned int in_use;
311} page_group;
312#endif
313
314#if HOST_BITS_PER_PTR <= 32
315
316/* On 32-bit hosts, we use a two level page table, as pictured above.  */
317typedef page_entry **page_table[PAGE_L1_SIZE];
318
319#else
320
/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32 bits.  There will almost always be
   exactly one entry in the list.  */
324typedef struct page_table_chain
325{
326  struct page_table_chain *next;
327  size_t high_bits;
328  page_entry **table[PAGE_L1_SIZE];
329} *page_table;
330
331#endif
332
333/* The rest of the global variables.  */
334static struct globals
335{
  /* The Nth element in this array is the head of the list of pages
     with objects of order N.  If there are any pages with free
     objects, they will be at the head of the list.  NULL if there
     are no page-entries for this object size.  */
340  page_entry *pages[NUM_ORDERS];
341
  /* The Nth element in this array is the last page with objects of
     order N.  NULL if there are no page-entries for this object
     size.  */
345  page_entry *page_tails[NUM_ORDERS];
346
347  /* Lookup table for associating allocation pages with object addresses.  */
348  page_table lookup;
349
350  /* The system's page size.  */
351  size_t pagesize;
352  size_t lg_pagesize;
353
354  /* Bytes currently allocated.  */
355  size_t allocated;
356
357  /* Bytes currently allocated at the end of the last collection.  */
358  size_t allocated_last_gc;
359
360  /* Total amount of memory mapped.  */
361  size_t bytes_mapped;
362
363  /* Bit N set if any allocations have been done at context depth N.  */
364  unsigned long context_depth_allocations;
365
366  /* Bit N set if any collections have been done at context depth N.  */
367  unsigned long context_depth_collections;
368
369  /* The current depth in the context stack.  */
370  unsigned short context_depth;
371
372  /* A file descriptor open to /dev/zero for reading.  */
373#if defined (HAVE_MMAP_DEV_ZERO)
374  int dev_zero_fd;
375#endif
376
377  /* A cache of free system pages.  */
378  page_entry *free_pages;
379
380#ifdef USING_MALLOC_PAGE_GROUPS
381  page_group *page_groups;
382#endif
383
  /* The stream used for debugging output.  */
385  FILE *debug_file;
386
387  /* Current number of elements in use in depth below.  */
388  unsigned int depth_in_use;
389
390  /* Maximum number of elements that can be used before resizing.  */
391  unsigned int depth_max;
392
  /* Each element of this array is an index into by_depth where the
     given depth starts.  This structure is indexed by the depth we
     are interested in.  */
396  unsigned int *depth;
397
398  /* Current number of elements in use in by_depth below.  */
399  unsigned int by_depth_in_use;
400
401  /* Maximum number of elements that can be used before resizing.  */
402  unsigned int by_depth_max;
403
  /* Each element of this array is a pointer to a page_entry; all
     page_entries can be found here, ordered by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
409  page_entry **by_depth;
410
411  /* Each element is a pointer to the saved in_use_p bits, if any,
412     zero otherwise.  We allocate them all together, to enable a
413     better runtime data access pattern.  */
414  unsigned long **save_in_use;
415
416#ifdef ENABLE_GC_ALWAYS_COLLECT
417  /* List of free objects to be verified as actually free on the
418     next collection.  */
419  struct free_object
420  {
421    void *object;
422    struct free_object *next;
423  } *free_object_list;
424#endif
425
426#ifdef GATHER_STATISTICS
427  struct
428  {
429    /* Total memory allocated with ggc_alloc.  */
430    unsigned long long total_allocated;
431    /* Total overhead for memory to be allocated with ggc_alloc.  */
432    unsigned long long total_overhead;
433
    /* Total allocations and overhead for sizes of at most 32, 64 and
       128 bytes.  These sizes are interesting because they are typical
       cache line sizes.  */
437
438    unsigned long long total_allocated_under32;
439    unsigned long long total_overhead_under32;
440
441    unsigned long long total_allocated_under64;
442    unsigned long long total_overhead_under64;
443
444    unsigned long long total_allocated_under128;
445    unsigned long long total_overhead_under128;
446
447    /* The allocations for each of the allocation orders.  */
448    unsigned long long total_allocated_per_order[NUM_ORDERS];
449
450    /* The overhead for each of the allocation orders.  */
451    unsigned long long total_overhead_per_order[NUM_ORDERS];
452  } stats;
453#endif
454} G;
455
456/* The size in bytes required to maintain a bitmap for the objects
457   on a page-entry.  */
458#define BITMAP_SIZE(Num_objects) \
459  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
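
/* For example, with 4096-byte pages and 32-byte objects (order 5),
   there are 128 objects per page; alloc_page below asks for
   BITMAP_SIZE (128 + 1) to cover the extra one-past-the-end bit,
   which is 3 longs (24 bytes) on a host with 64-bit longs, or
   5 longs (20 bytes) with 32-bit longs.  */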
460
461/* Allocate pages in chunks of this size, to throttle calls to memory
462   allocation routines.  The first page is used, the rest go onto the
463   free list.  This cannot be larger than HOST_BITS_PER_INT for the
464   in_use bitmask for page_group.  Hosts that need a different value
465   can override this by defining GGC_QUIRE_SIZE explicitly.  */
466#ifndef GGC_QUIRE_SIZE
467# ifdef USING_MMAP
468#  define GGC_QUIRE_SIZE 256
469# else
470#  define GGC_QUIRE_SIZE 16
471# endif
472#endif
473
474/* Initial guess as to how many page table entries we might need.  */
475#define INITIAL_PTE_COUNT 128
476
477static int ggc_allocated_p (const void *);
478static page_entry *lookup_page_table_entry (const void *);
479static void set_page_table_entry (void *, page_entry *);
480#ifdef USING_MMAP
481static char *alloc_anon (char *, size_t);
482#endif
483#ifdef USING_MALLOC_PAGE_GROUPS
484static size_t page_group_index (char *, char *);
485static void set_page_group_in_use (page_group *, char *);
486static void clear_page_group_in_use (page_group *, char *);
487#endif
488static struct page_entry * alloc_page (unsigned);
489static void free_page (struct page_entry *);
490static void release_pages (void);
491static void clear_marks (void);
492static void sweep_pages (void);
493static void ggc_recalculate_in_use_p (page_entry *);
494static void compute_inverse (unsigned);
495static inline void adjust_depth (void);
496static void move_ptes_to_front (int, int);
497
498void debug_print_page_list (int);
499static void push_depth (unsigned int);
500static void push_by_depth (page_entry *, unsigned long *);
501
502/* Push an entry onto G.depth.  */
503
504inline static void
505push_depth (unsigned int i)
506{
507  if (G.depth_in_use >= G.depth_max)
508    {
509      G.depth_max *= 2;
510      G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
511    }
512  G.depth[G.depth_in_use++] = i;
513}
514
515/* Push an entry onto G.by_depth and G.save_in_use.  */
516
517inline static void
518push_by_depth (page_entry *p, unsigned long *s)
519{
520  if (G.by_depth_in_use >= G.by_depth_max)
521    {
522      G.by_depth_max *= 2;
523      G.by_depth = xrealloc (G.by_depth,
524			     G.by_depth_max * sizeof (page_entry *));
525      G.save_in_use = xrealloc (G.save_in_use,
526				G.by_depth_max * sizeof (unsigned long *));
527    }
528  G.by_depth[G.by_depth_in_use] = p;
529  G.save_in_use[G.by_depth_in_use++] = s;
530}
531
532#if (GCC_VERSION < 3001)
533#define prefetch(X) ((void) X)
534#else
535#define prefetch(X) __builtin_prefetch (X)
536#endif
537
538#define save_in_use_p_i(__i) \
539  (G.save_in_use[__i])
540#define save_in_use_p(__p) \
541  (save_in_use_p_i (__p->index_by_depth))
542
543/* Returns nonzero if P was allocated in GC'able memory.  */
544
545static inline int
546ggc_allocated_p (const void *p)
547{
548  page_entry ***base;
549  size_t L1, L2;
550
551#if HOST_BITS_PER_PTR <= 32
552  base = &G.lookup[0];
553#else
554  page_table table = G.lookup;
555  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
556  while (1)
557    {
558      if (table == NULL)
559	return 0;
560      if (table->high_bits == high_bits)
561	break;
562      table = table->next;
563    }
564  base = &table->table[0];
565#endif
566
567  /* Extract the level 1 and 2 indices.  */
568  L1 = LOOKUP_L1 (p);
569  L2 = LOOKUP_L2 (p);
570
571  return base[L1] && base[L1][L2];
572}
573
574/* Traverse the page table and find the entry for a page.
575   Die (probably) if the object wasn't allocated via GC.  */
576
577static inline page_entry *
578lookup_page_table_entry (const void *p)
579{
580  page_entry ***base;
581  size_t L1, L2;
582
583#if HOST_BITS_PER_PTR <= 32
584  base = &G.lookup[0];
585#else
586  page_table table = G.lookup;
587  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
588  while (table->high_bits != high_bits)
589    table = table->next;
590  base = &table->table[0];
591#endif
592
593  /* Extract the level 1 and 2 indices.  */
594  L1 = LOOKUP_L1 (p);
595  L2 = LOOKUP_L2 (p);
596
597  return base[L1][L2];
598}
599
600/* Set the page table entry for a page.  */
601
602static void
603set_page_table_entry (void *p, page_entry *entry)
604{
605  page_entry ***base;
606  size_t L1, L2;
607
608#if HOST_BITS_PER_PTR <= 32
609  base = &G.lookup[0];
610#else
611  page_table table;
612  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
613  for (table = G.lookup; table; table = table->next)
614    if (table->high_bits == high_bits)
615      goto found;
616
617  /* Not found -- allocate a new table.  */
618  table = xcalloc (1, sizeof(*table));
619  table->next = G.lookup;
620  table->high_bits = high_bits;
621  G.lookup = table;
622found:
623  base = &table->table[0];
624#endif
625
626  /* Extract the level 1 and 2 indices.  */
627  L1 = LOOKUP_L1 (p);
628  L2 = LOOKUP_L2 (p);
629
630  if (base[L1] == NULL)
631    base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
632
633  base[L1][L2] = entry;
634}
635
636/* Prints the page-entry for object size ORDER, for debugging.  */
637
638void
639debug_print_page_list (int order)
640{
641  page_entry *p;
642  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
643	  (void *) G.page_tails[order]);
644  p = G.pages[order];
645  while (p != NULL)
646    {
647      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
648	      p->num_free_objects);
649      p = p->next;
650    }
651  printf ("NULL\n");
652  fflush (stdout);
653}
654
655#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* macros is defined.  */
659
660static inline char *
661alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
662{
663#ifdef HAVE_MMAP_ANON
664  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
665		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
666#endif
667#ifdef HAVE_MMAP_DEV_ZERO
668  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
669		     MAP_PRIVATE, G.dev_zero_fd, 0);
670#endif
671
672  if (page == (char *) MAP_FAILED)
673    {
674      perror ("virtual memory exhausted");
675      exit (FATAL_EXIT_CODE);
676    }
677
678  /* Remember that we allocated this memory.  */
679  G.bytes_mapped += size;
680
681  /* Pretend we don't have access to the allocated pages.  We'll enable
682     access to smaller pieces of the area in ggc_alloc.  Discard the
683     handle to avoid handle leak.  */
684  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
685
686  return page;
687}
688#endif
689#ifdef USING_MALLOC_PAGE_GROUPS
690/* Compute the index for this page into the page group.  */
691
692static inline size_t
693page_group_index (char *allocation, char *page)
694{
695  return (size_t) (page - allocation) >> G.lg_pagesize;
696}
697
698/* Set and clear the in_use bit for this page in the page group.  */
699
700static inline void
701set_page_group_in_use (page_group *group, char *page)
702{
703  group->in_use |= 1 << page_group_index (group->allocation, page);
704}
705
706static inline void
707clear_page_group_in_use (page_group *group, char *page)
708{
709  group->in_use &= ~(1 << page_group_index (group->allocation, page));
710}
711#endif
712
713/* Allocate a new page for allocating objects of size 2^ORDER,
714   and return an entry for it.  The entry is not added to the
715   appropriate page_table list.  */
716
717static inline struct page_entry *
718alloc_page (unsigned order)
719{
720  struct page_entry *entry, *p, **pp;
721  char *page;
722  size_t num_objects;
723  size_t bitmap_size;
724  size_t page_entry_size;
725  size_t entry_size;
726#ifdef USING_MALLOC_PAGE_GROUPS
727  page_group *group;
728#endif
729
730  num_objects = OBJECTS_PER_PAGE (order);
731  bitmap_size = BITMAP_SIZE (num_objects + 1);
732  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
733  entry_size = num_objects * OBJECT_SIZE (order);
734  if (entry_size < G.pagesize)
735    entry_size = G.pagesize;
736
737  entry = NULL;
738  page = NULL;
739
740  /* Check the list of free pages for one we can use.  */
741  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
742    if (p->bytes == entry_size)
743      break;
744
745  if (p != NULL)
746    {
747      /* Recycle the allocated memory from this page ...  */
748      *pp = p->next;
749      page = p->page;
750
751#ifdef USING_MALLOC_PAGE_GROUPS
752      group = p->group;
753#endif
754
755      /* ... and, if possible, the page entry itself.  */
756      if (p->order == order)
757	{
758	  entry = p;
759	  memset (entry, 0, page_entry_size);
760	}
761      else
762	free (p);
763    }
764#ifdef USING_MMAP
765  else if (entry_size == G.pagesize)
766    {
767      /* We want just one page.  Allocate a bunch of them and put the
768	 extras on the freelist.  (Can only do this optimization with
769	 mmap for backing store.)  */
770      struct page_entry *e, *f = G.free_pages;
771      int i;
772
773      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
774
775      /* This loop counts down so that the chain will be in ascending
776	 memory order.  */
777      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
778	{
779	  e = xcalloc (1, page_entry_size);
780	  e->order = order;
781	  e->bytes = G.pagesize;
782	  e->page = page + (i << G.lg_pagesize);
783	  e->next = f;
784	  f = e;
785	}
786
787      G.free_pages = f;
788    }
789  else
790    page = alloc_anon (NULL, entry_size);
791#endif
792#ifdef USING_MALLOC_PAGE_GROUPS
793  else
794    {
795      /* Allocate a large block of memory and serve out the aligned
796	 pages therein.  This results in much less memory wastage
797	 than the traditional implementation of valloc.  */
798
799      char *allocation, *a, *enda;
800      size_t alloc_size, head_slop, tail_slop;
801      int multiple_pages = (entry_size == G.pagesize);
802
803      if (multiple_pages)
804	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
805      else
806	alloc_size = entry_size + G.pagesize - 1;
807      allocation = xmalloc (alloc_size);
808
809      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
810      head_slop = page - allocation;
811      if (multiple_pages)
812	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
813      else
814	tail_slop = alloc_size - entry_size - head_slop;
815      enda = allocation + alloc_size - tail_slop;
816
817      /* We allocated N pages, which are likely not aligned, leaving
818	 us with N-1 usable pages.  We plan to place the page_group
819	 structure somewhere in the slop.  */
820      if (head_slop >= sizeof (page_group))
821	group = (page_group *)page - 1;
822      else
823	{
824	  /* We magically got an aligned allocation.  Too bad, we have
825	     to waste a page anyway.  */
826	  if (tail_slop == 0)
827	    {
828	      enda -= G.pagesize;
829	      tail_slop += G.pagesize;
830	    }
831	  gcc_assert (tail_slop >= sizeof (page_group));
832	  group = (page_group *)enda;
833	  tail_slop -= sizeof (page_group);
834	}
835
836      /* Remember that we allocated this memory.  */
837      group->next = G.page_groups;
838      group->allocation = allocation;
839      group->alloc_size = alloc_size;
840      group->in_use = 0;
841      G.page_groups = group;
842      G.bytes_mapped += alloc_size;
843
844      /* If we allocated multiple pages, put the rest on the free list.  */
845      if (multiple_pages)
846	{
847	  struct page_entry *e, *f = G.free_pages;
848	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
849	    {
850	      e = xcalloc (1, page_entry_size);
851	      e->order = order;
852	      e->bytes = G.pagesize;
853	      e->page = a;
854	      e->group = group;
855	      e->next = f;
856	      f = e;
857	    }
858	  G.free_pages = f;
859	}
860    }
861#endif
862
863  if (entry == NULL)
864    entry = xcalloc (1, page_entry_size);
865
866  entry->bytes = entry_size;
867  entry->page = page;
868  entry->context_depth = G.context_depth;
869  entry->order = order;
870  entry->num_free_objects = num_objects;
871  entry->next_bit_hint = 1;
872
873  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
874
875#ifdef USING_MALLOC_PAGE_GROUPS
876  entry->group = group;
877  set_page_group_in_use (group, page);
878#endif
879
880  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
881     increment the hint.  */
882  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
883    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
884
885  set_page_table_entry (page, entry);
886
887  if (GGC_DEBUG_LEVEL >= 2)
888    fprintf (G.debug_file,
889	     "Allocating page at %p, object size=%lu, data %p-%p\n",
890	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
891	     page + entry_size - 1);
892
893  return entry;
894}
895
/* Adjust the size of G.depth so that no index greater than the
   context depth of the topmost entry of G.by_depth is used.  */
898
899static inline void
900adjust_depth (void)
901{
902  page_entry *top;
903
904  if (G.by_depth_in_use)
905    {
906      top = G.by_depth[G.by_depth_in_use-1];
907
908      /* Peel back indices in depth that index into by_depth, so that
909	 as new elements are added to by_depth, we note the indices
910	 of those elements, if they are for new context depths.  */
911      while (G.depth_in_use > (size_t)top->context_depth+1)
912	--G.depth_in_use;
913    }
914}
915
916/* For a page that is no longer needed, put it on the free page list.  */
917
918static void
919free_page (page_entry *entry)
920{
921  if (GGC_DEBUG_LEVEL >= 2)
922    fprintf (G.debug_file,
923	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
924	     entry->page, entry->page + entry->bytes - 1);
925
926  /* Mark the page as inaccessible.  Discard the handle to avoid handle
927     leak.  */
928  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
929
930  set_page_table_entry (entry->page, NULL);
931
932#ifdef USING_MALLOC_PAGE_GROUPS
933  clear_page_group_in_use (entry->group, entry->page);
934#endif
935
936  if (G.by_depth_in_use > 1)
937    {
938      page_entry *top = G.by_depth[G.by_depth_in_use-1];
939      int i = entry->index_by_depth;
940
941      /* We cannot free a page from a context deeper than the current
942	 one.  */
943      gcc_assert (entry->context_depth == top->context_depth);
944
945      /* Put top element into freed slot.  */
946      G.by_depth[i] = top;
947      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
948      top->index_by_depth = i;
949    }
950  --G.by_depth_in_use;
951
952  adjust_depth ();
953
954  entry->next = G.free_pages;
955  G.free_pages = entry;
956}
957
958/* Release the free page cache to the system.  */
959
960static void
961release_pages (void)
962{
963#ifdef USING_MMAP
964  page_entry *p, *next;
965  char *start;
966  size_t len;
967
968  /* Gather up adjacent pages so they are unmapped together.  */
969  p = G.free_pages;
970
971  while (p)
972    {
973      start = p->page;
974      next = p->next;
975      len = p->bytes;
976      free (p);
977      p = next;
978
979      while (p && p->page == start + len)
980	{
981	  next = p->next;
982	  len += p->bytes;
983	  free (p);
984	  p = next;
985	}
986
987      munmap (start, len);
988      G.bytes_mapped -= len;
989    }
990
991  G.free_pages = NULL;
992#endif
993#ifdef USING_MALLOC_PAGE_GROUPS
994  page_entry **pp, *p;
995  page_group **gp, *g;
996
  /* Remove from the free list all pages belonging to entirely unused
     page groups.  */
998  pp = &G.free_pages;
999  while ((p = *pp) != NULL)
1000    if (p->group->in_use == 0)
1001      {
1002	*pp = p->next;
1003	free (p);
1004      }
1005    else
1006      pp = &p->next;
1007
1008  /* Remove all free page groups, and release the storage.  */
1009  gp = &G.page_groups;
1010  while ((g = *gp) != NULL)
1011    if (g->in_use == 0)
1012      {
1013	*gp = g->next;
1014	G.bytes_mapped -= g->alloc_size;
1015	free (g->allocation);
1016      }
1017    else
1018      gp = &g->next;
1019#endif
1020}
1021
1022/* This table provides a fast way to determine ceil(log_2(size)) for
1023   allocation requests.  The minimum allocation size is eight bytes.  */
1024
1025static unsigned char size_lookup[257] =
1026{
1027  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1028  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1029  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1030  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1031  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1032  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1033  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1034  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1035  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1036  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1037  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1038  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1039  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1040  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1041  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1042  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
1044};
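
/* Example of the intended use (illustrative only):

     size_t order = size_lookup[24];      // 5 with the table as written
     size_t bytes = OBJECT_SIZE (order);  // 32 once init_ggc has run

   init_ggc below also rewrites entries of this table so that requests
   which fit one of the extra_order_size_table sizes map to those
   orders instead.  */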
1045
1046/* Typed allocation function.  Does nothing special in this collector.  */
1047
1048void *
1049ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1050		      MEM_STAT_DECL)
1051{
1052  return ggc_alloc_stat (size PASS_MEM_STAT);
1053}
1054
1055/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1056
1057void *
1058ggc_alloc_stat (size_t size MEM_STAT_DECL)
1059{
1060  size_t order, word, bit, object_offset, object_size;
1061  struct page_entry *entry;
1062  void *result;
1063
1064  if (size <= 256)
1065    {
1066      order = size_lookup[size];
1067      object_size = OBJECT_SIZE (order);
1068    }
1069  else
1070    {
1071      order = 9;
1072      while (size > (object_size = OBJECT_SIZE (order)))
1073	order++;
1074    }
1075
1076  /* If there are non-full pages for this size allocation, they are at
1077     the head of the list.  */
1078  entry = G.pages[order];
1079
1080  /* If there is no page for this object size, or all pages in this
1081     context are full, allocate a new page.  */
1082  if (entry == NULL || entry->num_free_objects == 0)
1083    {
1084      struct page_entry *new_entry;
1085      new_entry = alloc_page (order);
1086
1087      new_entry->index_by_depth = G.by_depth_in_use;
1088      push_by_depth (new_entry, 0);
1089
      /* We can skip context depths; if we do, make sure we go all the
         way to the new depth.  */
1092      while (new_entry->context_depth >= G.depth_in_use)
1093	push_depth (G.by_depth_in_use-1);
1094
1095      /* If this is the only entry, it's also the tail.  If it is not
1096	 the only entry, then we must update the PREV pointer of the
1097	 ENTRY (G.pages[order]) to point to our new page entry.  */
1098      if (entry == NULL)
1099	G.page_tails[order] = new_entry;
1100      else
1101	entry->prev = new_entry;
1102
      /* Put new pages at the head of the page list.  By definition the
         entry at the head of the list always has a NULL PREV pointer.  */
1105      new_entry->next = entry;
1106      new_entry->prev = NULL;
1107      entry = new_entry;
1108      G.pages[order] = new_entry;
1109
1110      /* For a new page, we know the word and bit positions (in the
1111	 in_use bitmap) of the first available object -- they're zero.  */
1112      new_entry->next_bit_hint = 1;
1113      word = 0;
1114      bit = 0;
1115      object_offset = 0;
1116    }
1117  else
1118    {
1119      /* First try to use the hint left from the previous allocation
1120	 to locate a clear bit in the in-use bitmap.  We've made sure
1121	 that the one-past-the-end bit is always set, so if the hint
1122	 has run over, this test will fail.  */
1123      unsigned hint = entry->next_bit_hint;
1124      word = hint / HOST_BITS_PER_LONG;
1125      bit = hint % HOST_BITS_PER_LONG;
1126
1127      /* If the hint didn't work, scan the bitmap from the beginning.  */
1128      if ((entry->in_use_p[word] >> bit) & 1)
1129	{
1130	  word = bit = 0;
1131	  while (~entry->in_use_p[word] == 0)
1132	    ++word;
1133
1134#if GCC_VERSION >= 3004
1135	  bit = __builtin_ctzl (~entry->in_use_p[word]);
1136#else
1137	  while ((entry->in_use_p[word] >> bit) & 1)
1138	    ++bit;
1139#endif
1140
1141	  hint = word * HOST_BITS_PER_LONG + bit;
1142	}
1143
1144      /* Next time, try the next bit.  */
1145      entry->next_bit_hint = hint + 1;
1146
1147      object_offset = hint * object_size;
1148    }
1149
1150  /* Set the in-use bit.  */
1151  entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1152
1153  /* Keep a running total of the number of free objects.  If this page
1154     fills up, we may have to move it to the end of the list if the
1155     next page isn't full.  If the next page is full, all subsequent
1156     pages are full, so there's no need to move it.  */
1157  if (--entry->num_free_objects == 0
1158      && entry->next != NULL
1159      && entry->next->num_free_objects > 0)
1160    {
1161      /* We have a new head for the list.  */
1162      G.pages[order] = entry->next;
1163
1164      /* We are moving ENTRY to the end of the page table list.
1165	 The new page at the head of the list will have NULL in
1166	 its PREV field and ENTRY will have NULL in its NEXT field.  */
1167      entry->next->prev = NULL;
1168      entry->next = NULL;
1169
1170      /* Append ENTRY to the tail of the list.  */
1171      entry->prev = G.page_tails[order];
1172      G.page_tails[order]->next = entry;
1173      G.page_tails[order] = entry;
1174    }
1175
1176  /* Calculate the object's address.  */
1177  result = entry->page + object_offset;
1178#ifdef GATHER_STATISTICS
1179  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1180		       result PASS_MEM_STAT);
1181#endif
1182
1183#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning the object by writing 0xaf, so that we keep exactly
     the same semantics in the presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop
     the handle to avoid a handle leak.  */
1188  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));
1189
1190  /* `Poison' the entire allocated object, including any padding at
1191     the end.  */
1192  memset (result, 0xaf, object_size);
1193
  /* Make the bytes after the end of the object inaccessible.  Discard
     the handle to avoid a handle leak.  */
1196  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1197					    object_size - size));
1198#endif
1199
  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
1203  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1204
1205  /* Keep track of how many bytes are being allocated.  This
1206     information is used in deciding when to collect.  */
1207  G.allocated += object_size;
1208
1209  /* For timevar statistics.  */
1210  timevar_ggc_mem_total += object_size;
1211
1212#ifdef GATHER_STATISTICS
1213  {
1214    size_t overhead = object_size - size;
1215
1216    G.stats.total_overhead += overhead;
1217    G.stats.total_allocated += object_size;
1218    G.stats.total_overhead_per_order[order] += overhead;
1219    G.stats.total_allocated_per_order[order] += object_size;
1220
1221    if (size <= 32)
1222      {
1223	G.stats.total_overhead_under32 += overhead;
1224	G.stats.total_allocated_under32 += object_size;
1225      }
1226    if (size <= 64)
1227      {
1228	G.stats.total_overhead_under64 += overhead;
1229	G.stats.total_allocated_under64 += object_size;
1230      }
1231    if (size <= 128)
1232      {
1233	G.stats.total_overhead_under128 += overhead;
1234	G.stats.total_allocated_under128 += object_size;
1235      }
1236  }
1237#endif
1238
1239  if (GGC_DEBUG_LEVEL >= 3)
1240    fprintf (G.debug_file,
1241	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1242	     (unsigned long) size, (unsigned long) object_size, result,
1243	     (void *) entry);
1244
1245  return result;
1246}
1247
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */
1251
1252int
1253ggc_set_mark (const void *p)
1254{
1255  page_entry *entry;
1256  unsigned bit, word;
1257  unsigned long mask;
1258
  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
1261  entry = lookup_page_table_entry (p);
1262  gcc_assert (entry);
1263
1264  /* Calculate the index of the object on the page; this is its bit
1265     position in the in_use_p bitmap.  */
1266  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1267  word = bit / HOST_BITS_PER_LONG;
1268  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1269
1270  /* If the bit was previously set, skip it.  */
1271  if (entry->in_use_p[word] & mask)
1272    return 1;
1273
1274  /* Otherwise set it, and decrement the free object count.  */
1275  entry->in_use_p[word] |= mask;
1276  entry->num_free_objects -= 1;
1277
1278  if (GGC_DEBUG_LEVEL >= 4)
1279    fprintf (G.debug_file, "Marking %p\n", p);
1280
1281  return 0;
1282}
1283
1284/* Return 1 if P has been marked, zero otherwise.
1285   P must have been allocated by the GC allocator; it mustn't point to
1286   static objects, stack variables, or memory allocated with malloc.  */
1287
1288int
1289ggc_marked_p (const void *p)
1290{
1291  page_entry *entry;
1292  unsigned bit, word;
1293  unsigned long mask;
1294
  /* Look up the page on which the object is allocated.  If the object
     wasn't allocated by the collector, we'll probably die.  */
1297  entry = lookup_page_table_entry (p);
1298  gcc_assert (entry);
1299
1300  /* Calculate the index of the object on the page; this is its bit
1301     position in the in_use_p bitmap.  */
1302  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1303  word = bit / HOST_BITS_PER_LONG;
1304  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1305
1306  return (entry->in_use_p[word] & mask) != 0;
1307}
1308
1309/* Return the size of the gc-able object P.  */
1310
1311size_t
1312ggc_get_size (const void *p)
1313{
1314  page_entry *pe = lookup_page_table_entry (p);
1315  return OBJECT_SIZE (pe->order);
1316}
1317
1318/* Release the memory for object P.  */
1319
1320void
1321ggc_free (void *p)
1322{
1323  page_entry *pe = lookup_page_table_entry (p);
1324  size_t order = pe->order;
1325  size_t size = OBJECT_SIZE (order);
1326
1327#ifdef GATHER_STATISTICS
1328  ggc_free_overhead (p);
1329#endif
1330
1331  if (GGC_DEBUG_LEVEL >= 3)
1332    fprintf (G.debug_file,
1333	     "Freeing object, actual size=%lu, at %p on %p\n",
1334	     (unsigned long) size, p, (void *) pe);
1335
1336#ifdef ENABLE_GC_CHECKING
1337  /* Poison the data, to indicate the data is garbage.  */
1338  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
1339  memset (p, 0xa5, size);
1340#endif
1341  /* Let valgrind know the object is free.  */
1342  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));
1343
1344#ifdef ENABLE_GC_ALWAYS_COLLECT
1345  /* In the completely-anal-checking mode, we do *not* immediately free
1346     the data, but instead verify that the data is *actually* not
1347     reachable the next time we collect.  */
1348  {
1349    struct free_object *fo = xmalloc (sizeof (struct free_object));
1350    fo->object = p;
1351    fo->next = G.free_object_list;
1352    G.free_object_list = fo;
1353  }
1354#else
1355  {
1356    unsigned int bit_offset, word, bit;
1357
1358    G.allocated -= size;
1359
1360    /* Mark the object not-in-use.  */
1361    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1362    word = bit_offset / HOST_BITS_PER_LONG;
1363    bit = bit_offset % HOST_BITS_PER_LONG;
1364    pe->in_use_p[word] &= ~(1UL << bit);
1365
1366    if (pe->num_free_objects++ == 0)
1367      {
1368	page_entry *p, *q;
1369
1370	/* If the page is completely full, then it's supposed to
1371	   be after all pages that aren't.  Since we've freed one
1372	   object from a page that was full, we need to move the
1373	   page to the head of the list.
1374
1375	   PE is the node we want to move.  Q is the previous node
1376	   and P is the next node in the list.  */
1377	q = pe->prev;
1378	if (q && q->num_free_objects == 0)
1379	  {
1380	    p = pe->next;
1381
1382	    q->next = p;
1383
1384	    /* If PE was at the end of the list, then Q becomes the
1385	       new end of the list.  If PE was not the end of the
1386	       list, then we need to update the PREV field for P.  */
1387	    if (!p)
1388	      G.page_tails[order] = q;
1389	    else
1390	      p->prev = q;
1391
1392	    /* Move PE to the head of the list.  */
1393	    pe->next = G.pages[order];
1394	    pe->prev = NULL;
1395	    G.pages[order]->prev = pe;
1396	    G.pages[order] = pe;
1397	  }
1398
1399	/* Reset the hint bit to point to the only free object.  */
1400	pe->next_bit_hint = bit_offset;
1401      }
1402  }
1403#endif
1404}
1405
1406/* Subroutine of init_ggc which computes the pair of numbers used to
1407   perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1408
1409   This algorithm is taken from Granlund and Montgomery's paper
1410   "Division by Invariant Integers using Multiplication"
1411   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1412   constants).  */
1413
1414static void
1415compute_inverse (unsigned order)
1416{
1417  size_t size, inv;
1418  unsigned int e;
1419
1420  size = OBJECT_SIZE (order);
1421  e = 0;
1422  while (size % 2 == 0)
1423    {
1424      e++;
1425      size >>= 1;
1426    }
1427
1428  inv = size;
1429  while (inv * size != 1)
1430    inv = inv * (2 - inv*size);
1431
1432  DIV_MULT (order) = inv;
1433  DIV_SHIFT (order) = e;
1434}
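
/* Illustration of the loop above, with the arithmetic reduced to 8 bits
   for brevity: for an odd part of 3, successive values of inv are
   3, 235, 171, and 3 * 171 == 513 == 2 * 256 + 1, so 171 is the
   inverse of 3 modulo 2^8.  Each iteration doubles the number of
   correct low-order bits, so the loop converges quickly even at the
   full width of size_t.  */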
1435
1436/* Initialize the ggc-mmap allocator.  */
1437void
1438init_ggc (void)
1439{
1440  unsigned order;
1441
1442  G.pagesize = getpagesize();
1443  G.lg_pagesize = exact_log2 (G.pagesize);
1444
1445#ifdef HAVE_MMAP_DEV_ZERO
1446  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1447  if (G.dev_zero_fd == -1)
1448    internal_error ("open /dev/zero: %m");
1449#endif
1450
1451#if 0
1452  G.debug_file = fopen ("ggc-mmap.debug", "w");
1453#else
1454  G.debug_file = stdout;
1455#endif
1456
1457#ifdef USING_MMAP
1458  /* StunOS has an amazing off-by-one error for the first mmap allocation
1459     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1460     believe, is an unaligned page allocation, which would cause us to
1461     hork badly if we tried to use it.  */
1462  {
1463    char *p = alloc_anon (NULL, G.pagesize);
1464    struct page_entry *e;
1465    if ((size_t)p & (G.pagesize - 1))
1466      {
1467	/* How losing.  Discard this one and try another.  If we still
1468	   can't get something useful, give up.  */
1469
1470	p = alloc_anon (NULL, G.pagesize);
1471	gcc_assert (!((size_t)p & (G.pagesize - 1)));
1472      }
1473
1474    /* We have a good page, might as well hold onto it...  */
1475    e = xcalloc (1, sizeof (struct page_entry));
1476    e->bytes = G.pagesize;
1477    e->page = p;
1478    e->next = G.free_pages;
1479    G.free_pages = e;
1480  }
1481#endif
1482
1483  /* Initialize the object size table.  */
1484  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1485    object_size_table[order] = (size_t) 1 << order;
1486  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1487    {
1488      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1489
1490      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1491	 so that we're sure of getting aligned memory.  */
1492      s = ROUND_UP (s, MAX_ALIGNMENT);
1493      object_size_table[order] = s;
1494    }
1495
1496  /* Initialize the objects-per-page and inverse tables.  */
1497  for (order = 0; order < NUM_ORDERS; ++order)
1498    {
1499      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1500      if (objects_per_page_table[order] == 0)
1501	objects_per_page_table[order] = 1;
1502      compute_inverse (order);
1503    }
1504
1505  /* Reset the size_lookup array to put appropriately sized objects in
1506     the special orders.  All objects bigger than the previous power
1507     of two, but no greater than the special size, should go in the
1508     new order.  */
1509  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1510    {
1511      int o;
1512      int i;
1513
1514      o = size_lookup[OBJECT_SIZE (order)];
1515      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
1516	size_lookup[i] = order;
1517    }
1518
1519  G.depth_in_use = 0;
1520  G.depth_max = 10;
1521  G.depth = xmalloc (G.depth_max * sizeof (unsigned int));
1522
1523  G.by_depth_in_use = 0;
1524  G.by_depth_max = INITIAL_PTE_COUNT;
1525  G.by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
1526  G.save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));
1527}
1528
1529/* Start a new GGC zone.  */
1530
1531struct alloc_zone *
1532new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
1533{
1534  return NULL;
1535}
1536
1537/* Destroy a GGC zone.  */
1538void
1539destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
1540{
1541}
1542
1543/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1544   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1545
1546static void
1547ggc_recalculate_in_use_p (page_entry *p)
1548{
1549  unsigned int i;
1550  size_t num_objects;
1551
1552  /* Because the past-the-end bit in in_use_p is always set, we
1553     pretend there is one additional object.  */
1554  num_objects = OBJECTS_IN_PAGE (p) + 1;
1555
1556  /* Reset the free object count.  */
1557  p->num_free_objects = num_objects;
1558
1559  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1560  for (i = 0;
1561       i < CEIL (BITMAP_SIZE (num_objects),
1562		 sizeof (*p->in_use_p));
1563       ++i)
1564    {
1565      unsigned long j;
1566
1567      /* Something is in use if it is marked, or if it was in use in a
1568	 context further down the context stack.  */
1569      p->in_use_p[i] |= save_in_use_p (p)[i];
1570
1571      /* Decrement the free object count for every object allocated.  */
1572      for (j = p->in_use_p[i]; j; j >>= 1)
1573	p->num_free_objects -= (j & 1);
1574    }
1575
1576  gcc_assert (p->num_free_objects < num_objects);
1577}
1578
1579/* Unmark all objects.  */
1580
1581static void
1582clear_marks (void)
1583{
1584  unsigned order;
1585
1586  for (order = 2; order < NUM_ORDERS; order++)
1587    {
1588      page_entry *p;
1589
1590      for (p = G.pages[order]; p != NULL; p = p->next)
1591	{
1592	  size_t num_objects = OBJECTS_IN_PAGE (p);
1593	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1594
1595	  /* The data should be page-aligned.  */
1596	  gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1597
1598	  /* Pages that aren't in the topmost context are not collected;
1599	     nevertheless, we need their in-use bit vectors to store GC
1600	     marks.  So, back them up first.  */
1601	  if (p->context_depth < G.context_depth)
1602	    {
1603	      if (! save_in_use_p (p))
1604		save_in_use_p (p) = xmalloc (bitmap_size);
1605	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1606	    }
1607
	  /* Reset the number of free objects and clear the in-use
             bits.  These will be adjusted by ggc_set_mark.  */
1610	  p->num_free_objects = num_objects;
1611	  memset (p->in_use_p, 0, bitmap_size);
1612
1613	  /* Make sure the one-past-the-end bit is always set.  */
1614	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1615	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1616	}
1617    }
1618}
1619
1620/* Free all empty pages.  Partially empty pages need no attention
1621   because the `mark' bit doubles as an `unused' bit.  */
1622
1623static void
1624sweep_pages (void)
1625{
1626  unsigned order;
1627
1628  for (order = 2; order < NUM_ORDERS; order++)
1629    {
1630      /* The last page-entry to consider, regardless of entries
1631	 placed at the end of the list.  */
1632      page_entry * const last = G.page_tails[order];
1633
1634      size_t num_objects;
1635      size_t live_objects;
1636      page_entry *p, *previous;
1637      int done;
1638
1639      p = G.pages[order];
1640      if (p == NULL)
1641	continue;
1642
1643      previous = NULL;
1644      do
1645	{
1646	  page_entry *next = p->next;
1647
1648	  /* Loop until all entries have been examined.  */
1649	  done = (p == last);
1650
1651	  num_objects = OBJECTS_IN_PAGE (p);
1652
1653	  /* Add all live objects on this page to the count of
1654             allocated memory.  */
1655	  live_objects = num_objects - p->num_free_objects;
1656
1657	  G.allocated += OBJECT_SIZE (order) * live_objects;
1658
1659	  /* Only objects on pages in the topmost context should get
1660	     collected.  */
1661	  if (p->context_depth < G.context_depth)
1662	    ;
1663
1664	  /* Remove the page if it's empty.  */
1665	  else if (live_objects == 0)
1666	    {
1667	      /* If P was the first page in the list, then NEXT
1668		 becomes the new first page in the list, otherwise
1669		 splice P out of the forward pointers.  */
1670	      if (! previous)
1671		G.pages[order] = next;
1672	      else
1673		previous->next = next;
1674
1675	      /* Splice P out of the back pointers too.  */
1676	      if (next)
1677		next->prev = previous;
1678
1679	      /* Are we removing the last element?  */
1680	      if (p == G.page_tails[order])
1681		G.page_tails[order] = previous;
1682	      free_page (p);
1683	      p = previous;
1684	    }
1685
1686	  /* If the page is full, move it to the end.  */
1687	  else if (p->num_free_objects == 0)
1688	    {
1689	      /* Don't move it if it's already at the end.  */
1690	      if (p != G.page_tails[order])
1691		{
1692		  /* Move p to the end of the list.  */
1693		  p->next = NULL;
1694		  p->prev = G.page_tails[order];
1695		  G.page_tails[order]->next = p;
1696
1697		  /* Update the tail pointer...  */
1698		  G.page_tails[order] = p;
1699
1700		  /* ... and the head pointer, if necessary.  */
1701		  if (! previous)
1702		    G.pages[order] = next;
1703		  else
1704		    previous->next = next;
1705
1706		  /* And update the backpointer in NEXT if necessary.  */
1707		  if (next)
1708		    next->prev = previous;
1709
1710		  p = previous;
1711		}
1712	    }
1713
1714	  /* If we've fallen through to here, it's a page in the
1715	     topmost context that is neither full nor empty.  Such a
1716	     page must precede pages at lesser context depth in the
1717	     list, so move it to the head.  */
1718	  else if (p != G.pages[order])
1719	    {
1720	      previous->next = p->next;
1721
1722	      /* Update the backchain in the next node if it exists.  */
1723	      if (p->next)
1724		p->next->prev = previous;
1725
1726	      /* Move P to the head of the list.  */
1727	      p->next = G.pages[order];
1728	      p->prev = NULL;
1729	      G.pages[order]->prev = p;
1730
1731	      /* Update the head pointer.  */
1732	      G.pages[order] = p;
1733
1734	      /* Are we moving the last element?  */
1735	      if (G.page_tails[order] == p)
1736	        G.page_tails[order] = previous;
1737	      p = previous;
1738	    }
1739
1740	  previous = p;
1741	  p = next;
1742	}
1743      while (! done);
1744
1745      /* Now, restore the in_use_p vectors for any pages from contexts
1746         other than the current one.  */
1747      for (p = G.pages[order]; p; p = p->next)
1748	if (p->context_depth != G.context_depth)
1749	  ggc_recalculate_in_use_p (p);
1750    }
1751}
1752
1753#ifdef ENABLE_GC_CHECKING
1754/* Clobber all free objects.  */
1755
1756static void
1757poison_pages (void)
1758{
1759  unsigned order;
1760
1761  for (order = 2; order < NUM_ORDERS; order++)
1762    {
1763      size_t size = OBJECT_SIZE (order);
1764      page_entry *p;
1765
1766      for (p = G.pages[order]; p != NULL; p = p->next)
1767	{
1768	  size_t num_objects;
1769	  size_t i;
1770
1771	  if (p->context_depth != G.context_depth)
1772	    /* Since we don't do any collection for pages in pushed
1773	       contexts, there's no need to do any poisoning.  And
1774	       besides, the IN_USE_P array isn't valid until we pop
1775	       contexts.  */
1776	    continue;
1777
1778	  num_objects = OBJECTS_IN_PAGE (p);
1779	  for (i = 0; i < num_objects; i++)
1780	    {
1781	      size_t word, bit;
1782	      word = i / HOST_BITS_PER_LONG;
1783	      bit = i % HOST_BITS_PER_LONG;
1784	      if (((p->in_use_p[word] >> bit) & 1) == 0)
1785		{
1786		  char *object = p->page + i * size;
1787
		  /* Keep poison-by-write when we expect to use Valgrind,
		     so that exactly the same memory semantics are kept
		     in case there are memory errors.  We override this
		     request below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
		}
	    }
	}
    }
}
#else
#define poison_pages()
#endif
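
/* Worked example of the in_use_p indexing used above (illustrative
   only; assumes HOST_BITS_PER_LONG == 64): object number 70 on a page
   is tracked by word 70 / 64 == 1 and bit 70 % 64 == 6, so its mark is
   read with (p->in_use_p[1] >> 6) & 1.  */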

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
	 do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
	 free_object entry, so that we can verify that the address
	 isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
	{
	  f->next = still_free;
	  still_free = f;
	}
      else
	free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
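
/* Worked example of the collection trigger above (parameter values are
   hypothetical, not necessarily this build's defaults): with
   GGC_MIN_HEAPSIZE == 4096 (kB) and GGC_MIN_EXPAND == 30, a run whose
   previous collection left 8 MB live computes allocated_last_gc == 8 MB
   and min_expand == 2.4 MB, so ggc_collect returns immediately until
   G.allocated reaches roughly 10.4 MB, unless ggc_force_collect is
   set.  */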

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
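
/* A minimal usage sketch of the two macros above (illustrative only,
   hence not compiled): values below 10k print as raw bytes with a
   blank label, values below 10M in kilobytes, everything else in
   megabytes.  */
#if 0
static void
print_scaled_example (void)
{
  size_t bytes = 2 * 1024 * 1024;

  /* Prints "2048k".  */
  fprintf (stderr, "%lu%c\n", SCALE (bytes), STAT_LABEL (bytes));
}
#endif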

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s  %10s  %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                        %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                       %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          /* OBJECT_SIZE yields a size_t, so cast it for printing.  */
          fprintf (stderr, "Total Overhead  page size %7lu:     %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu:     %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return xcalloc (sizeof (struct ggc_pch_data), 1);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
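
/* Illustrative note on the mapping above: requests of at most 256
   bytes are classified through size_lookup, while anything larger is
   matched by scanning upward from order 9 for the first order whose
   OBJECT_SIZE is big enough -- e.g. a hypothetical 300-byte object is
   counted against the smallest order of at least 300 bytes.  */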

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}
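
/* Worked example of the rounding above (numbers are hypothetical):
   with G.pagesize == 4096, an order holding 100 objects of 32 bytes
   contributes ROUND_UP (3200, 4096) == 4096 bytes, i.e. each order's
   objects occupy a whole number of pages in the PCH image.  */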

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}


char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size <= 256)
    order = size_lookup[size];
  else
    {
      order = 9;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad
     the object out to OBJECT_SIZE (order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}
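
/* Worked example of the padding logic above (illustrative): if a
   10-byte string lands in an order whose OBJECT_SIZE is 16, the 10
   bytes are written and then 6 zero bytes are copied from emptyBytes;
   only a padding larger than 256 bytes falls back to fseek.  After the
   last object of an order, the file position is additionally advanced
   so that the order ends on a page boundary.  */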

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries that were just added to the end of
   by_depth to the front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = xmalloc (G.by_depth_max * sizeof (page_entry *));
  new_save_in_use = xmalloc (G.by_depth_max * sizeof (unsigned long *));

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* Last, we update the depth pointers in G.depth.  The first entry
     is already 0, and context 0 entries always start at index 0, so
     there is nothing to update in the first slot.  We need a second
     slot only if we have old PTEs, and if we do, they start at index
     count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
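
/* Worked example of the shuffle above (illustrative): with three old
   PTEs O0 O1 O2 and two new ones N0 N1, by_depth goes from
   [O0 O1 O2 N0 N1] to [N0 N1 O0 O1 O2], index_by_depth is refreshed to
   match, and push_depth (2) records that the old entries now begin at
   index 2.  */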

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
			   - sizeof (long)
			   + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
	 end of the varrays; later, we will move it to the front of
	 the varrays, as the PCH page tables are at context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *)addr;
}
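
/* Worked example of the bitmap initialization in the read loop above
   (illustrative; assumes HOST_BITS_PER_LONG == 64): for an order with
   num_objs == 100, the first loop fills word 0 (bits 0-63) with -1 and
   the second loop sets bits 64-100 of word 1, so every PCH object --
   plus the extra trailing bit the bitmap carries -- is marked in
   use.  */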