ggc-page.c revision 117395
/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#ifdef ENABLE_VALGRIND_CHECKING
#include <valgrind.h>
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP

#endif

#ifdef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP

#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at four bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
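
/* For example (a sketch, not part of the allocator proper): a 20-byte
   request is rounded up to 32 bytes and served from an order-5 page,
   wasting 12 bytes; the extra_order_size_table below adds a few
   non-power-of-two object sizes to reduce exactly this kind of waste
   for common tree and RTL structures.  */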

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif


/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))

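/* A worked example (assuming a 4096-byte system page, so G.lg_pagesize
   == 12 and PAGE_L2_BITS == 12): for p == (void *) 0x12345678,
   LOOKUP_L1 (p) == 0x12 and LOOKUP_L2 (p) == 0x345, so the page-entry
   is found at base[0x12][0x345].  */
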
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))

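/* For instance (a sketch, under the assumption that some order has
   OBJECT_SIZE == 24): compute_inverse below yields M == 0xAAAAAAAB
   (the inverse of 3 modulo 2^32) and S == 3, so for offset 48 we get
   OFFSET_TO_BIT == (48 * 0xAAAAAAAB) >> 3 == 16 >> 3 == 2, which is
   indeed 48 / 24.  */
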
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (sizeof (struct rtx_def) + ((NSLOTS) - 1) * sizeof (rtunion))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct tree_decl),
  sizeof (struct tree_list),
  RTL_SIZE (2),			/* REG, MEM, PLUS, etc.  */
  RTL_SIZE (10),		/* INSN, CALL_INSN, JUMP_INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
#ifdef HAVE_LONG_DOUBLE
    long double d;
#else
    double d;
#endif
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

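/* As an illustration (host-dependent, so only a sketch): on a host
   where the most-aligned member of the union needs 8-byte alignment,
   the union U starts at offset 8 within struct max_alignment, so
   MAX_ALIGNMENT == 8 and init_ggc rounds odd-sized orders up to
   multiples of 8.  */
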
/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  unsigned int mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth array where this page_entry
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32 bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index into by_depth where the
     given depth starts.  This array is indexed by the context depth
     we are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry; all
     page_entries can be found in here, ordered by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

} G;

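/* A small illustration (hypothetical numbers): if G.depth_in_use == 2
   with G.depth == {0, 3} and G.by_depth_in_use == 5, then
   by_depth[0..2] are pages created at context depth 0 and
   by_depth[3..4] are pages created at the next recorded depth.  */
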
/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))

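/* For example, on a host with 32-bit longs, a page holding 512 objects
   needs CEIL (512, 32) * 4 == 64 bytes of bitmap.  Note that alloc_page
   below passes num_objects + 1 here, leaving room for the
   one-past-the-end sentry bit.  */
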
/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  */
#define GGC_QUIRE_SIZE 16

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p PARAMS ((const void *));
static page_entry *lookup_page_table_entry PARAMS ((const void *));
static void set_page_table_entry PARAMS ((void *, page_entry *));
#ifdef USING_MMAP
static char *alloc_anon PARAMS ((char *, size_t));
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index PARAMS ((char *, char *));
static void set_page_group_in_use PARAMS ((page_group *, char *));
static void clear_page_group_in_use PARAMS ((page_group *, char *));
#endif
static struct page_entry * alloc_page PARAMS ((unsigned));
static void free_page PARAMS ((struct page_entry *));
static void release_pages PARAMS ((void));
static void clear_marks PARAMS ((void));
static void sweep_pages PARAMS ((void));
static void ggc_recalculate_in_use_p PARAMS ((page_entry *));
static void compute_inverse PARAMS ((unsigned));
static inline void adjust_depth PARAMS ((void));

#ifdef ENABLE_GC_CHECKING
static void poison_pages PARAMS ((void));
#endif

void debug_print_page_list PARAMS ((int));
static void push_depth PARAMS ((unsigned int));
static void push_by_depth PARAMS ((page_entry *, unsigned long *));

/* Push an entry onto G.depth.  */

inline static void
push_depth (i)
     unsigned int i;
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = (unsigned int *) xrealloc ((char *) G.depth,
					   G.depth_max * sizeof (unsigned int));
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (p, s)
     page_entry *p;
     unsigned long *s;
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = (page_entry **) xrealloc ((char *) G.by_depth,
					     G.by_depth_max * sizeof (page_entry *));
      G.save_in_use = (unsigned long **) xrealloc ((char *) G.save_in_use,
						   G.by_depth_max * sizeof (unsigned long *));
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

/* For the 3.3 release, we will avoid prefetch, as it isn't tested widely.  */
#define prefetch(X) ((void) X)

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (p)
     const void *p;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (p, entry)
     void *p;
     page_entry *entry;
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = (page_table) xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

void
debug_print_page_list (order)
     int order;
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (PTR) G.pages[order],
	  (PTR) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (PTR) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (pref, size)
     char *pref ATTRIBUTE_UNUSED;
     size_t size;
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (allocation, page)
     char *allocation, *page;
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}
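
/* E.g., with 4096-byte pages, the page starting at ALLOCATION + 0x3000
   has index 3, and set_page_group_in_use below records it by setting
   bit 3 of the group's in_use mask.  */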

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (group, page)
     page_group *group;
     char *page;
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (order)
     unsigned order;
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = (struct page_entry *) xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  if (tail_slop < sizeof (page_group))
	    abort ();
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = (struct page_entry *) xcalloc (1, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = (struct page_entry *) xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
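
  /* For example (assuming 4096-byte pages, 8-byte objects, and 32-bit
     longs): num_objects == 512, so the sentry is bit 0 of in_use_p[16],
     one past the 512 real object bits.  */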

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (PTR) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth ()
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}
857117395Skan
85890075Sobrien/* For a page that is no longer needed, put it on the free page list.  */
85990075Sobrien
86090075Sobrienstatic inline void
86190075Sobrienfree_page (entry)
86290075Sobrien     page_entry *entry;
86390075Sobrien{
86490075Sobrien  if (GGC_DEBUG_LEVEL >= 2)
865117395Skan    fprintf (G.debug_file,
86690075Sobrien	     "Deallocating page at %p, data %p-%p\n", (PTR) entry,
86790075Sobrien	     entry->page, entry->page + entry->bytes - 1);
86890075Sobrien
869117395Skan  /* Mark the page as inaccessible.  Discard the handle to avoid handle
870117395Skan     leak.  */
871117395Skan  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
872117395Skan
87390075Sobrien  set_page_table_entry (entry->page, NULL);
87490075Sobrien
87590075Sobrien#ifdef USING_MALLOC_PAGE_GROUPS
87690075Sobrien  clear_page_group_in_use (entry->group, entry->page);
87790075Sobrien#endif
87890075Sobrien
879117395Skan  if (G.by_depth_in_use > 1)
880117395Skan    {
881117395Skan      page_entry *top = G.by_depth[G.by_depth_in_use-1];
882117395Skan
883117395Skan      /* If they are at the same depth, put top element into freed
884117395Skan	 slot.  */
885117395Skan      if (entry->context_depth == top->context_depth)
886117395Skan	{
887117395Skan	  int i = entry->index_by_depth;
888117395Skan	  G.by_depth[i] = top;
889117395Skan	  G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
890117395Skan	  top->index_by_depth = i;
891117395Skan	}
892117395Skan      else
893117395Skan	{
894117395Skan	  /* We cannot free a page from a context deeper than the
895117395Skan	     current one.  */
896117395Skan	  abort ();
897117395Skan	}
898117395Skan    }
899117395Skan  --G.by_depth_in_use;
900117395Skan
901117395Skan  adjust_depth ();
902117395Skan
90390075Sobrien  entry->next = G.free_pages;
90490075Sobrien  G.free_pages = entry;
90590075Sobrien}
90690075Sobrien
/* Release the free page cache to the system.  */

static void
release_pages ()
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove from the list all pages that belong to completely free
     page groups.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */

static unsigned char size_lookup[257] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8
};
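
/* For example, size_lookup[16] == 4 (a 16-byte request exactly fills a
   16-byte object) while size_lookup[24] == 5 (24 bytes round up to 32).
   init_ggc below rewrites some of these entries so that sizes matching
   an extra order map there instead.  */
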
99490075Sobrien
995117395Skan/* Allocate a chunk of memory of SIZE bytes.  If ZERO is nonzero, the
99690075Sobrien   memory is zeroed; otherwise, its contents are undefined.  */
99790075Sobrien
99890075Sobrienvoid *
99990075Sobrienggc_alloc (size)
100090075Sobrien     size_t size;
100190075Sobrien{
100290075Sobrien  unsigned order, word, bit, object_offset;
100390075Sobrien  struct page_entry *entry;
100490075Sobrien  void *result;
100590075Sobrien
100690075Sobrien  if (size <= 256)
100790075Sobrien    order = size_lookup[size];
100890075Sobrien  else
100990075Sobrien    {
101090075Sobrien      order = 9;
101190075Sobrien      while (size > OBJECT_SIZE (order))
101290075Sobrien	order++;
101390075Sobrien    }
101490075Sobrien
101590075Sobrien  /* If there are non-full pages for this size allocation, they are at
101690075Sobrien     the head of the list.  */
101790075Sobrien  entry = G.pages[order];
101890075Sobrien
101990075Sobrien  /* If there is no page for this object size, or all pages in this
102090075Sobrien     context are full, allocate a new page.  */
102190075Sobrien  if (entry == NULL || entry->num_free_objects == 0)
102290075Sobrien    {
102390075Sobrien      struct page_entry *new_entry;
102490075Sobrien      new_entry = alloc_page (order);
1025117395Skan
1026117395Skan      new_entry->index_by_depth = G.by_depth_in_use;
1027117395Skan      push_by_depth (new_entry, 0);
1028117395Skan
1029117395Skan      /* We can skip context depths, if we do, make sure we go all the
1030117395Skan	 way to the new depth.  */
1031117395Skan      while (new_entry->context_depth >= G.depth_in_use)
1032117395Skan	push_depth (G.by_depth_in_use-1);
1033117395Skan
103490075Sobrien      /* If this is the only entry, it's also the tail.  */
103590075Sobrien      if (entry == NULL)
103690075Sobrien	G.page_tails[order] = new_entry;
1037117395Skan
103890075Sobrien      /* Put new pages at the head of the page list.  */
103990075Sobrien      new_entry->next = entry;
104090075Sobrien      entry = new_entry;
104190075Sobrien      G.pages[order] = new_entry;
104290075Sobrien
104390075Sobrien      /* For a new page, we know the word and bit positions (in the
104490075Sobrien	 in_use bitmap) of the first available object -- they're zero.  */
104590075Sobrien      new_entry->next_bit_hint = 1;
104690075Sobrien      word = 0;
104790075Sobrien      bit = 0;
104890075Sobrien      object_offset = 0;
104990075Sobrien    }
105090075Sobrien  else
105190075Sobrien    {
105290075Sobrien      /* First try to use the hint left from the previous allocation
105390075Sobrien	 to locate a clear bit in the in-use bitmap.  We've made sure
105490075Sobrien	 that the one-past-the-end bit is always set, so if the hint
105590075Sobrien	 has run over, this test will fail.  */
105690075Sobrien      unsigned hint = entry->next_bit_hint;
105790075Sobrien      word = hint / HOST_BITS_PER_LONG;
105890075Sobrien      bit = hint % HOST_BITS_PER_LONG;
1059117395Skan
106090075Sobrien      /* If the hint didn't work, scan the bitmap from the beginning.  */
106190075Sobrien      if ((entry->in_use_p[word] >> bit) & 1)
106290075Sobrien	{
106390075Sobrien	  word = bit = 0;
106490075Sobrien	  while (~entry->in_use_p[word] == 0)
106590075Sobrien	    ++word;
106690075Sobrien	  while ((entry->in_use_p[word] >> bit) & 1)
106790075Sobrien	    ++bit;
106890075Sobrien	  hint = word * HOST_BITS_PER_LONG + bit;
106990075Sobrien	}
107090075Sobrien
107190075Sobrien      /* Next time, try the next bit.  */
107290075Sobrien      entry->next_bit_hint = hint + 1;
107390075Sobrien
107490075Sobrien      object_offset = hint * OBJECT_SIZE (order);
107590075Sobrien    }
107690075Sobrien
107790075Sobrien  /* Set the in-use bit.  */
107890075Sobrien  entry->in_use_p[word] |= ((unsigned long) 1 << bit);
107990075Sobrien
108090075Sobrien  /* Keep a running total of the number of free objects.  If this page
108190075Sobrien     fills up, we may have to move it to the end of the list if the
108290075Sobrien     next page isn't full.  If the next page is full, all subsequent
108390075Sobrien     pages are full, so there's no need to move it.  */
108490075Sobrien  if (--entry->num_free_objects == 0
108590075Sobrien      && entry->next != NULL
108690075Sobrien      && entry->next->num_free_objects > 0)
108790075Sobrien    {
108890075Sobrien      G.pages[order] = entry->next;
108990075Sobrien      entry->next = NULL;
109090075Sobrien      G.page_tails[order]->next = entry;
109190075Sobrien      G.page_tails[order] = entry;
109290075Sobrien    }
109390075Sobrien
109490075Sobrien  /* Calculate the object's address.  */
109590075Sobrien  result = entry->page + object_offset;
109690075Sobrien
1097117395Skan#ifdef ENABLE_GC_CHECKING
1098117395Skan  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1099117395Skan     exact same semantics in presence of memory bugs, regardless of
1100117395Skan     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
1101117395Skan     handle to avoid handle leak.  */
1102117395Skan  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
1103117395Skan
110490075Sobrien  /* `Poison' the entire allocated object, including any padding at
110590075Sobrien     the end.  */
110690075Sobrien  memset (result, 0xaf, OBJECT_SIZE (order));
1107117395Skan
1108117395Skan  /* Make the bytes after the end of the object unaccessible.  Discard the
1109117395Skan     handle to avoid handle leak.  */
1110117395Skan  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
1111117395Skan					    OBJECT_SIZE (order) - size));
111290075Sobrien#endif
111390075Sobrien
1114117395Skan  /* Tell Valgrind that the memory is there, but its content isn't
1115117395Skan     defined.  The bytes at the end of the object are still marked
1116117395Skan     unaccessible.  */
1117117395Skan  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
1118117395Skan
111990075Sobrien  /* Keep track of how many bytes are being allocated.  This
112090075Sobrien     information is used in deciding when to collect.  */
112190075Sobrien  G.allocated += OBJECT_SIZE (order);
112290075Sobrien
112390075Sobrien  if (GGC_DEBUG_LEVEL >= 3)
1124117395Skan    fprintf (G.debug_file,
1125117395Skan	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1126117395Skan	     (unsigned long) size, (unsigned long) OBJECT_SIZE (order), result,
1127117395Skan	     (PTR) entry);
112890075Sobrien
112990075Sobrien  return result;
113090075Sobrien}
113190075Sobrien
/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (p)
     const void *p;
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
#ifdef ENABLE_CHECKING
  if (entry == NULL)
    abort ();
#endif

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (p)
     const void *p;
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (order)
     unsigned order;
{
  unsigned size, inv, e;

  /* There can be only one object per "page" in a bucket for sizes
     larger than half a machine page; it will always have offset zero.  */
  if (OBJECT_SIZE (order) > G.pagesize/2)
    {
      if (OBJECTS_PER_PAGE (order) != 1)
	abort ();

      DIV_MULT (order) = 1;
      DIV_SHIFT (order) = 0;
      return;
    }

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
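
/* A worked example (a sketch, for OBJECT_SIZE (order) == 24): the loop
   above strips the factor 2^3, leaving e == 3 and size == 3; the
   Newton-Raphson iteration then converges to inv == 0xAAAAAAAB, the
   multiplicative inverse of 3 modulo 2^32.  Exact division checks out:
   OFFSET_TO_BIT (48, order) == (48 * 0xAAAAAAAB) >> 3 == 16 >> 3 == 2.  */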
1252117395Skan
125390075Sobrien/* Initialize the ggc-mmap allocator.  */
125490075Sobrienvoid
125590075Sobrieninit_ggc ()
125690075Sobrien{
125790075Sobrien  unsigned order;
125890075Sobrien
125990075Sobrien  G.pagesize = getpagesize();
126090075Sobrien  G.lg_pagesize = exact_log2 (G.pagesize);
126190075Sobrien
126290075Sobrien#ifdef HAVE_MMAP_DEV_ZERO
126390075Sobrien  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
126490075Sobrien  if (G.dev_zero_fd == -1)
1265117395Skan    fatal_io_error ("open /dev/zero");
126690075Sobrien#endif
126790075Sobrien
126890075Sobrien#if 0
126990075Sobrien  G.debug_file = fopen ("ggc-mmap.debug", "w");
127090075Sobrien#else
127190075Sobrien  G.debug_file = stdout;
127290075Sobrien#endif
127390075Sobrien
127490075Sobrien#ifdef USING_MMAP
127590075Sobrien  /* StunOS has an amazing off-by-one error for the first mmap allocation
127690075Sobrien     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
127790075Sobrien     believe, is an unaligned page allocation, which would cause us to
127890075Sobrien     hork badly if we tried to use it.  */
127990075Sobrien  {
128090075Sobrien    char *p = alloc_anon (NULL, G.pagesize);
128190075Sobrien    struct page_entry *e;
128290075Sobrien    if ((size_t)p & (G.pagesize - 1))
128390075Sobrien      {
128490075Sobrien	/* How losing.  Discard this one and try another.  If we still
128590075Sobrien	   can't get something useful, give up.  */
128690075Sobrien
128790075Sobrien	p = alloc_anon (NULL, G.pagesize);
128890075Sobrien	if ((size_t)p & (G.pagesize - 1))
128990075Sobrien	  abort ();
129090075Sobrien      }
129190075Sobrien
129290075Sobrien    /* We have a good page, might as well hold onto it...  */
129390075Sobrien    e = (struct page_entry *) xcalloc (1, sizeof (struct page_entry));
129490075Sobrien    e->bytes = G.pagesize;
129590075Sobrien    e->page = p;
129690075Sobrien    e->next = G.free_pages;
129790075Sobrien    G.free_pages = e;
129890075Sobrien  }
129990075Sobrien#endif
130090075Sobrien
130190075Sobrien  /* Initialize the object size table.  */
130290075Sobrien  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
130390075Sobrien    object_size_table[order] = (size_t) 1 << order;
130490075Sobrien  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
130590075Sobrien    {
130690075Sobrien      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
130790075Sobrien
130890075Sobrien      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
130990075Sobrien	 so that we're sure of getting aligned memory.  */
      s = CEIL (s, MAX_ALIGNMENT) * MAX_ALIGNMENT;
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
	objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      o = size_lookup[OBJECT_SIZE (order)];
      for (i = OBJECT_SIZE (order); size_lookup [i] == o; --i)
	size_lookup[i] = order;
    }
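
  /* For instance, if one of the extra orders had OBJECT_SIZE 24 (a
     hypothetical value), the loop above would redirect requests of
     17..24 bytes to that order, while requests of 25..32 bytes would
     keep using the 32-byte power-of-two order.  */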

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = (unsigned int *) xmalloc (G.depth_max * sizeof (unsigned int));

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = (page_entry **) xmalloc (G.by_depth_max * sizeof (page_entry *));
  G.save_in_use = (unsigned long **) xmalloc (G.by_depth_max * sizeof (unsigned long *));
}

/* Increment the `GC context'.  Objects allocated in an outer context
   are never freed, eliminating the need to register their roots.  */

void
ggc_push_context ()
{
  ++G.context_depth;

  /* Die on wrap.  */
  if (G.context_depth >= HOST_BITS_PER_LONG)
    abort ();
}
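
/* A minimal usage sketch (a hypothetical caller, not part of this
   file): objects allocated between a push and the matching pop
   survive the pop by migrating to the outer context, so their roots
   need not be registered.  */
#if 0
static void
example_nested_context ()
{
  tree t;

  ggc_push_context ();
  t = make_node (INTEGER_CST);	/* Allocated at the inner depth.  */
  ggc_collect ();		/* Pages in outer contexts are not
				   collected.  */
  ggc_pop_context ();		/* T now belongs to the outer context.  */
}
#endif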

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (p)
     page_entry *p;
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_PER_PAGE (p->order) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
		 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
	 context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];
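      /* For example, a marked word of 0x5 merged with saved bits of
	 0x3 yields 0x7; three objects are then deducted below.  */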

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
	p->num_free_objects -= (j & 1);
    }

  if (p->num_free_objects >= num_objects)
    abort ();
}

/* Decrement the `GC context'.  All objects allocated since the
   previous ggc_push_context are migrated to the outer context.  */

void
ggc_pop_context ()
{
  unsigned long omask;
  unsigned int depth, i, e;
#ifdef ENABLE_CHECKING
  unsigned int order;
#endif

  depth = --G.context_depth;
  omask = (unsigned long)1 << (depth + 1);

  if (!((G.context_depth_allocations | G.context_depth_collections) & omask))
    return;

  G.context_depth_allocations |= (G.context_depth_allocations & omask) >> 1;
  G.context_depth_allocations &= omask - 1;
  G.context_depth_collections &= omask - 1;
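
  /* For example, popping from depth 3 back to depth 2 gives OMASK ==
     0x8: an allocation recorded at the old depth (bit 3) is folded
     into bit 2, and the bits at or above the old depth are cleared.  */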

  /* The G.depth array is shortened so that the last index is the
     context_depth of the top element of by_depth.  */
  if (depth+1 < G.depth_in_use)
    e = G.depth[depth+1];
  else
    e = G.by_depth_in_use;

  /* We might not have any PTEs of depth DEPTH.  */
  if (depth < G.depth_in_use)
    {
      /* First go through all the pages at depth DEPTH to recalculate
	 the in-use bits.  */
      for (i = G.depth[depth]; i < e; ++i)
	{
	  page_entry *p;

#ifdef ENABLE_CHECKING
	  p = G.by_depth[i];

	  /* Check that all of the pages really are at the depth that
	     we expect.  */
	  if (p->context_depth != depth)
	    abort ();
	  if (p->index_by_depth != i)
	    abort ();
#endif

	  prefetch (&save_in_use_p_i (i+8));
	  prefetch (&save_in_use_p_i (i+16));
	  if (save_in_use_p_i (i))
	    {
	      p = G.by_depth[i];
	      ggc_recalculate_in_use_p (p);
	      free (save_in_use_p_i (i));
	      save_in_use_p_i (i) = 0;
	    }
	}
    }

  /* Then reset all page_entries with a depth greater than DEPTH
     to be at DEPTH.  */
  for (i = e; i < G.by_depth_in_use; ++i)
    {
      page_entry *p = G.by_depth[i];

      /* Check that all of the pages really are at the depth we
	 expect.  */
#ifdef ENABLE_CHECKING
      if (p->context_depth <= depth)
	abort ();
      if (p->index_by_depth != i)
	abort ();
#endif
      p->context_depth = depth;
    }

  adjust_depth ();

#ifdef ENABLE_CHECKING
  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  if (p->context_depth > depth)
	    abort ();
	  else if (p->context_depth == depth && save_in_use_p (p))
	    abort ();
	}
    }
#endif
}

/* Unmark all objects.  */

static inline void
clear_marks ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
#ifdef ENABLE_CHECKING
	  /* The data should be page-aligned.  */
	  if ((size_t) p->page & (G.pagesize - 1))
	    abort ();
#endif

	  /* Pages that aren't in the topmost context are not collected;
	     nevertheless, we need their in-use bit vectors to store GC
	     marks.  So, back them up first.  */
	  if (p->context_depth < G.context_depth)
	    {
	      if (! save_in_use_p (p))
		save_in_use_p (p) = xmalloc (bitmap_size);
	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
	    }

	  /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
	  p->num_free_objects = num_objects;
	  memset (p->in_use_p, 0, bitmap_size);

	  /* Make sure the one-past-the-end bit is always set.  */
	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
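
	  /* With 32-bit longs, for instance, a page holding 512
	     objects gets bit 512 % 32 == 0 of word 512 / 32 == 16
	     set here.  */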
	}
    }
}

/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static inline void
sweep_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
	 placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
	continue;

      previous = NULL;
      do
	{
	  page_entry *next = p->next;

	  /* Loop until all entries have been examined.  */
	  done = (p == last);

	  /* Add all live objects on this page to the count of
             allocated memory.  */
	  live_objects = num_objects - p->num_free_objects;

	  G.allocated += OBJECT_SIZE (order) * live_objects;

	  /* Only objects on pages in the topmost context should get
	     collected.  */
	  if (p->context_depth < G.context_depth)
	    ;

	  /* Remove the page if it's empty.  */
	  else if (live_objects == 0)
	    {
	      if (! previous)
		G.pages[order] = next;
	      else
		previous->next = next;

	      /* Are we removing the last element?  */
	      if (p == G.page_tails[order])
		G.page_tails[order] = previous;
	      free_page (p);
	      p = previous;
	    }

	  /* If the page is full, move it to the end.  */
	  else if (p->num_free_objects == 0)
	    {
	      /* Don't move it if it's already at the end.  */
	      if (p != G.page_tails[order])
		{
		  /* Move p to the end of the list.  */
		  p->next = NULL;
		  G.page_tails[order]->next = p;

		  /* Update the tail pointer...  */
		  G.page_tails[order] = p;

		  /* ... and the head pointer, if necessary.  */
		  if (! previous)
		    G.pages[order] = next;
		  else
		    previous->next = next;
		  p = previous;
		}
	    }

	  /* If we've fallen through to here, it's a page in the
	     topmost context that is neither full nor empty.  Such a
	     page must precede pages at lesser context depth in the
	     list, so move it to the head.  */
	  else if (p != G.pages[order])
	    {
	      previous->next = p->next;
	      p->next = G.pages[order];
	      G.pages[order] = p;
	      /* Are we moving the last element?  */
	      if (G.page_tails[order] == p)
	        G.page_tails[order] = previous;
	      p = previous;
	    }

	  previous = p;
	  p = next;
	}
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
	if (p->context_depth != G.context_depth)
	  ggc_recalculate_in_use_p (p);
    }
}

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static inline void
poison_pages ()
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t num_objects = OBJECTS_PER_PAGE (order);
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
	{
	  size_t i;

	  if (p->context_depth != G.context_depth)
	    /* Since we don't do any collection for pages in pushed
	       contexts, there's no need to do any poisoning.  And
	       besides, the IN_USE_P array isn't valid until we pop
	       contexts.  */
	    continue;

	  for (i = 0; i < num_objects; i++)
	    {
	      size_t word, bit;
	      word = i / HOST_BITS_PER_LONG;
	      bit = i % HOST_BITS_PER_LONG;
	      if (((p->in_use_p[word] >> bit) & 1) == 0)
		{
		  char *object = p->page + i * size;

		  /* Keep poison-by-write when we expect to use Valgrind,
		     so the exact same memory semantics are kept, in case
		     there are memory errors.  We override this request
		     below.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
		  memset (object, 0xa5, size);

		  /* Drop the handle to avoid handle leak.  */
		  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
		}
	    }
	}
    }
}
#endif

/* Top level mark-and-sweep routine.  */

void
ggc_collect ()
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

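  /* Worked example, assuming GGC_MIN_EXPAND of 30 and GGC_MIN_HEAPSIZE
     of 4096 kB (the params' values are set elsewhere): if the last
     collection left 8 MB live, min_expand is 2.4 MB, and the test
     below skips collection until G.allocated exceeds about 10.4 MB.  */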
  if (G.allocated < allocated_last_gc + min_expand)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();

#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
}

/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
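
/* For instance, SCALE (2048) is 2048 with LABEL ' ' (plain bytes),
   SCALE (20480) is 20 with LABEL 'k', and SCALE (20971520) is 20
   with LABEL 'M'.  */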

void
ggc_print_statistics ()
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr, "\n%-5s %10s  %10s  %10s\n",
	   "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
	continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
	 this size, and how many of them are actually in use.  Also figure
	 out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
	{
	  allocated += p->bytes;
	  in_use +=
	    (OBJECTS_PER_PAGE (i) - p->num_free_objects) * OBJECT_SIZE (i);

	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_PER_PAGE (i) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), LABEL (allocated),
	       SCALE (in_use), LABEL (in_use),
	       SCALE (overhead), LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), LABEL (G.bytes_mapped),
	   SCALE (G.allocated), LABEL(G.allocated),
	   SCALE (total_overhead), LABEL (total_overhead));
}