/* "Bag-of-pages" zone garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.

   Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
   (dberlin@dberlin.org).  Rewritten by Daniel Jacobowitz
   <dan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "toplev.h"
#include "varray.h"
#include "flags.h"
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#include "bitmap.h"

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdefs when we can help it.  */
#define VALGRIND_DISCARD(x)
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO

# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON)
#  define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# include <sys/mman.h>
# ifndef MAP_FAILED
#  define MAP_FAILED -1
# endif
# define USING_MMAP
#endif

#ifndef USING_MMAP
#error Zone collector requires mmap
#endif

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif

/* FUTURE NOTES:

   If we track inter-zone pointers, we can mark single zones at a
   time.

   If we have a zone where we guarantee no inter-zone pointers, we
   could mark that zone separately.

   The garbage zone should not be marked, and we should return 1 in
   ggc_set_mark for any object in the garbage zone, which cuts off
   marking quickly.  */

/* Strategy:

   This garbage-collecting allocator segregates objects into zones.
   It also segregates objects into "large" and "small" bins.  Large
   objects are those larger than the page size.

   Pages for small objects are broken up into chunks.  The page has
   a bitmap which marks the start position of each chunk (whether
   allocated or free).  Free chunks are on one of the zone's free
   lists and contain a pointer to the next free chunk.  Chunks in
   most of the free lists have a fixed size determined by the
   free list.  Chunks in the "other" sized free list have their size
   stored right after their chain pointer.

   Empty pages (of all sizes) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  The free page list is currently per-zone.  */
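
/* For example (a sketch of the common allocation path, not additional
   machinery): a small ggc_alloc request is served first from the zone's
   cached free chunk, then from a free bin of exactly the right size, then
   by splitting a larger free chunk, and only as a last resort by taking a
   fresh page from the page cache or from mmap.  */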

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif

/* This structure manages small free chunks.  The SIZE field is only
   initialized if the chunk is in the "other" sized free list.  Large
   chunks are allocated one at a time to their own page, and so don't
   come in here.  */

struct alloc_chunk {
  struct alloc_chunk *next_free;
  unsigned int size;
};

/* The size of the fixed-size portion of a small page descriptor.  */
#define PAGE_OVERHEAD   (offsetof (struct small_page_entry, alloc_bits))

/* The collector's idea of the page size.  This must be a power of two
   no larger than the system page size, because pages must be aligned
   to this amount and are tracked at this granularity in the page
   table.  We choose a size at compile time for efficiency.

   We could make a better guess at compile time if PAGE_SIZE is a
   constant in system headers, and PAGE_SHIFT is defined...  */
#define GGC_PAGE_SIZE	4096
#define GGC_PAGE_MASK	(GGC_PAGE_SIZE - 1)
#define GGC_PAGE_SHIFT	12
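
/* For illustration: with the values above, GGC_PAGE_SIZE == 1 << GGC_PAGE_SHIFT
   (4096 == 1 << 12); for any pointer P, (size_t) P & GGC_PAGE_MASK is P's
   offset within its page, and (size_t) P & ~GGC_PAGE_MASK is the address of
   the page containing P.  */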

#if 0
/* Alternative definitions which use the runtime page size.  */
#define GGC_PAGE_SIZE	G.pagesize
#define GGC_PAGE_MASK	G.page_mask
#define GGC_PAGE_SHIFT	G.lg_pagesize
#endif

/* The size of a small page managed by the garbage collector.  This
   must currently be GGC_PAGE_SIZE, but with a few changes could
   be any multiple of it to reduce certain kinds of overhead.  */
#define SMALL_PAGE_SIZE GGC_PAGE_SIZE

/* Free bin information.  These numbers may be in need of re-tuning.
   In general, decreasing the number of free bins would seem to
   increase the time it takes to allocate... */

/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
   today.  */

#define NUM_FREE_BINS		64
#define FREE_BIN_DELTA		MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE)	((SIZE) / FREE_BIN_DELTA)
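
/* For example (illustrative; FREE_BIN_DELTA is MAX_ALIGNMENT, which is
   target-dependent): if MAX_ALIGNMENT is 8, then bins 1 .. 64 hold chunks
   of 8 .. 512 bytes, a 24-byte chunk belongs in bin SIZE_BIN_DOWN (24) == 3,
   and anything larger than 512 bytes goes in the "other" bin 0.  */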

/* Allocation and marking parameters.  */

/* The smallest allocatable unit to keep track of.  */
#define BYTES_PER_ALLOC_BIT	MAX_ALIGNMENT

/* The smallest markable unit.  If we require each allocated object
   to contain at least two allocatable units, we can use half as many
   bits for the mark bitmap.  But this adds considerable complexity
   to sweeping.  */
#define BYTES_PER_MARK_BIT	BYTES_PER_ALLOC_BIT

#define BYTES_PER_MARK_WORD	(8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
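
/* For example (illustrative, assuming a 4-byte mark_type and an 8-byte
   BYTES_PER_MARK_BIT): each mark word covers 8 * 8 * 4 == 256 bytes of page
   data, so a 4096-byte page needs 16 mark words.  */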

/* We use this structure to determine the alignment required for
   allocations.

   There are several things wrong with this estimation of alignment.

   The maximum alignment for a structure is often less than the
   maximum alignment for a basic data type; for instance, on some
   targets long long must be aligned to sizeof (int) in a structure
   and sizeof (long long) in a variable.  i386-linux is one example;
   Darwin is another (sometimes, depending on the compiler in use).

   Also, long double is not included.  Nothing in GCC uses long
   double, so we assume that this is OK.  On powerpc-darwin, adding
   long double would bring the maximum alignment up to 16 bytes,
   and until we need long double (or to vectorize compiler operations)
   that's painfully wasteful.  This will need to change, some day.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))
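
/* For example, ROUND_UP (10, 8) == 16 and ROUND_UP (16, 8) == 16.  */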

/* Types to use for the allocation and mark bitmaps.  It might be
   a good idea to add ffsl to libiberty and use unsigned long
   instead; that could speed us up where long is wider than int.  */

typedef unsigned int alloc_type;
typedef unsigned int mark_type;
#define alloc_ffs(x) ffs(x)

/* A page_entry records the status of an allocation page.  This is the
   common data between all three kinds of pages - small, large, and
   PCH.  */
typedef struct page_entry
{
  /* The address at which the memory is allocated.  */
  char *page;

  /* The zone that this page entry belongs to.  */
  struct alloc_zone *zone;

#ifdef GATHER_STATISTICS
  /* How many collections we've survived.  */
  size_t survived;
#endif

  /* Does this page contain small objects, or one large object?  */
  bool large_p;

  /* Is this page part of the loaded PCH?  */
  bool pch_p;
} page_entry;

/* Additional data needed for small pages.  */
struct small_page_entry
{
  struct page_entry common;

  /* The next small page entry, or NULL if this is the last.  */
  struct small_page_entry *next;

  /* If currently marking this zone, a pointer to the mark bits
     for this page.  If we aren't currently marking this zone,
     this pointer may be stale (pointing to freed memory).  */
  mark_type *mark_bits;

  /* The allocation bitmap.  This array extends far enough to have
     one bit for every BYTES_PER_ALLOC_BIT bytes in the page.  */
  alloc_type alloc_bits[1];
};

/* Additional data needed for large pages.  */
struct large_page_entry
{
  struct page_entry common;

  /* The next large page entry, or NULL if this is the last.  */
  struct large_page_entry *next;

  /* The number of bytes allocated, not including the page entry.  */
  size_t bytes;

  /* The previous page in the list, so that we can unlink this one.  */
  struct large_page_entry *prev;

  /* During marking, is this object marked?  */
  bool mark_p;
};

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
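
/* For example (32-bit pointers, GGC_PAGE_SHIFT == 12, PAGE_L1_BITS == 8,
   so PAGE_L2_BITS == 12): for P == 0x12345678, LOOKUP_L1 (P) == 0x12,
   LOOKUP_L2 (P) == 0x345, and the low 12 bits (0x678) are P's offset
   within its page.  */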

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The global variables.  */
static struct globals
{
  /* The linked list of zones.  */
  struct alloc_zone *zones;

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size, and related constants.  */
  size_t pagesize;
  size_t lg_pagesize;
  size_t page_mask;

  /* The size to allocate for a small page entry.  This includes
     the size of the structure and the size of the allocation
     bitmap.  */
  size_t small_page_overhead;

#if defined (HAVE_MMAP_DEV_ZERO)
  /* A file descriptor open to /dev/zero for reading.  */
  int dev_zero_fd;
#endif

  /* Allocate pages in chunks of this size, to throttle calls to memory
     allocation routines.  The first page is used, the rest go onto the
     free list.  */
  size_t quire_size;

  /* The file descriptor for debugging output.  */
  FILE *debug_file;
} G;

/* A zone allocation structure.  There is one of these for every
   distinct allocation zone.  */
struct alloc_zone
{
  /* The most recent free chunk is saved here, instead of in the linked
     free list, to decrease list manipulation.  It is most likely that we
     will want this one.  */
  char *cached_free;
  size_t cached_free_size;

  /* Linked lists of free storage.  Slot I, for 1 <= I <= NUM_FREE_BINS,
     holds chunks of size I * FREE_BIN_DELTA.  All other chunks are in
     slot 0.  */
  struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];

  /* The highest bin index which might be non-empty.  It may turn out
     to be empty, in which case we have to search downwards.  */
  size_t high_free_bin;

  /* Bytes currently allocated in this zone.  */
  size_t allocated;

  /* Linked list of the small pages in this zone.  */
  struct small_page_entry *pages;

  /* Doubly linked list of large pages in this zone.  */
  struct large_page_entry *large_pages;

  /* If we are currently marking this zone, a pointer to the mark bits.  */
  mark_type *mark_bits;

  /* Name of the zone.  */
  const char *name;

  /* The number of small pages currently allocated in this zone.  */
  size_t n_small_pages;

  /* Bytes allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* A cache of free system pages.  */
  struct small_page_entry *free_pages;

  /* Next zone in the linked list of zones.  */
  struct alloc_zone *next_zone;

  /* True if this zone was collected during this collection.  */
  bool was_collected;

  /* True if this zone should be destroyed after the next collection.  */
  bool dead;

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;
  } stats;
#endif
} main_zone;

/* Some default zones.  */
struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;

/* The PCH zone does not need a normal zone structure, and it does
   not live on the linked list of zones.  */
struct pch_zone
{
  /* The start of the PCH zone.  NULL if there is none.  */
  char *page;

  /* The end of the PCH zone.  NULL if there is none.  */
  char *end;

  /* The size of the PCH zone.  0 if there is none.  */
  size_t bytes;

  /* The allocation bitmap for the PCH zone.  */
  alloc_type *alloc_bits;

  /* If we are currently marking, the mark bitmap for the PCH zone.
     When it is first read in, we could avoid marking the PCH,
     because it will not contain any pointers to GC memory outside
     of the PCH; however, the PCH is currently mapped as writable,
     so we must mark it in case new pointers are added.  */
  mark_type *mark_bits;
} pch_zone;

#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
static struct small_page_entry * alloc_small_page (struct alloc_zone *);
static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (char *, size_t, struct alloc_zone *);
static void free_small_page (struct small_page_entry *);
static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
static bool ggc_collect_1 (struct alloc_zone *, bool);
static void new_ggc_zone_1 (struct alloc_zone *, const char *);

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for the page that starts at P.  If ENTRY
   is NULL, clear the entry.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = xcalloc (1, sizeof (*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));

  base[L1][L2] = entry;
}

/* Find the page table entry associated with OBJECT.  */

static inline struct page_entry *
zone_get_object_page (const void *object)
{
  return lookup_page_table_entry (object);
}

/* Find which element of the alloc_bits array OBJECT should be
   recorded in.  */
static inline unsigned int
zone_get_object_alloc_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
}

/* Find which bit of the appropriate word in the alloc_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_alloc_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_ALLOC_BIT)
	  % (8 * sizeof (alloc_type)));
}
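
/* For example (illustrative, assuming a 4-byte alloc_type and an 8-byte
   BYTES_PER_ALLOC_BIT): an object at offset 0x678 within its page occupies
   allocation unit 0x678 / 8 == 207, which is bit 207 % 32 == 15 of
   alloc_bits word 207 / 32 == 6.  */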

/* Find which element of the mark_bits array OBJECT should be recorded
   in.  */
static inline unsigned int
zone_get_object_mark_word (const void *object)
{
  return (((size_t) object & (GGC_PAGE_SIZE - 1))
	  / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
}

/* Find which bit of the appropriate word in the mark_bits array
   OBJECT should be recorded in.  */
static inline unsigned int
zone_get_object_mark_bit (const void *object)
{
  return (((size_t) object / BYTES_PER_MARK_BIT)
	  % (8 * sizeof (mark_type)));
}

/* Set the allocation bit corresponding to OBJECT in its page's
   bitmap.  Used to split this object from the preceding one.  */
static inline void
zone_set_object_alloc_bit (const void *object)
{
  struct small_page_entry *page
    = (struct small_page_entry *) zone_get_object_page (object);
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  page->alloc_bits[start_word] |= 1L << start_bit;
}

/* Clear the allocation bit corresponding to OBJECT in PAGE's
   bitmap.  Used to coalesce this object with the preceding
   one.  */
static inline void
zone_clear_object_alloc_bit (struct small_page_entry *page,
			     const void *object)
{
  unsigned int start_word = zone_get_object_alloc_word (object);
  unsigned int start_bit = zone_get_object_alloc_bit (object);

  /* Would xor be quicker?  */
  page->alloc_bits[start_word] &= ~(1L << start_bit);
}

/* Find the size of the object which starts at START_WORD and
   START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
   Helper function for ggc_get_size and zone_find_object_size.  */

static inline size_t
zone_object_size_1 (alloc_type *alloc_bits,
		    size_t start_word, size_t start_bit,
		    size_t max_size)
{
  size_t size;
  alloc_type alloc_word;
  int indx;

  /* Load the first word.  */
  alloc_word = alloc_bits[start_word++];

  /* If that was the last bit in this word, we'll want to continue
     with the next word.  Otherwise, handle the rest of this word.  */
  if (start_bit)
    {
      indx = alloc_ffs (alloc_word >> start_bit);
      if (indx)
	/* indx is 1-based.  We started at the bit after the object's
	   start, but we also ended at the bit after the object's end.
	   It cancels out.  */
	return indx * BYTES_PER_ALLOC_BIT;

      /* The extra 1 accounts for the starting unit, before start_bit.  */
      size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;

      if (size >= max_size)
	return max_size;

      alloc_word = alloc_bits[start_word++];
    }
  else
    size = BYTES_PER_ALLOC_BIT;

  while (alloc_word == 0)
    {
      size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
      if (size >= max_size)
	return max_size;
      alloc_word = alloc_bits[start_word++];
    }

  indx = alloc_ffs (alloc_word);
  return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
}

/* Find the size of OBJECT on small page PAGE.  */

static inline size_t
zone_find_object_size (struct small_page_entry *page,
		       const void *object)
{
  const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
  unsigned int start_word = zone_get_object_alloc_word (object_midptr);
  unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
  size_t max_size = (page->common.page + SMALL_PAGE_SIZE
		     - (char *) object);

  return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
			     max_size);
}
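
/* For example: the search starts one allocation unit past the object's
   start, so if the next set bit in alloc_bits is three units after the
   object's own bit, the object spans three units, i.e. 3 *
   BYTES_PER_ALLOC_BIT bytes; if no further bit is set, the object extends
   to the end of the page (MAX_SIZE).  */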

/* Allocate the mark bits for every zone, and set the pointers on each
   page.  */
static void
zone_allocate_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    {
      struct small_page_entry *page;
      mark_type *cur_marks;
      size_t mark_words, mark_words_per_page;
#ifdef ENABLE_CHECKING
      size_t n = 0;
#endif

      mark_words_per_page
	= (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
      mark_words = zone->n_small_pages * mark_words_per_page;
      zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
					       mark_words);
      cur_marks = zone->mark_bits;
      for (page = zone->pages; page; page = page->next)
	{
	  page->mark_bits = cur_marks;
	  cur_marks += mark_words_per_page;
#ifdef ENABLE_CHECKING
	  n++;
#endif
	}
#ifdef ENABLE_CHECKING
      gcc_assert (n == zone->n_small_pages);
#endif
    }

  /* We don't collect the PCH zone, but we do have to mark it
     (for now).  */
  if (pch_zone.bytes)
    pch_zone.mark_bits
      = (mark_type *) xcalloc (sizeof (mark_type),
			       CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
}

/* After marking and sweeping, release the memory used for mark bits.  */
static void
zone_free_marks (void)
{
  struct alloc_zone *zone;

  for (zone = G.zones; zone; zone = zone->next_zone)
    if (zone->mark_bits)
      {
	free (zone->mark_bits);
	zone->mark_bits = NULL;
      }

  if (pch_zone.bytes)
    {
      free (pch_zone.mark_bits);
      pch_zone.mark_bits = NULL;
    }
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  zone->bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif

/* Allocate a new page for allocating small objects in ZONE, and
   return an entry for it.  */

static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
  struct small_page_entry *entry;

  /* Check the list of free pages for one we can use.  */
  entry = zone->free_pages;
  if (entry != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      zone->free_pages = entry->next;
    }
  else
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct small_page_entry *e, *f = zone->free_pages;
      int i;
      char *page;

      page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = G.quire_size - 1; i >= 1; i--)
	{
	  e = xcalloc (1, G.small_page_overhead);
	  e->common.page = page + (i << GGC_PAGE_SHIFT);
	  e->common.zone = zone;
	  e->next = f;
	  f = e;
	  set_page_table_entry (e->common.page, &e->common);
	}

      zone->free_pages = f;

      entry = xcalloc (1, G.small_page_overhead);
      entry->common.page = page;
      entry->common.zone = zone;
      set_page_table_entry (page, &entry->common);
    }

  zone->n_small_pages++;

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + SMALL_PAGE_SIZE - 1);

  return entry;
}

/* Allocate a large page of size SIZE in ZONE.  */

static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
  struct large_page_entry *entry;
  char *page;
  size_t needed_size;

  needed_size = size + sizeof (struct large_page_entry);
  page = xmalloc (needed_size);

  entry = (struct large_page_entry *) page;

  entry->next = NULL;
  entry->common.page = page + sizeof (struct large_page_entry);
  entry->common.large_p = true;
  entry->common.pch_p = false;
  entry->common.zone = zone;
#ifdef GATHER_STATISTICS
  entry->common.survived = 0;
#endif
  entry->mark_p = false;
  entry->bytes = size;
  entry->prev = NULL;

  set_page_table_entry (entry->common.page, &entry->common);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating %s large page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry, entry->common.page,
	     entry->common.page + size - 1);

  return entry;
}


/* For a page that is no longer needed, put it on the free page list.  */

static inline void
free_small_page (struct small_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);

  gcc_assert (!entry->common.large_p);

  /* Mark the page as inaccessible.  Discard the handle to
     avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->common.page,
					    SMALL_PAGE_SIZE));

  entry->next = entry->common.zone->free_pages;
  entry->common.zone->free_pages = entry;
  entry->common.zone->n_small_pages--;
}

/* Release a large page that is no longer needed.  */

static inline void
free_large_page (struct large_page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating %s page at %p, data %p-%p\n",
	     entry->common.zone->name, (PTR) entry,
	     entry->common.page, entry->common.page + entry->bytes - 1);

  gcc_assert (entry->common.large_p);

  set_page_table_entry (entry->common.page, NULL);
  free (entry);
}

/* Release the free page cache to the system.  */

static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
  struct small_page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = zone->free_pages;

  while (p)
    {
      start = p->common.page;
      next = p->next;
      len = SMALL_PAGE_SIZE;
      set_page_table_entry (p->common.page, NULL);
      p = next;

      while (p && p->common.page == start + len)
	{
	  next = p->next;
	  len += SMALL_PAGE_SIZE;
	  set_page_table_entry (p->common.page, NULL);
	  p = next;
	}

      munmap (start, len);
      zone->bytes_mapped -= len;
    }

  zone->free_pages = NULL;
#endif
}

/* Place the block at PTR of size SIZE on the free list for ZONE.  */

static inline void
free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
  struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
  size_t bin = 0;

  bin = SIZE_BIN_DOWN (size);
  gcc_assert (bin != 0);
  if (bin > NUM_FREE_BINS)
    {
      bin = 0;
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
      chunk->size = size;
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk),
						size - sizeof (struct alloc_chunk)));
    }
  else
    {
      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk *)));
      chunk->next_free = zone->free_chunks[bin];
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk *),
						size - sizeof (struct alloc_chunk *)));
    }

  zone->free_chunks[bin] = chunk;
  if (bin > zone->high_free_bin)
    zone->high_free_bin = bin;
  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
}

/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE.  */

void *
ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
		     MEM_STAT_DECL)
{
  size_t bin;
  size_t csize;
  struct small_page_entry *entry;
  struct alloc_chunk *chunk, **pp;
  void *result;
  size_t size = orig_size;

  /* Make sure that zero-sized allocations get a unique and freeable
     pointer.  */
  if (size == 0)
    size = MAX_ALIGNMENT;
  else
    size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
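
  /* For example, with MAX_ALIGNMENT == 8 (illustrative; MAX_ALIGNMENT is
     target-dependent) a request for 10 bytes is rounded up to 16, and a
     request for 0 bytes becomes 8.  */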

  /* Try to allocate the object from several different sources.  Each
     of these cases is responsible for setting RESULT and SIZE to
     describe the allocated block, before jumping to FOUND.  If a
     chunk is split, the allocate bit for the new chunk should also be
     set.

     Large objects are handled specially.  However, they'll just fail
     the next couple of conditions, so we can wait to check for them
     below.  The large object case is relatively rare (< 1%), so this
     is a win.  */

  /* First try to split the last chunk we allocated.  For best
     fragmentation behavior it would be better to look for a
     free bin of the appropriate size for a small object.  However,
     we're unlikely (1% - 7%) to find one, and this gives better
     locality behavior anyway.  This case handles the lion's share
     of all calls to this function.  */
  if (size <= zone->cached_free_size)
    {
      result = zone->cached_free;

      zone->cached_free_size -= size;
      if (zone->cached_free_size)
	{
	  zone->cached_free += size;
	  zone_set_object_alloc_bit (zone->cached_free);
	}

      goto found;
    }

  /* Next, try to find a free bin of the exactly correct size.  */

  /* We want to round SIZE up, rather than down, but we know it's
     already aligned to at least FREE_BIN_DELTA, so we can just
     shift.  */
  bin = SIZE_BIN_DOWN (size);

  if (bin <= NUM_FREE_BINS
      && (chunk = zone->free_chunks[bin]) != NULL)
    {
      /* We have a chunk of the right size.  Pull it off the free list
	 and use it.  */

      zone->free_chunks[bin] = chunk->next_free;

      /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
	 == FREE_BIN_DELTA.  */
      result = chunk;

      /* The allocation bits are already set correctly.  HIGH_FREE_BIN
	 may now be wrong, if this was the last chunk in the high bin.
	 Rather than fixing it up now, wait until we need to search
	 the free bins.  */

      goto found;
    }

  /* Next, if there wasn't a chunk of the ideal size, look for a chunk
     to split.  We can find one in the too-big bin, or in the largest
     sized bin with a chunk in it.  Try the largest normal-sized bin
     first.  */

  if (zone->high_free_bin > bin)
    {
      /* Find the highest numbered free bin.  It will be at or below
	 the watermark.  */
      while (zone->high_free_bin > bin
	     && zone->free_chunks[zone->high_free_bin] == NULL)
	zone->high_free_bin--;

      if (zone->high_free_bin > bin)
	{
	  size_t tbin = zone->high_free_bin;
	  chunk = zone->free_chunks[tbin];

	  /* Remove the chunk from its previous bin.  */
	  zone->free_chunks[tbin] = chunk->next_free;

	  result = (char *) chunk;

	  /* Save the rest of the chunk for future allocation.  */
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;

	  /* Mark the new free chunk as an object, so that we can
	     find the size of the newly allocated object.  */
	  zone_set_object_alloc_bit (chunk);

	  /* HIGH_FREE_BIN may now be wrong, if this was the last
	     chunk in the high bin.  Rather than fixing it up now,
	     wait until we need to search the free bins.  */

	  goto found;
	}
    }

  /* Failing that, look through the "other" bucket for a chunk
     that is large enough.  */
  pp = &(zone->free_chunks[0]);
  chunk = *pp;
  while (chunk && chunk->size < size)
    {
      pp = &chunk->next_free;
      chunk = *pp;
    }

  if (chunk)
    {
      /* Remove the chunk from its previous bin.  */
      *pp = chunk->next_free;

      result = (char *) chunk;

      /* Save the rest of the chunk for future allocation, if there's any
	 left over.  */
      csize = chunk->size;
      if (csize > size)
	{
	  if (zone->cached_free_size)
	    free_chunk (zone->cached_free, zone->cached_free_size, zone);

	  chunk = (struct alloc_chunk *) ((char *) result + size);
	  zone->cached_free = (char *) chunk;
	  zone->cached_free_size = csize - size;

	  /* Mark the new free chunk as an object.  */
	  zone_set_object_alloc_bit (chunk);
	}

      goto found;
    }

  /* Handle large allocations.  We could choose any threshold between
     GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
     GGC_PAGE_SIZE.  It can't be smaller, because then it wouldn't
     be guaranteed to have a unique entry in the lookup table.  Large
     allocations will always fall through to here.  */
  if (size > GGC_PAGE_SIZE)
    {
      struct large_page_entry *entry = alloc_large_page (size, zone);

#ifdef GATHER_STATISTICS
      entry->common.survived = 0;
#endif

      entry->next = zone->large_pages;
      if (zone->large_pages)
	zone->large_pages->prev = entry;
      zone->large_pages = entry;

      result = entry->common.page;

      goto found;
    }

  /* Failing everything above, allocate a new small page.  */

  entry = alloc_small_page (zone);
  entry->next = zone->pages;
  zone->pages = entry;

  /* Mark the first chunk in the new page.  */
  entry->alloc_bits[0] = 1;

  result = entry->common.page;
  if (size < SMALL_PAGE_SIZE)
    {
      if (zone->cached_free_size)
	free_chunk (zone->cached_free, zone->cached_free_size, zone);

      zone->cached_free = (char *) result + size;
      zone->cached_free_size = SMALL_PAGE_SIZE - size;

      /* Mark the new free chunk as an object.  */
      zone_set_object_alloc_bit (zone->cached_free);
    }

 found:

  /* We could save TYPE in the chunk, but we don't use that for
     anything yet.  If we wanted to, we could do it by adding it
     either before the beginning of the chunk or after its end,
     and adjusting the size and pointer appropriately.  */

  /* We'll probably write to this after we return.  */
  prefetchw (result);

#ifdef ENABLE_GC_CHECKING
  /* `Poison' the entire allocated object.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
  memset (result, 0xaf, size);
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
					    size - orig_size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  zone->allocated += size;

  timevar_ggc_mem_total += size;

#ifdef GATHER_STATISTICS
  ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);

  {
    size_t object_size = size;
    size_t overhead = object_size - orig_size;

    zone->stats.total_overhead += overhead;
    zone->stats.total_allocated += object_size;

    if (orig_size <= 32)
      {
	zone->stats.total_overhead_under32 += overhead;
	zone->stats.total_allocated_under32 += object_size;
      }
    if (orig_size <= 64)
      {
	zone->stats.total_overhead_under64 += overhead;
	zone->stats.total_allocated_under64 += object_size;
      }
    if (orig_size <= 128)
      {
	zone->stats.total_overhead_under128 += overhead;
	zone->stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
	     (unsigned long) size, result);

  return result;
}

/* Allocate SIZE bytes of memory of type GTE, in an appropriate zone
   for that type.  */
1271132718Skan
1272132718Skanvoid *
1273169689Skanggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
1274169689Skan		      MEM_STAT_DECL)
1275132718Skan{
1276132718Skan  switch (gte)
1277132718Skan    {
1278132718Skan    case gt_ggc_e_14lang_tree_node:
1279169689Skan      return ggc_alloc_zone_pass_stat (size, &tree_zone);
1280132718Skan
1281132718Skan    case gt_ggc_e_7rtx_def:
1282169689Skan      return ggc_alloc_zone_pass_stat (size, &rtl_zone);
1283132718Skan
1284132718Skan    case gt_ggc_e_9rtvec_def:
1285169689Skan      return ggc_alloc_zone_pass_stat (size, &rtl_zone);
1286132718Skan
1287132718Skan    default:
1288169689Skan      return ggc_alloc_zone_pass_stat (size, &main_zone);
1289132718Skan    }
1290132718Skan}
1291132718Skan
1292132718Skan/* Normal ggc_alloc simply allocates into the main zone.  */
1293132718Skan
1294132718Skanvoid *
1295169689Skanggc_alloc_stat (size_t size MEM_STAT_DECL)
1296132718Skan{
1297169689Skan  return ggc_alloc_zone_pass_stat (size, &main_zone);
1298132718Skan}
1299132718Skan
1300169689Skan/* Poison the chunk.  */
1301169689Skan#ifdef ENABLE_GC_CHECKING
1302169689Skan#define poison_region(PTR, SIZE) \
1303169689Skan  memset ((PTR), 0xa5, (SIZE))
1304169689Skan#else
1305169689Skan#define poison_region(PTR, SIZE)
1306169689Skan#endif
1307132718Skan
1308169689Skan/* Free the object at P.  */
1309169689Skan
1310169689Skanvoid
1311169689Skanggc_free (void *p)
1312132718Skan{
1313169689Skan  struct page_entry *page;
1314169689Skan
1315169689Skan#ifdef GATHER_STATISTICS
1316169689Skan  ggc_free_overhead (p);
1317169689Skan#endif
1318169689Skan
1319169689Skan  poison_region (p, ggc_get_size (p));
1320169689Skan
1321169689Skan  page = zone_get_object_page (p);
1322169689Skan
1323169689Skan  if (page->large_p)
1324169689Skan    {
1325169689Skan      struct large_page_entry *large_page
1326169689Skan	= (struct large_page_entry *) page;
1327169689Skan
1328169689Skan      /* Remove the page from the linked list.  */
1329169689Skan      if (large_page->prev)
1330169689Skan	large_page->prev->next = large_page->next;
1331169689Skan      else
1332169689Skan	{
1333169689Skan	  gcc_assert (large_page->common.zone->large_pages == large_page);
1334169689Skan	  large_page->common.zone->large_pages = large_page->next;
1335169689Skan	}
1336169689Skan      if (large_page->next)
1337169689Skan	large_page->next->prev = large_page->prev;
1338169689Skan
1339169689Skan      large_page->common.zone->allocated -= large_page->bytes;
1340169689Skan
1341169689Skan      /* Release the memory associated with this object.  */
1342169689Skan      free_large_page (large_page);
1343169689Skan    }
1344169689Skan  else if (page->pch_p)
1345169689Skan    /* Don't do anything.  We won't allocate a new object from the
1346169689Skan       PCH zone so there's no point in releasing anything.  */
1347169689Skan    ;
1348169689Skan  else
1349169689Skan    {
1350169689Skan      size_t size = ggc_get_size (p);
1351169689Skan
1352169689Skan      page->zone->allocated -= size;
1353169689Skan
1354169689Skan      /* Add the chunk to the free list.  We don't bother with coalescing,
1355169689Skan	 since we are likely to want a chunk of this size again.  */
1356169689Skan      free_chunk (p, size, page->zone);
1357169689Skan    }
1358132718Skan}
1359132718Skan
1360132718Skan/* If P is not marked, mark it and return false.  Otherwise return true.
1361132718Skan   P must have been allocated by the GC allocator; it mustn't point to
1362132718Skan   static objects, stack variables, or memory allocated with malloc.  */
1363132718Skan
1364132718Skanint
1365132718Skanggc_set_mark (const void *p)
1366132718Skan{
1367169689Skan  struct page_entry *page;
1368169689Skan  const char *ptr = (const char *) p;
1369132718Skan
1370169689Skan  page = zone_get_object_page (p);
1371132718Skan
1372169689Skan  if (page->pch_p)
1373169689Skan    {
1374169689Skan      size_t mark_word, mark_bit, offset;
1375169689Skan      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1376169689Skan      mark_word = offset / (8 * sizeof (mark_type));
1377169689Skan      mark_bit = offset % (8 * sizeof (mark_type));
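
      /* PCH objects have no per-page bitmaps; a single bitmap covers the
         whole PCH region, indexed by the object's byte offset.  As an
         illustration only (the real mark_type and BYTES_PER_MARK_BIT are
         defined earlier in this file), with 8-bit mark words and one mark
         bit per 8 bytes, an object 200 bytes into the PCH maps to bit
         200/8 = 25, i.e. bit 1 of mark word 3.  */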
1378169689Skan
1379169689Skan      if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
1380169689Skan	return 1;
1381169689Skan      pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
1382169689Skan    }
1383169689Skan  else if (page->large_p)
1384169689Skan    {
1385169689Skan      struct large_page_entry *large_page
1386169689Skan	= (struct large_page_entry *) page;
1387169689Skan
1388169689Skan      if (large_page->mark_p)
1389169689Skan	return 1;
1390169689Skan      large_page->mark_p = true;
1391169689Skan    }
1392169689Skan  else
1393169689Skan    {
1394169689Skan      struct small_page_entry *small_page
1395169689Skan	= (struct small_page_entry *) page;
1396169689Skan
1397169689Skan      if (small_page->mark_bits[zone_get_object_mark_word (p)]
1398169689Skan	  & (1 << zone_get_object_mark_bit (p)))
1399169689Skan	return 1;
1400169689Skan      small_page->mark_bits[zone_get_object_mark_word (p)]
1401169689Skan	|= (1 << zone_get_object_mark_bit (p));
1402169689Skan    }
1403169689Skan
1404132718Skan  if (GGC_DEBUG_LEVEL >= 4)
1405132718Skan    fprintf (G.debug_file, "Marking %p\n", p);
1406132718Skan
1407132718Skan  return 0;
1408132718Skan}
1409132718Skan
1410132718Skan/* Return 1 if P has been marked, zero otherwise.
1411132718Skan   P must have been allocated by the GC allocator; it mustn't point to
1412132718Skan   static objects, stack variables, or memory allocated with malloc.  */
1413132718Skan
1414132718Skanint
1415132718Skanggc_marked_p (const void *p)
1416132718Skan{
1417169689Skan  struct page_entry *page;
1418169689Skan  const char *ptr = (const char *) p;
1419132718Skan
1420169689Skan  page = zone_get_object_page (p);
1421169689Skan
1422169689Skan  if (page->pch_p)
1423169689Skan    {
1424169689Skan      size_t mark_word, mark_bit, offset;
1425169689Skan      offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
1426169689Skan      mark_word = offset / (8 * sizeof (mark_type));
1427169689Skan      mark_bit = offset % (8 * sizeof (mark_type));
1428169689Skan
1429169689Skan      return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
1430169689Skan    }
1431169689Skan
1432169689Skan  if (page->large_p)
1433169689Skan    {
1434169689Skan      struct large_page_entry *large_page
1435169689Skan	= (struct large_page_entry *) page;
1436169689Skan
1437169689Skan      return large_page->mark_p;
1438169689Skan    }
1439169689Skan  else
1440169689Skan    {
1441169689Skan      struct small_page_entry *small_page
1442169689Skan	= (struct small_page_entry *) page;
1443169689Skan
1444169689Skan      return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
1445169689Skan		   & (1 << zone_get_object_mark_bit (p)));
1446169689Skan    }
1447132718Skan}
1448132718Skan
1449132718Skan/* Return the size of the gc-able object P.  */
1450132718Skan
1451132718Skansize_t
1452132718Skanggc_get_size (const void *p)
1453132718Skan{
1454169689Skan  struct page_entry *page;
1455169689Skan  const char *ptr = (const char *) p;
1456132718Skan
1457169689Skan  page = zone_get_object_page (p);
1458132718Skan
1459169689Skan  if (page->pch_p)
1460169689Skan    {
1461169689Skan      size_t alloc_word, alloc_bit, offset, max_size;
1462169689Skan      offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
1463169689Skan      alloc_word = offset / (8 * sizeof (alloc_type));
1464169689Skan      alloc_bit = offset % (8 * sizeof (alloc_type));
1465169689Skan      max_size = pch_zone.bytes - (ptr - pch_zone.page);
1466169689Skan      return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
1467169689Skan				 max_size);
1468169689Skan    }
1469169689Skan
1470169689Skan  if (page->large_p)
1471169689Skan    return ((struct large_page_entry *)page)->bytes;
1472169689Skan  else
1473169689Skan    return zone_find_object_size ((struct small_page_entry *) page, p);
1474132718Skan}
1475132718Skan
1476132718Skan/* Initialize the ggc-zone-mmap allocator.  */
1477132718Skanvoid
1478132718Skaninit_ggc (void)
1479132718Skan{
1480169689Skan  /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
1481169689Skan     a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
1482169689Skan     the current assumptions to hold.  */
1483169689Skan
1484169689Skan  gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);
1485169689Skan
1486132718Skan  /* Set up the main zone by hand.  */
1487132718Skan  main_zone.name = "Main zone";
1488132718Skan  G.zones = &main_zone;
1489132718Skan
1490132718Skan  /* Allocate the default zones.  */
1491169689Skan  new_ggc_zone_1 (&rtl_zone, "RTL zone");
1492169689Skan  new_ggc_zone_1 (&tree_zone, "Tree zone");
1493169689Skan  new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");
1494132718Skan
1495132718Skan  G.pagesize = getpagesize();
1496132718Skan  G.lg_pagesize = exact_log2 (G.pagesize);
1497169689Skan  G.page_mask = ~(G.pagesize - 1);
1498169689Skan
1499169689Skan  /* Require the system page size to be a multiple of GGC_PAGE_SIZE.  */
1500169689Skan  gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);
1501169689Skan
1502169689Skan  /* Allocate 16 system pages at a time.  */
1503169689Skan  G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
1504169689Skan
1505169689Skan  /* Calculate the size of the allocation bitmap and other overhead.  */
1506169689Skan  /* Right now we allocate bits for the page header and bitmap.  These
1507169689Skan     are wasted, but a little tricky to eliminate.  */
1508169689Skan  G.small_page_overhead
1509169689Skan    = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
1510169689Skan  /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
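  /* As a rough illustration only (the real constants are defined earlier
     in this file): with a 4K GGC page and one allocation bit per 8 bytes,
     the bitmap portion alone is 4096 / 8 / 8 = 64 bytes on top of
     PAGE_OVERHEAD.  */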
1511169689Skan
1512132718Skan#ifdef HAVE_MMAP_DEV_ZERO
1513132718Skan  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1514169689Skan  gcc_assert (G.dev_zero_fd != -1);
1515132718Skan#endif
1516132718Skan
1517132718Skan#if 0
1518132718Skan  G.debug_file = fopen ("ggc-mmap.debug", "w");
1519132718Skan  setlinebuf (G.debug_file);
1520132718Skan#else
1521132718Skan  G.debug_file = stdout;
1522132718Skan#endif
1523132718Skan
1524132718Skan#ifdef USING_MMAP
1525132718Skan  /* StunOS has an amazing off-by-one error for the first mmap allocation
1526132718Skan     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1527132718Skan     believe, is an unaligned page allocation, which would cause us to
1528132718Skan     hork badly if we tried to use it.  */
1529132718Skan  {
1530132718Skan    char *p = alloc_anon (NULL, G.pagesize, &main_zone);
1531169689Skan    struct small_page_entry *e;
1532132718Skan    if ((size_t)p & (G.pagesize - 1))
1533132718Skan      {
1534132718Skan	/* How losing.  Discard this one and try another.  If we still
1535132718Skan	   can't get something useful, give up.  */
1536132718Skan
1537132718Skan	p = alloc_anon (NULL, G.pagesize, &main_zone);
1538169689Skan	gcc_assert (!((size_t)p & (G.pagesize - 1)));
1539132718Skan      }
1540132718Skan
1541169689Skan    if (GGC_PAGE_SIZE == G.pagesize)
1542169689Skan      {
1543169689Skan	/* We have a good page, might as well hold onto it...  */
1544169689Skan	e = xcalloc (1, G.small_page_overhead);
1545169689Skan	e->common.page = p;
1546169689Skan	e->common.zone = &main_zone;
1547169689Skan	e->next = main_zone.free_pages;
1548169689Skan	set_page_table_entry (e->common.page, &e->common);
1549169689Skan	main_zone.free_pages = e;
1550169689Skan      }
1551169689Skan    else
1552169689Skan      {
1553169689Skan	munmap (p, G.pagesize);
1554169689Skan      }
1555132718Skan  }
1556132718Skan#endif
1557132718Skan}
1558132718Skan
1559132718Skan/* Start a new GGC zone.  */
1560132718Skan
1561169689Skanstatic void
1562169689Skannew_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
1563169689Skan{
1564169689Skan  new_zone->name = name;
1565169689Skan  new_zone->next_zone = G.zones->next_zone;
1566169689Skan  G.zones->next_zone = new_zone;
1567169689Skan}
1568169689Skan
1569132718Skanstruct alloc_zone *
1570132718Skannew_ggc_zone (const char * name)
1571132718Skan{
1572132718Skan  struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
1573169689Skan  new_ggc_zone_1 (new_zone, name);
1574132718Skan  return new_zone;
1575132718Skan}
1576132718Skan
1577132718Skan/* Destroy a GGC zone.  */
1578132718Skanvoid
1579132718Skandestroy_ggc_zone (struct alloc_zone * dead_zone)
1580132718Skan{
1581132718Skan  struct alloc_zone *z;
1582132718Skan
1583132718Skan  for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
1584169689Skan    /* Just find that zone.  */
1585169689Skan    continue;
1586132718Skan
1587132718Skan  /* We should have found the zone in the list.  Anything else is fatal.  */
1588169689Skan  gcc_assert (z);
1589132718Skan
1590132718Skan  /* z is dead, baby. z is dead.  */
1591169689Skan  z->dead = true;
1592132718Skan}
1593132718Skan
1594132718Skan/* Free all empty pages and objects within a page for a given zone.  */
1595132718Skan
1596132718Skanstatic void
1597132718Skansweep_pages (struct alloc_zone *zone)
1598132718Skan{
1599169689Skan  struct large_page_entry **lpp, *lp, *lnext;
1600169689Skan  struct small_page_entry **spp, *sp, *snext;
1601169689Skan  char *last_free;
1602169689Skan  size_t allocated = 0;
1603132718Skan  bool nomarksinpage;
1604169689Skan
1605132718Skan  /* First, reset the free_chunks lists, since we are going to
1606132718Skan     re-free free chunks in hopes of coalescing them into large chunks.  */
1607132718Skan  memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
1608169689Skan  zone->high_free_bin = 0;
1609169689Skan  zone->cached_free = NULL;
1610169689Skan  zone->cached_free_size = 0;
1611169689Skan
1612169689Skan  /* Large pages are all-or-none affairs.  Either they are completely
1613169689Skan     empty, or they are completely full.  */
1614169689Skan  lpp = &zone->large_pages;
1615169689Skan  for (lp = zone->large_pages; lp != NULL; lp = lnext)
1616132718Skan    {
1617169689Skan      gcc_assert (lp->common.large_p);
1618169689Skan
1619169689Skan      lnext = lp->next;
1620169689Skan
1621169689Skan#ifdef GATHER_STATISTICS
1622169689Skan      /* This page has now survived another collection.  */
1623169689Skan      lp->common.survived++;
1624169689Skan#endif
1625169689Skan
1626169689Skan      if (lp->mark_p)
1627132718Skan	{
1628169689Skan	  lp->mark_p = false;
1629169689Skan	  allocated += lp->bytes;
1630169689Skan	  lpp = &lp->next;
1631169689Skan	}
1632169689Skan      else
1633169689Skan	{
1634169689Skan	  *lpp = lnext;
1635132718Skan#ifdef ENABLE_GC_CHECKING
1636132718Skan	  /* Poison the page.  */
1637169689Skan	  memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
1638132718Skan#endif
1639169689Skan	  if (lp->prev)
1640169689Skan	    lp->prev->next = lp->next;
1641169689Skan	  if (lp->next)
1642169689Skan	    lp->next->prev = lp->prev;
1643169689Skan	  free_large_page (lp);
1644132718Skan	}
1645169689Skan    }
1646132718Skan
1647169689Skan  spp = &zone->pages;
1648169689Skan  for (sp = zone->pages; sp != NULL; sp = snext)
1649169689Skan    {
1650169689Skan      char *object, *last_object;
1651169689Skan      char *end;
1652169689Skan      alloc_type *alloc_word_p;
1653169689Skan      mark_type *mark_word_p;
1654169689Skan
1655169689Skan      gcc_assert (!sp->common.large_p);
1656169689Skan
1657169689Skan      snext = sp->next;
1658169689Skan
1659169689Skan#ifdef GATHER_STATISTICS
1660132718Skan      /* This page has now survived another collection.  */
1661169689Skan      sp->common.survived++;
1662169689Skan#endif
1663132718Skan
1664169689Skan      /* Step through all chunks, consolidate those that are free and
1665169689Skan	 insert them into the free lists.  Note that consolidation
1666169689Skan	 slows down collection slightly.  */
1667132718Skan
1668169689Skan      last_object = object = sp->common.page;
1669169689Skan      end = sp->common.page + SMALL_PAGE_SIZE;
1670132718Skan      last_free = NULL;
1671132718Skan      nomarksinpage = true;
1672169689Skan      mark_word_p = sp->mark_bits;
1673169689Skan      alloc_word_p = sp->alloc_bits;
1674169689Skan
1675169689Skan      gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
1676169689Skan
1677169689Skan      object = sp->common.page;
1678132718Skan      do
1679132718Skan	{
1680169689Skan	  unsigned int i, n;
1681169689Skan	  alloc_type alloc_word;
1682169689Skan	  mark_type mark_word;
1683169689Skan
1684169689Skan	  alloc_word = *alloc_word_p++;
1685169689Skan	  mark_word = *mark_word_p++;
1686169689Skan
1687169689Skan	  if (mark_word)
1688169689Skan	    nomarksinpage = false;
1689169689Skan
1690169689Skan	  /* There ought to be some way to do this without looping...  */
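          /* Each pass of the loop below handles one allocated object in
             this word: alloc_ffs returns the 1-based position of the
             lowest set bit in ALLOC_WORD, we advance OBJECT to that
             object's start, test the matching mark bit, and then shift
             both words just past the bit we handled.  I counts how many
             bits of this word have been consumed.  */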
1691169689Skan	  i = 0;
1692169689Skan	  while ((n = alloc_ffs (alloc_word)) != 0)
1693132718Skan	    {
1694169689Skan	      /* Extend the current state for n - 1 bits.  We can't
1695169689Skan		 shift alloc_word by n, even though it isn't used in the
1696169689Skan		 loop, in case only the highest bit was set.  */
1697169689Skan	      alloc_word >>= n - 1;
1698169689Skan	      mark_word >>= n - 1;
1699169689Skan	      object += BYTES_PER_MARK_BIT * (n - 1);
1700169689Skan
1701169689Skan	      if (mark_word & 1)
1702132718Skan		{
1703169689Skan		  if (last_free)
1704169689Skan		    {
1705169689Skan		      VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
1706169689Skan								object
1707169689Skan								- last_free));
1708169689Skan		      poison_region (last_free, object - last_free);
1709169689Skan		      free_chunk (last_free, object - last_free, zone);
1710169689Skan		      last_free = NULL;
1711169689Skan		    }
1712169689Skan		  else
1713169689Skan		    allocated += object - last_object;
1714169689Skan		  last_object = object;
1715132718Skan		}
1716132718Skan	      else
1717132718Skan		{
1718169689Skan		  if (last_free == NULL)
1719169689Skan		    {
1720169689Skan		      last_free = object;
1721169689Skan		      allocated += object - last_object;
1722169689Skan		    }
1723169689Skan		  else
1724169689Skan		    zone_clear_object_alloc_bit (sp, object);
1725132718Skan		}
1726169689Skan
1727169689Skan	      /* Shift to just after the alloc bit we handled.  */
1728169689Skan	      alloc_word >>= 1;
1729169689Skan	      mark_word >>= 1;
1730169689Skan	      object += BYTES_PER_MARK_BIT;
1731169689Skan
1732169689Skan	      i += n;
1733132718Skan	    }
1734132718Skan
1735169689Skan	  object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
1736132718Skan	}
1737169689Skan      while (object < end);
1738132718Skan
1739132718Skan      if (nomarksinpage)
1740132718Skan	{
1741169689Skan	  *spp = snext;
1742132718Skan#ifdef ENABLE_GC_CHECKING
1743169689Skan	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (sp->common.page, SMALL_PAGE_SIZE));
1744132718Skan	  /* Poison the page.  */
1745169689Skan	  memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
1746132718Skan#endif
1747169689Skan	  free_small_page (sp);
1748132718Skan	  continue;
1749132718Skan	}
1750132718Skan      else if (last_free)
1751132718Skan	{
1752169689Skan	  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
1753169689Skan						    object - last_free));
1754169689Skan	  poison_region (last_free, object - last_free);
1755169689Skan	  free_chunk (last_free, object - last_free, zone);
1756132718Skan	}
1757169689Skan      else
1758169689Skan	allocated += object - last_object;
1759169689Skan
1760169689Skan      spp = &sp->next;
1761132718Skan    }
1762132718Skan
1763132718Skan  zone->allocated = allocated;
1764132718Skan}
1765132718Skan
1766132718Skan/* Mark-and-sweep routine for collecting a single zone.  NEED_MARKING
1767132718Skan   is true if we need to mark before sweeping, false if some other
1768132718Skan   zone collection has already performed marking for us.  Returns true
1769132718Skan   if we collected, false otherwise.  */
1770132718Skan
1771132718Skanstatic bool
1772132718Skanggc_collect_1 (struct alloc_zone *zone, bool need_marking)
1773132718Skan{
1774169689Skan#if 0
1775169689Skan  /* */
1776169689Skan  {
1777169689Skan    int i;
1778169689Skan    for (i = 0; i < NUM_FREE_BINS + 1; i++)
1779169689Skan      {
1780169689Skan	struct alloc_chunk *chunk;
1781169689Skan	int n, tot;
1782132718Skan
1783169689Skan	n = 0;
1784169689Skan	tot = 0;
1785169689Skan	chunk = zone->free_chunks[i];
1786169689Skan	while (chunk)
1787169689Skan	  {
1788169689Skan	    n++;
1789169689Skan	    tot += chunk->size;
1790169689Skan	    chunk = chunk->next_free;
1791169689Skan	  }
1792169689Skan	fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
1793169689Skan		 i, n, tot);
1794169689Skan      }
1795169689Skan  }
1796169689Skan  /* */
1797169689Skan#endif
1798132718Skan
1799132718Skan  if (!quiet_flag)
1800132718Skan    fprintf (stderr, " {%s GC %luk -> ",
1801132718Skan	     zone->name, (unsigned long) zone->allocated / 1024);
1802132718Skan
1803132718Skan  /* Zero the total allocated bytes.  This will be recalculated in the
1804132718Skan     sweep phase.  */
1805132718Skan  zone->allocated = 0;
1806132718Skan
1807132718Skan  /* Release the pages we freed the last time we collected, but didn't
1808132718Skan     reuse in the interim.  */
1809132718Skan  release_pages (zone);
1810132718Skan
1811132718Skan  if (need_marking)
1812169689Skan    {
1813169689Skan      zone_allocate_marks ();
1814169689Skan      ggc_mark_roots ();
1815169689Skan#ifdef GATHER_STATISTICS
1816169689Skan      ggc_prune_overhead_list ();
1817169689Skan#endif
1818169689Skan    }
1819169689Skan
1820132718Skan  sweep_pages (zone);
1821132718Skan  zone->was_collected = true;
1822132718Skan  zone->allocated_last_gc = zone->allocated;
1823132718Skan
1824132718Skan  if (!quiet_flag)
1825132718Skan    fprintf (stderr, "%luk}", (unsigned long) zone->allocated / 1024);
1826132718Skan  return true;
1827132718Skan}
1828132718Skan
1829169689Skan#ifdef GATHER_STATISTICS
1830132718Skan/* Calculate the average page survival rate in terms of number of
1831132718Skan   collections.  */
1832132718Skan
1833132718Skanstatic float
1834132718Skancalculate_average_page_survival (struct alloc_zone *zone)
1835132718Skan{
1836132718Skan  float count = 0.0;
1837132718Skan  float survival = 0.0;
1838169689Skan  struct small_page_entry *p;
1839169689Skan  struct large_page_entry *lp;
1840132718Skan  for (p = zone->pages; p; p = p->next)
1841132718Skan    {
1842132718Skan      count += 1.0;
1843169689Skan      survival += p->common.survived;
1844132718Skan    }
1845169689Skan  for (lp = zone->large_pages; lp; lp = lp->next)
1846169689Skan    {
1847169689Skan      count += 1.0;
1848169689Skan      survival += lp->common.survived;
1849169689Skan    }
1850132718Skan  return survival/count;
1851132718Skan}
1852169689Skan#endif
1853132718Skan
1854169689Skan/* Top level collection routine.  */
1855132718Skan
1856169689Skanvoid
1857169689Skanggc_collect (void)
1858132718Skan{
1859132718Skan  struct alloc_zone *zone;
1860169689Skan  bool marked = false;
1861132718Skan
1862169689Skan  timevar_push (TV_GC);
1863169689Skan
1864169689Skan  if (!ggc_force_collect)
1865132718Skan    {
1866169689Skan      float allocated_last_gc = 0, allocated = 0, min_expand;
1867169689Skan
1868169689Skan      for (zone = G.zones; zone; zone = zone->next_zone)
1869132718Skan	{
1870169689Skan	  allocated_last_gc += zone->allocated_last_gc;
1871169689Skan	  allocated += zone->allocated;
1872132718Skan	}
1873169689Skan
1874169689Skan      allocated_last_gc =
1875169689Skan	MAX (allocated_last_gc,
1876169689Skan	     (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
1877169689Skan      min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
1878169689Skan
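      /* For instance, if the last collection left 10MB live and the
         ggc-min-expand parameter is 30 (values chosen purely for
         illustration), we skip collecting until total allocation
         exceeds 13MB.  */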
1879169689Skan      if (allocated < allocated_last_gc + min_expand)
1880169689Skan	{
1881169689Skan	  timevar_pop (TV_GC);
1882169689Skan	  return;
1883169689Skan	}
1884132718Skan    }
1885132718Skan
1886132718Skan  /* Start by possibly collecting the main zone.  */
1887132718Skan  main_zone.was_collected = false;
1888132718Skan  marked |= ggc_collect_1 (&main_zone, true);
1889132718Skan
1890132718Skan  /* In order to keep the number of collections down, we don't
1891132718Skan     collect other zones unless we are collecting the main zone.  This
1892132718Skan     gives us roughly the same number of collections as we used to
1893132718Skan     have with the old gc.  The number of collections is important
1894132718Skan     because our main slowdown (according to profiling) is now in
1895132718Skan     marking.  So if we mark twice as often as we used to, we'll be
1896132718Skan     twice as slow.  Hopefully we'll avoid this cost when we mark
1897132718Skan     zone-at-a-time.  */
1898169689Skan  /* NOTE drow/2004-07-28: We now always collect the main zone, but
1899169689Skan     keep this code in case the heuristics are further refined.  */
1900132718Skan
1901132718Skan  if (main_zone.was_collected)
1902132718Skan    {
1903132718Skan      struct alloc_zone *zone;
1904132718Skan
1905132718Skan      for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
1906132718Skan	{
1907132718Skan	  zone->was_collected = false;
1908132718Skan	  marked |= ggc_collect_1 (zone, !marked);
1909132718Skan	}
1910132718Skan    }
1911132718Skan
1912169689Skan#ifdef GATHER_STATISTICS
1913132718Skan  /* Print page survival stats, if someone wants them.  */
1914132718Skan  if (GGC_DEBUG_LEVEL >= 2)
1915132718Skan    {
1916132718Skan      for (zone = G.zones; zone; zone = zone->next_zone)
1917132718Skan	{
1918132718Skan	  if (zone->was_collected)
1919132718Skan	    {
1920169689Skan	      float f = calculate_average_page_survival (zone);
1921132718Skan	      printf ("Average page survival in zone `%s' is %f\n",
1922132718Skan		      zone->name, f);
1923132718Skan	    }
1924132718Skan	}
1925132718Skan    }
1926169689Skan#endif
1927132718Skan
1928132718Skan  if (marked)
1929169689Skan    zone_free_marks ();
1930132718Skan
1931132718Skan  /* Free dead zones.  */
1932132718Skan  for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
1933132718Skan    {
1934132718Skan      if (zone->next_zone->dead)
1935132718Skan	{
1936132718Skan	  struct alloc_zone *dead_zone = zone->next_zone;
1937132718Skan
1938132718Skan	  printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
1939132718Skan
1940132718Skan	  /* The zone must be empty.  */
1941169689Skan	  gcc_assert (!dead_zone->allocated);
1942132718Skan
1943132718Skan	  /* Unchain the dead zone, release all its pages and free it.  */
1944132718Skan	  zone->next_zone = zone->next_zone->next_zone;
1945132718Skan	  release_pages (dead_zone);
1946132718Skan	  free (dead_zone);
1947132718Skan	}
1948132718Skan    }
1949132718Skan
1950132718Skan  timevar_pop (TV_GC);
1951132718Skan}
1952132718Skan
1953132718Skan/* Print allocation statistics.  */
1954169689Skan#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
1955169689Skan		  ? (x) \
1956169689Skan		  : ((x) < 1024*1024*10 \
1957169689Skan		     ? (x) / 1024 \
1958169689Skan		     : (x) / (1024*1024))))
1959169689Skan#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
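
/* For example, these macros print 5000 as "5000" with a blank suffix,
   500000 as "488k", and 50000000 as "47M": sizes below 10K stay in
   bytes, sizes below 10M are shown in kilobytes, and anything larger
   in megabytes.  */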
1960132718Skan
1961132718Skanvoid
1962132718Skanggc_print_statistics (void)
1963132718Skan{
1964169689Skan  struct alloc_zone *zone;
1965169689Skan  struct ggc_statistics stats;
1966169689Skan  size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
1967169689Skan  size_t pte_overhead, i;
1968169689Skan
1969169689Skan  /* Clear the statistics.  */
1970169689Skan  memset (&stats, 0, sizeof (stats));
1971169689Skan
1972169689Skan  /* Make sure collection will really occur.  */
1973169689Skan  ggc_force_collect = true;
1974169689Skan
1975169689Skan  /* Collect and print the statistics common across collectors.  */
1976169689Skan  ggc_print_common_statistics (stderr, &stats);
1977169689Skan
1978169689Skan  ggc_force_collect = false;
1979169689Skan
1980169689Skan  /* Release free pages so that we will not count the bytes allocated
1981169689Skan     there as part of the total allocated memory.  */
1982169689Skan  for (zone = G.zones; zone; zone = zone->next_zone)
1983169689Skan    release_pages (zone);
1984169689Skan
1985169689Skan  /* Collect some information about the various sizes of
1986169689Skan     allocation.  */
1987169689Skan  fprintf (stderr,
1988169689Skan           "Memory still allocated at the end of the compilation process\n");
1989169689Skan
1990169689Skan  fprintf (stderr, "%20s %10s  %10s  %10s\n",
1991169689Skan	   "Zone", "Allocated", "Used", "Overhead");
1992169689Skan  for (zone = G.zones; zone; zone = zone->next_zone)
1993169689Skan    {
1994169689Skan      struct large_page_entry *large_page;
1995169689Skan      size_t overhead, allocated, in_use;
1996169689Skan
1997169689Skan      /* Skip empty zones.  */
1998169689Skan      if (!zone->pages && !zone->large_pages)
1999169689Skan	continue;
2000169689Skan
2001169689Skan      allocated = in_use = 0;
2002169689Skan
2003169689Skan      overhead = sizeof (struct alloc_zone);
2004169689Skan
2005169689Skan      for (large_page = zone->large_pages; large_page != NULL;
2006169689Skan	   large_page = large_page->next)
2007169689Skan	{
2008169689Skan	  allocated += large_page->bytes;
2009169689Skan	  in_use += large_page->bytes;
2010169689Skan	  overhead += sizeof (struct large_page_entry);
2011169689Skan	}
2012169689Skan
2013169689Skan      /* There's no easy way to walk through the small pages finding
2014169689Skan	 used and unused objects.  Instead, add all the pages, and
2015169689Skan	 subtract out the free list.  */
2016169689Skan
2017169689Skan      allocated += GGC_PAGE_SIZE * zone->n_small_pages;
2018169689Skan      in_use += GGC_PAGE_SIZE * zone->n_small_pages;
2019169689Skan      overhead += G.small_page_overhead * zone->n_small_pages;
2020169689Skan
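      /* IN_USE now over-counts by exactly the bytes sitting on the free
         lists, so walk every bin and subtract those chunks back out.  */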
2021169689Skan      for (i = 0; i <= NUM_FREE_BINS; i++)
2022169689Skan	{
2023169689Skan	  struct alloc_chunk *chunk = zone->free_chunks[i];
2024169689Skan	  while (chunk)
2025169689Skan	    {
2026169689Skan	      in_use -= ggc_get_size (chunk);
2027169689Skan	      chunk = chunk->next_free;
2028169689Skan	    }
2029169689Skan	}
2030169689Skan
2031169689Skan      fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
2032169689Skan	       zone->name,
2033169689Skan	       SCALE (allocated), LABEL (allocated),
2034169689Skan	       SCALE (in_use), LABEL (in_use),
2035169689Skan	       SCALE (overhead), LABEL (overhead));
2036169689Skan
2037169689Skan      gcc_assert (in_use == zone->allocated);
2038169689Skan
2039169689Skan      total_overhead += overhead;
2040169689Skan      total_allocated += zone->allocated;
2041169689Skan      total_bytes_mapped += zone->bytes_mapped;
2042169689Skan    }
2043169689Skan
2044169689Skan  /* Count the size of the page table as best we can.  */
2045169689Skan#if HOST_BITS_PER_PTR <= 32
2046169689Skan  pte_overhead = sizeof (G.lookup);
2047169689Skan  for (i = 0; i < PAGE_L1_SIZE; i++)
2048169689Skan    if (G.lookup[i])
2049169689Skan      pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2050169689Skan#else
2051169689Skan  {
2052169689Skan    page_table table = G.lookup;
2053169689Skan    pte_overhead = 0;
2054169689Skan    while (table)
2055169689Skan      {
2056169689Skan	pte_overhead += sizeof (*table);
2057169689Skan	for (i = 0; i < PAGE_L1_SIZE; i++)
2058169689Skan	  if (table->table[i])
2059169689Skan	    pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
2060169689Skan	table = table->next;
2061169689Skan      }
2062169689Skan  }
2063169689Skan#endif
2064169689Skan  fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
2065169689Skan	   "", "", SCALE (pte_overhead), LABEL (pte_overhead));
2066169689Skan  total_overhead += pte_overhead;
2067169689Skan
2068169689Skan  fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
2069169689Skan	   SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
2070169689Skan	   SCALE (total_allocated), LABEL(total_allocated),
2071169689Skan	   SCALE (total_overhead), LABEL (total_overhead));
2072169689Skan
2073169689Skan#ifdef GATHER_STATISTICS
2074169689Skan  {
2075169689Skan    unsigned long long all_overhead = 0, all_allocated = 0;
2076169689Skan    unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
2077169689Skan    unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
2078169689Skan    unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
2079169689Skan
2080169689Skan    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
2081169689Skan
2082169689Skan    for (zone = G.zones; zone; zone = zone->next_zone)
2083169689Skan      {
2084169689Skan	all_overhead += zone->stats.total_overhead;
2085169689Skan	all_allocated += zone->stats.total_allocated;
2086169689Skan
2087169689Skan	all_allocated_under32 += zone->stats.total_allocated_under32;
2088169689Skan	all_overhead_under32 += zone->stats.total_overhead_under32;
2089169689Skan
2090169689Skan	all_allocated_under64 += zone->stats.total_allocated_under64;
2091169689Skan	all_overhead_under64 += zone->stats.total_overhead_under64;
2092169689Skan
2093169689Skan	all_allocated_under128 += zone->stats.total_allocated_under128;
2094169689Skan	all_overhead_under128 += zone->stats.total_overhead_under128;
2095169689Skan
2096169689Skan	fprintf (stderr, "%20s:                  %10lld\n",
2097169689Skan		 zone->name, zone->stats.total_allocated);
2098169689Skan      }
2099169689Skan
2100169689Skan    fprintf (stderr, "\n");
2101169689Skan
2102169689Skan    fprintf (stderr, "Total Overhead:                        %10lld\n",
2103169689Skan             all_overhead);
2104169689Skan    fprintf (stderr, "Total Allocated:                       %10lld\n",
2105169689Skan             all_allocated);
2106169689Skan
2107169689Skan    fprintf (stderr, "Total Overhead  under  32B:            %10lld\n",
2108169689Skan             all_overhead_under32);
2109169689Skan    fprintf (stderr, "Total Allocated under  32B:            %10lld\n",
2110169689Skan             all_allocated_under32);
2111169689Skan    fprintf (stderr, "Total Overhead  under  64B:            %10lld\n",
2112169689Skan             all_overhead_under64);
2113169689Skan    fprintf (stderr, "Total Allocated under  64B:            %10lld\n",
2114169689Skan             all_allocated_under64);
2115169689Skan    fprintf (stderr, "Total Overhead  under 128B:            %10lld\n",
2116169689Skan             all_overhead_under128);
2117169689Skan    fprintf (stderr, "Total Allocated under 128B:            %10lld\n",
2118169689Skan             all_allocated_under128);
2119169689Skan  }
2120169689Skan#endif
2121132718Skan}
2122132718Skan
2123169689Skan/* Precompiled header support.  */
2124169689Skan
2125169689Skan/* For precompiled headers, we sort objects based on their type.  We
2126169689Skan   also sort various objects into their own buckets; currently this
2127169689Skan   covers strings and IDENTIFIER_NODE trees.  The choices of how
2128169689Skan   to sort buckets have not yet been tuned.  */
2129169689Skan
2130169689Skan#define NUM_PCH_BUCKETS		(gt_types_enum_last + 3)
2131169689Skan
2132169689Skan#define OTHER_BUCKET		(gt_types_enum_last + 0)
2133169689Skan#define IDENTIFIER_BUCKET	(gt_types_enum_last + 1)
2134169689Skan#define STRING_BUCKET		(gt_types_enum_last + 2)
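
/* So, for example, an IDENTIFIER_NODE tree lands in IDENTIFIER_BUCKET,
   a string lands in STRING_BUCKET, other objects of unknown type land
   in OTHER_BUCKET, and everything else is grouped by its gt_types_enum
   value; see pch_bucket below.  */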
2135169689Skan
2136169689Skanstruct ggc_pch_ondisk
2137169689Skan{
2138169689Skan  size_t total;
2139169689Skan  size_t type_totals[NUM_PCH_BUCKETS];
2140169689Skan};
2141169689Skan
2142132718Skanstruct ggc_pch_data
2143132718Skan{
2144169689Skan  struct ggc_pch_ondisk d;
2145132718Skan  size_t base;
2146169689Skan  size_t orig_base;
2147169689Skan  size_t alloc_size;
2148169689Skan  alloc_type *alloc_bits;
2149169689Skan  size_t type_bases[NUM_PCH_BUCKETS];
2150169689Skan  size_t start_offset;
2151132718Skan};
2152132718Skan
2153169689Skan/* Initialize the PCH data structure.  */
2154132718Skan
2155132718Skanstruct ggc_pch_data *
2156132718Skaninit_ggc_pch (void)
2157132718Skan{
2158132718Skan  return xcalloc (sizeof (struct ggc_pch_data), 1);
2159132718Skan}
2160132718Skan
2161169689Skan/* Return which of the page-aligned buckets the object at X, with type
2162169689Skan   TYPE, should be sorted into in the PCH.  Strings will have
2163169689Skan   IS_STRING set and TYPE will be gt_types_enum_last.  Other objects
2164169689Skan   of unknown type will also have TYPE equal to gt_types_enum_last.  */
2165169689Skan
2166169689Skanstatic int
2167169689Skanpch_bucket (void *x, enum gt_types_enum type,
2168169689Skan	    bool is_string)
2169169689Skan{
2170169689Skan  /* Sort identifiers into their own bucket, to improve locality
2171169689Skan     when searching the identifier hash table.  */
2172169689Skan  if (type == gt_ggc_e_14lang_tree_node
2173169689Skan      && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
2174169689Skan    return IDENTIFIER_BUCKET;
2175169689Skan  else if (type == gt_types_enum_last)
2176169689Skan    {
2177169689Skan      if (is_string)
2178169689Skan	return STRING_BUCKET;
2179169689Skan      return OTHER_BUCKET;
2180169689Skan    }
2181169689Skan  return type;
2182169689Skan}
2183169689Skan
2184132718Skan/* Add the size of object X to the size of the PCH data.  */
2185132718Skan
2186132718Skanvoid
2187132718Skanggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2188169689Skan		      size_t size, bool is_string, enum gt_types_enum type)
2189132718Skan{
2190169689Skan  /* NOTE: Right now we don't need to align up the size of any objects.
2191169689Skan     Strings can be unaligned, and everything else is allocated to a
2192169689Skan     MAX_ALIGNMENT boundary already.  */
2193169689Skan
2194169689Skan  d->d.type_totals[pch_bucket (x, type, is_string)] += size;
2195132718Skan}
2196132718Skan
2197132718Skan/* Return the total size of the PCH data.  */
2198132718Skan
2199132718Skansize_t
2200132718Skanggc_pch_total_size (struct ggc_pch_data *d)
2201132718Skan{
2202169689Skan  enum gt_types_enum i;
2203169689Skan  size_t alloc_size, total_size;
2204169689Skan
2205169689Skan  total_size = 0;
2206169689Skan  for (i = 0; i < NUM_PCH_BUCKETS; i++)
2207169689Skan    {
2208169689Skan      d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
2209169689Skan      total_size += d->d.type_totals[i];
2210169689Skan    }
2211169689Skan  d->d.total = total_size;
2212169689Skan
2213169689Skan  /* Include the size of the allocation bitmap.  */
2214169689Skan  alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
2215169689Skan  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2216169689Skan  d->alloc_size = alloc_size;
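  /* Illustration only (the real BYTES_PER_ALLOC_BIT is defined earlier in
     this file): with one allocation bit per 8 bytes, 3MB of PCH data needs
     a 3MB / 64 = 48K bitmap, rounded up to MAX_ALIGNMENT.  */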
2217169689Skan
2218169689Skan  return d->d.total + alloc_size;
2219132718Skan}
2220132718Skan
2221132718Skan/* Set the base address for the objects in the PCH file.  */
2222132718Skan
2223132718Skanvoid
2224169689Skanggc_pch_this_base (struct ggc_pch_data *d, void *base_)
2225132718Skan{
2226169689Skan  int i;
2227169689Skan  size_t base = (size_t) base_;
2228169689Skan
2229169689Skan  d->base = d->orig_base = base;
2230169689Skan  for (i = 0; i < NUM_PCH_BUCKETS; i++)
2231169689Skan    {
2232169689Skan      d->type_bases[i] = base;
2233169689Skan      base += d->d.type_totals[i];
2234169689Skan    }
2235169689Skan
2236169689Skan  if (d->alloc_bits == NULL)
2237169689Skan    d->alloc_bits = xcalloc (1, d->alloc_size);
2238132718Skan}
2239132718Skan
2240132718Skan/* Allocate a place for object X of size SIZE in the PCH file.  */
2241132718Skan
2242132718Skanchar *
2243132718Skanggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
2244169689Skan		      size_t size, bool is_string,
2245169689Skan		      enum gt_types_enum type)
2246132718Skan{
2247169689Skan  size_t alloc_word, alloc_bit;
2248132718Skan  char *result;
2249169689Skan  int bucket = pch_bucket (x, type, is_string);
2250132718Skan
2251169689Skan  /* Record the start of the object in the allocation bitmap.  We
2252169689Skan     can't assert that the allocation bit is previously clear, because
2253169689Skan     strings may violate the invariant that they are at least
2254169689Skan     BYTES_PER_ALLOC_BIT long.  This is harmless - ggc_get_size
2255169689Skan     should not be called for strings.  */
2256169689Skan  alloc_word = ((d->type_bases[bucket] - d->orig_base)
2257169689Skan		/ (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
2258169689Skan  alloc_bit = ((d->type_bases[bucket] - d->orig_base)
2259169689Skan	       / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
2260169689Skan  d->alloc_bits[alloc_word] |= 1L << alloc_bit;
2261169689Skan
2262169689Skan  /* Place the object at the current pointer for this bucket.  */
2263169689Skan  result = (char *) d->type_bases[bucket];
2264169689Skan  d->type_bases[bucket] += size;
2265169689Skan  return result;
2266132718Skan}
2267132718Skan
2268132718Skan/* Prepare to write out the PCH data to file F.  */
2269132718Skan
2270132718Skanvoid
2271169689Skanggc_pch_prepare_write (struct ggc_pch_data *d,
2272169689Skan		       FILE *f)
2273132718Skan{
2274169689Skan  /* We seek around a lot while writing.  Record where the padding
2275169689Skan     in the PCH file ends, so that we can locate each object's
2276169689Skan     offset in the file.  */
2277169689Skan  d->start_offset = ftell (f);
2278132718Skan}
2279132718Skan
2280132718Skan/* Write out object X of SIZE to file F.  */
2281132718Skan
2282132718Skanvoid
2283169689Skanggc_pch_write_object (struct ggc_pch_data *d,
2284169689Skan		      FILE *f, void *x, void *newx,
2285169689Skan		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2286132718Skan{
2287169689Skan  if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
2288169689Skan    fatal_error ("can't seek PCH file: %m");
2289169689Skan
2290169689Skan  if (fwrite (x, size, 1, f) != 1)
2291132718Skan    fatal_error ("can't write PCH file: %m");
2292132718Skan}
2293132718Skan
2294132718Skanvoid
2295132718Skanggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2296132718Skan{
2297169689Skan  /* Write out the allocation bitmap.  */
2298169689Skan  if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
2299169689Skan    fatal_error ("can't seek PCH file: %m");
2300169689Skan
2301169689Skan  if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
2302169689Skan    fatal_error ("can't write PCH file: %m");
2303169689Skan
2304169689Skan  /* Done with the PCH, so write out our footer.  */
2305132718Skan  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2306132718Skan    fatal_error ("can't write PCH file: %m");
2307169689Skan
2308169689Skan  free (d->alloc_bits);
2309132718Skan  free (d);
2310132718Skan}
2311169689Skan
2312169689Skan/* The PCH file from F has been mapped at ADDR.  Read in any
2313169689Skan   additional data from the file and set up the GC state.  */
2314169689Skan
2315132718Skanvoid
2316132718Skanggc_pch_read (FILE *f, void *addr)
2317132718Skan{
2318132718Skan  struct ggc_pch_ondisk d;
2319169689Skan  size_t alloc_size;
2320169689Skan  struct alloc_zone *zone;
2321169689Skan  struct page_entry *pch_page;
2322169689Skan  char *p;
2323169689Skan
2324132718Skan  if (fread (&d, sizeof (d), 1, f) != 1)
2325132718Skan    fatal_error ("can't read PCH file: %m");
2326169689Skan
2327169689Skan  alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
2328169689Skan  alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
2329169689Skan
2330169689Skan  pch_zone.bytes = d.total;
2331169689Skan  pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
2332169689Skan  pch_zone.page = (char *) addr;
2333169689Skan  pch_zone.end = (char *) pch_zone.alloc_bits;
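  /* The mapped PCH image is the object data followed immediately by the
     allocation bitmap written by ggc_pch_finish, so the start of the
     bitmap doubles as the end of the object region.  */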
2334169689Skan
2335169689Skan  /* We've just read in a PCH file.  So, every object that used to be
2336169689Skan     allocated is now free.  */
2337169689Skan  for (zone = G.zones; zone; zone = zone->next_zone)
2338169689Skan    {
2339169689Skan      struct small_page_entry *page, *next_page;
2340169689Skan      struct large_page_entry *large_page, *next_large_page;
2341169689Skan
2342169689Skan      zone->allocated = 0;
2343169689Skan
2344169689Skan      /* Clear the zone's free chunk list.  */
2345169689Skan      memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
2346169689Skan      zone->high_free_bin = 0;
2347169689Skan      zone->cached_free = NULL;
2348169689Skan      zone->cached_free_size = 0;
2349169689Skan
2350169689Skan      /* Move all the small pages onto the free list.  */
2351169689Skan      for (page = zone->pages; page != NULL; page = next_page)
2352169689Skan	{
2353169689Skan	  next_page = page->next;
2354169689Skan	  memset (page->alloc_bits, 0,
2355169689Skan		  G.small_page_overhead - PAGE_OVERHEAD);
2356169689Skan	  free_small_page (page);
2357169689Skan	}
2358169689Skan
2359169689Skan      /* Discard all the large pages.  */
2360169689Skan      for (large_page = zone->large_pages; large_page != NULL;
2361169689Skan	   large_page = next_large_page)
2362169689Skan	{
2363169689Skan	  next_large_page = large_page->next;
2364169689Skan	  free_large_page (large_page);
2365169689Skan	}
2366169689Skan
2367169689Skan      zone->pages = NULL;
2368169689Skan      zone->large_pages = NULL;
2369169689Skan    }
2370169689Skan
2371169689Skan  /* Allocate the dummy page entry for the PCH, and set all pages
2372169689Skan     mapped into the PCH to reference it.  */
2373169689Skan  pch_page = xcalloc (1, sizeof (struct page_entry));
2374169689Skan  pch_page->page = pch_zone.page;
2375169689Skan  pch_page->pch_p = true;
2376169689Skan
2377169689Skan  for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
2378169689Skan    set_page_table_entry (p, pch_page);
2379132718Skan}
2380