17332Sjkh/* "Bag-of-pages" garbage collector for the GNU compiler. 27332Sjkh Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005 37332Sjkh Free Software Foundation, Inc. 47332Sjkh 57332SjkhThis file is part of GCC. 67332Sjkh 77332SjkhGCC is free software; you can redistribute it and/or modify it under 87332Sjkhthe terms of the GNU General Public License as published by the Free 97332SjkhSoftware Foundation; either version 2, or (at your option) any later 107332Sjkhversion. 117332Sjkh 127332SjkhGCC is distributed in the hope that it will be useful, but WITHOUT ANY 137332SjkhWARRANTY; without even the implied warranty of MERCHANTABILITY or 147332SjkhFITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 157332Sjkhfor more details. 167332Sjkh 177332SjkhYou should have received a copy of the GNU General Public License 187332Sjkhalong with GCC; see the file COPYING. If not, write to the Free 197332SjkhSoftware Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 207332Sjkh02110-1301, USA. */ 217332Sjkh 227332Sjkh#include "config.h" 237332Sjkh#include "system.h" 247332Sjkh#include "coretypes.h" 257332Sjkh#include "tm.h" 267332Sjkh#include "tree.h" 277332Sjkh#include "rtl.h" 2897748Sschweikh#include "tm_p.h" 297332Sjkh#include "toplev.h" 307332Sjkh#include "flags.h" 317332Sjkh#include "ggc.h" 327332Sjkh#include "timevar.h" 337332Sjkh#include "params.h" 347332Sjkh#include "tree-flow.h" 357332Sjkh#ifdef ENABLE_VALGRIND_CHECKING 367332Sjkh# ifdef HAVE_VALGRIND_MEMCHECK_H 377332Sjkh# include <valgrind/memcheck.h> 387332Sjkh# elif defined HAVE_MEMCHECK_H 397332Sjkh# include <memcheck.h> 407332Sjkh# else 417332Sjkh# include <valgrind.h> 427332Sjkh# endif 43119419Sobrien#else 44119419Sobrien/* Avoid #ifdef:s when we can help it. */ 457332Sjkh#define VALGRIND_DISCARD(x) 46119419Sobrien#endif 47106451Smdodd 487332Sjkh/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a 497332Sjkh file open. Prefer either to valloc. */ 507332Sjkh#ifdef HAVE_MMAP_ANON 5161011Speter# undef HAVE_MMAP_DEV_ZERO 527332Sjkh 53106450Smdodd# include <sys/mman.h> 5460041Sphk# ifndef MAP_FAILED 557332Sjkh# define MAP_FAILED -1 56106450Smdodd# endif 5761011Speter# if !defined (MAP_ANONYMOUS) && defined (MAP_ANON) 587369Sbde# define MAP_ANONYMOUS MAP_ANON 597332Sjkh# endif 607332Sjkh# define USING_MMAP 61106449Smdodd 62106449Smdodd#endif 63106449Smdodd 647332Sjkh#ifdef HAVE_MMAP_DEV_ZERO 65106449Smdodd 6612502Sjulian# include <sys/mman.h> 67106449Smdodd# ifndef MAP_FAILED 68106449Smdodd# define MAP_FAILED -1 69106449Smdodd# endif 707332Sjkh# define USING_MMAP 717332Sjkh 727332Sjkh#endif 737332Sjkh 747332Sjkh#ifndef USING_MMAP 757332Sjkh#define USING_MALLOC_PAGE_GROUPS 767332Sjkh#endif 777332Sjkh 787332Sjkh/* Strategy: 797332Sjkh 807332Sjkh This garbage-collecting allocator allocates objects on one of a set 817332Sjkh of pages. Each page can allocate objects of a single size only; 827332Sjkh available sizes are powers of two starting at four bytes. The size 837332Sjkh of an allocation request is rounded up to the next power of two 847332Sjkh (`order'), and satisfied from the appropriate page. 857332Sjkh 867332Sjkh Each page is recorded in a page-entry, which also maintains an 877332Sjkh in-use bitmap of object positions on the page. This allows the 887332Sjkh allocation state of a particular object to be flipped without 897332Sjkh touching the page itself. 907332Sjkh 917332Sjkh Each page-entry also has a context depth, which is used to track 927332Sjkh pushing and popping of allocation contexts. 
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif


/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |      |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
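/* Illustrative sketch only, not part of the collector: on a
   hypothetical 32-bit host with 4096-byte pages (G.lg_pagesize == 12,
   so PAGE_L2_BITS == 12), a pointer such as 0xdeadb000 splits into
   L1 == 0xde (the top 8 bits) and L2 == 0xadb (the next 12 bits); the
   low 12 bits are the offset within the page and are ignored by the
   lookup.  The helper below is hypothetical, for exposition only.  */
#if 0
static void
example_lookup_indices (const void *p)
{
  size_t L1 = LOOKUP_L1 (p);	/* Index into the first-level table.  */
  size_t L2 = LOOKUP_L2 (p);	/* Index into the second-level table.  */
  fprintf (stderr, "p=%p -> L1=%lu, L2=%lu\n",
	   p, (unsigned long) L1, (unsigned long) L2);
}
#endif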
/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
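/* Worked illustration only: for Z == 24 == 8 * 3, compute_inverse
   below produces S == 3 and M == 0xaaaaaaab, the multiplicative
   inverse of 3 modulo 2^32 (3 * 0xaaaaaaab == 2^33 + 1).  Then for
   O == 48, (48 * 0xaaaaaaab) mod 2^32 == 16 and 16 >> 3 == 2, which
   is indeed 48 / 24.  A hypothetical self-check, for exposition:  */
#if 0
static void
check_offset_to_bit (unsigned order)
{
  size_t z = OBJECT_SIZE (order), o;
  /* Every offset that is a multiple of Z must map to its index.  */
  for (o = 0; o < G.pagesize; o += z)
    gcc_assert (OFFSET_TO_BIT (o, order) == o / z);
}
#endif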
/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  sizeof (struct stmt_ann_d),
  sizeof (struct var_ann_d),
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_list),
  sizeof (struct tree_ssa_name),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (bitmap_element),
  /* PHI nodes with one to three arguments are already covered by the
     above sizes.  */
  sizeof (struct tree_phi_node) + sizeof (struct phi_arg_d) * 3,
  TREE_EXP_SIZE (2),
  RTL_SIZE (2),			/* MEM, PLUS, etc.  */
  RTL_SIZE (9),			/* INSN */
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    long double d;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.  The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This structure is indexed by the given depth we
     are interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object
  {
    void *object;
    struct free_object *next;
  } *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total memory allocated with ggc_alloc.  */
    unsigned long long total_allocated;
    /* Total overhead for memory to be allocated with ggc_alloc.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
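/* Worked example, for illustration only: on a host with 64-bit longs,
   BITMAP_SIZE (100) == CEIL (100, 64) * sizeof (long) == 2 * 8 == 16
   bytes, i.e. two bitmap words cover the 100 in-use bits with room to
   spare for the one-past-the-end sentinel bit set in alloc_page.  */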
*/ 483106451Smdodd#define INITIAL_PTE_COUNT 128 4847332Sjkh 4857332Sjkhstatic int ggc_allocated_p (const void *); 4867332Sjkhstatic page_entry *lookup_page_table_entry (const void *); 487106449Smdoddstatic void set_page_table_entry (void *, page_entry *); 4887332Sjkh#ifdef USING_MMAP 4897332Sjkhstatic char *alloc_anon (char *, size_t); 490106449Smdodd#endif 491106490Smdodd#ifdef USING_MALLOC_PAGE_GROUPS 492106451Smdoddstatic size_t page_group_index (char *, char *); 4937332Sjkhstatic void set_page_group_in_use (page_group *, char *); 4947332Sjkhstatic void clear_page_group_in_use (page_group *, char *); 4957332Sjkh#endif 496106449Smdoddstatic struct page_entry * alloc_page (unsigned); 4977332Sjkhstatic void free_page (struct page_entry *); 4987332Sjkhstatic void release_pages (void); 4997332Sjkhstatic void clear_marks (void); 500106490Smdoddstatic void sweep_pages (void); 501106451Smdoddstatic void ggc_recalculate_in_use_p (page_entry *); 5027332Sjkhstatic void compute_inverse (unsigned); 503106449Smdoddstatic inline void adjust_depth (void); 504106451Smdoddstatic void move_ptes_to_front (int, int); 5057332Sjkh 506106449Smdoddvoid debug_print_page_list (int); 507106451Smdoddstatic void push_depth (unsigned int); 5088876Srgrimesstatic void push_by_depth (page_entry *, unsigned long *); 509106490Smdodd 510106490Smdodd/* Push an entry onto G.depth. */ 511106490Smdodd 512106490Smdoddinline static void 5137332Sjkhpush_depth (unsigned int i) 514106449Smdodd{ 515106490Smdodd if (G.depth_in_use >= G.depth_max) 516106490Smdodd { 517106490Smdodd G.depth_max *= 2; 5187332Sjkh G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int)); 519106451Smdodd } 5207332Sjkh G.depth[G.depth_in_use++] = i; 5217332Sjkh} 5227332Sjkh 523106449Smdodd/* Push an entry onto G.by_depth and G.save_in_use. */ 5247332Sjkh 525106449Smdoddinline static void 526106449Smdoddpush_by_depth (page_entry *p, unsigned long *s) 527106451Smdodd{ 528106449Smdodd if (G.by_depth_in_use >= G.by_depth_max) 5297332Sjkh { 5307332Sjkh G.by_depth_max *= 2; 5317332Sjkh G.by_depth = xrealloc (G.by_depth, 532106449Smdodd G.by_depth_max * sizeof (page_entry *)); 5337332Sjkh G.save_in_use = xrealloc (G.save_in_use, 5347332Sjkh G.by_depth_max * sizeof (unsigned long *)); 535106490Smdodd } 536106490Smdodd G.by_depth[G.by_depth_in_use] = p; 5377332Sjkh G.save_in_use[G.by_depth_in_use++] = s; 538106449Smdodd} 539106449Smdodd 540106449Smdodd#if (GCC_VERSION < 3001) 5417332Sjkh#define prefetch(X) ((void) X) 542106451Smdodd#else 5437332Sjkh#define prefetch(X) __builtin_prefetch (X) 544106451Smdodd#endif 5457332Sjkh 5467332Sjkh#define save_in_use_p_i(__i) \ 5477332Sjkh (G.save_in_use[__i]) 548141031Ssobomax#define save_in_use_p(__p) \ 5497332Sjkh (save_in_use_p_i (__p->index_by_depth)) 5507332Sjkh 5517332Sjkh/* Returns nonzero if P was allocated in GC'able memory. 
static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = xcalloc (1, sizeof(*table));
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */
void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */
static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
	{
	  e = xcalloc (1, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */
      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = xmalloc (alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = xcalloc (1, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = xcalloc (1, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
#ifdef USING_MMAP
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};
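/* Illustrative examples only: size_lookup[32] == 5 because a 32-byte
   request fits exactly in a 2^5-byte object, while size_lookup[33] == 6
   rounds a 33-byte request up to 64 bytes.  Note that init_ggc below
   rewrites some entries so that requests near the sizes in
   extra_order_size_table use those tighter orders instead.  A
   hypothetical sanity check over the untuned table, for exposition:  */
#if 0
static void
check_size_lookup (void)
{
  size_t i;
  for (i = 1; i < NUM_SIZE_LOOKUP; i++)
    /* Each entry names an order whose object size covers the request,
       and no entry is below the 8-byte (order 3) minimum.  */
    gcc_assert (((size_t) 1 << size_lookup[i]) >= i
		&& size_lookup[i] >= 3);
}
#endif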
/* Typed allocation function.  Does nothing special in this collector.  */

void *
ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
		      MEM_STAT_DECL)
{
  return ggc_alloc_stat (size PASS_MEM_STAT);
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  if (size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
#ifdef GATHER_STATISTICS
  ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
		       result PASS_MEM_STAT);
#endif

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
					    object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

#ifdef GATHER_STATISTICS
  {
    size_t overhead = object_size - size;

    G.stats.total_overhead += overhead;
    G.stats.total_allocated += object_size;
    G.stats.total_overhead_per_order[order] += overhead;
    G.stats.total_allocated_per_order[order] += object_size;

    if (size <= 32)
      {
	G.stats.total_overhead_under32 += overhead;
	G.stats.total_allocated_under32 += object_size;
      }
    if (size <= 64)
      {
	G.stats.total_overhead_under64 += overhead;
	G.stats.total_allocated_under64 += object_size;
      }
    if (size <= 128)
      {
	G.stats.total_overhead_under128 += overhead;
	G.stats.total_allocated_under128 += object_size;
      }
  }
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}
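/* Worked illustration only: on a host where HOST_BITS_PER_LONG is 64,
   the object at bit position 70 is tracked by in_use_p[1] under mask
   1UL << 6, since 70 / 64 == 1 and 70 % 64 == 6.  The hypothetical
   helper below just restates the index arithmetic used above.  */
#if 0
static int
example_bit_position (page_entry *entry, const void *p)
{
  unsigned bit = OFFSET_TO_BIT (((const char *) p) - entry->page,
				entry->order);
  unsigned word = bit / HOST_BITS_PER_LONG;	/* Which bitmap word.  */
  unsigned long mask
    = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG); /* Which bit.  */
  return (entry->in_use_p[word] & mask) != 0;
}
#endif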
/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

#ifdef GATHER_STATISTICS
  ggc_free_overhead (p);
#endif

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
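/* Worked illustration only, not collector code: the loop above is a
   Newton iteration on the 2-adic inverse, doubling the number of
   correct low-order bits each pass.  For size == 3 on a 32-bit
   size_t, inv goes 3 -> 0xffffffeb -> 0xfffffaab -> 0xffaaaaab ->
   0xaaaaaaab, and indeed 3 * 0xaaaaaaab == 2^33 + 1 == 1 (mod 2^32),
   so a 24-byte order gets the pair mult == 0xaaaaaaab, shift == 3.  */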
/* Initialize the ggc-mmap allocator.  */
void
init_ggc (void)
{
  unsigned order;

  G.pagesize = getpagesize ();
  G.lg_pagesize = exact_log2 (G.pagesize);

#ifdef HAVE_MMAP_DEV_ZERO
  G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
  if (G.dev_zero_fd == -1)
    internal_error ("open /dev/zero: %m");
#endif

#if 0
  G.debug_file = fopen ("ggc-mmap.debug", "w");
#else
  G.debug_file = stdout;
#endif

#ifdef USING_MMAP
  /* StunOS has an amazing off-by-one error for the first mmap allocation
     after fiddling with RLIMIT_STACK.  The result, as hard as it is to
     believe, is an unaligned page allocation, which would cause us to
     hork badly if we tried to use it.  */
  {
    char *p = alloc_anon (NULL, G.pagesize);
    struct page_entry *e;
    if ((size_t) p & (G.pagesize - 1))
      {
        /* How losing.  Discard this one and try another.  If we still
           can't get something useful, give up.  */

        p = alloc_anon (NULL, G.pagesize);
        gcc_assert (!((size_t) p & (G.pagesize - 1)));
      }

    /* We have a good page, might as well hold onto it...  */
    e = XCNEW (struct page_entry);
    e->bytes = G.pagesize;
    e->page = p;
    e->next = G.free_pages;
    G.free_pages = e;
  }
#endif

  /* Initialize the object size table.  */
  for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    object_size_table[order] = (size_t) 1 << order;
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];

      /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
         so that we're sure of getting aligned memory.  */
      s = ROUND_UP (s, MAX_ALIGNMENT);
      object_size_table[order] = s;
    }

  /* Initialize the objects-per-page and inverse tables.  */
  for (order = 0; order < NUM_ORDERS; ++order)
    {
      objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
      if (objects_per_page_table[order] == 0)
        objects_per_page_table[order] = 1;
      compute_inverse (order);
    }

  /* Reset the size_lookup array to put appropriately sized objects in
     the special orders.  All objects bigger than the previous power
     of two, but no greater than the special size, should go in the
     new order.  */
  for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    {
      int o;
      int i;

      i = OBJECT_SIZE (order);
      if (i >= NUM_SIZE_LOOKUP)
        continue;

      for (o = size_lookup[i]; o == size_lookup[i]; --i)
        size_lookup[i] = order;
    }

  G.depth_in_use = 0;
  G.depth_max = 10;
  G.depth = XNEWVEC (unsigned int, G.depth_max);

  G.by_depth_in_use = 0;
  G.by_depth_max = INITIAL_PTE_COUNT;
  G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
}

/* Start a new GGC zone.  */

struct alloc_zone *
new_ggc_zone (const char *name ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Destroy a GGC zone.  */
void
destroy_ggc_zone (struct alloc_zone *zone ATTRIBUTE_UNUSED)
{
}

/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
   reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */

static void
ggc_recalculate_in_use_p (page_entry *p)
{
  unsigned int i;
  size_t num_objects;

  /* Because the past-the-end bit in in_use_p is always set, we
     pretend there is one additional object.  */
  num_objects = OBJECTS_IN_PAGE (p) + 1;

  /* Reset the free object count.  */
  p->num_free_objects = num_objects;

  /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
  for (i = 0;
       i < CEIL (BITMAP_SIZE (num_objects),
                 sizeof (*p->in_use_p));
       ++i)
    {
      unsigned long j;

      /* Something is in use if it is marked, or if it was in use in a
         context further down the context stack.  */
      p->in_use_p[i] |= save_in_use_p (p)[i];

      /* Decrement the free object count for every object allocated.  */
      for (j = p->in_use_p[i]; j; j >>= 1)
        p->num_free_objects -= (j & 1);
    }

  gcc_assert (p->num_free_objects < num_objects);
}
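/* Small example of the merge in ggc_recalculate_in_use_p above
   (illustrative only, assuming 32-bit longs): if this cycle marked
   objects 0 and 2 while objects 0 and 3 were live in an outer
   context, the two words combine as below and three objects are
   deducted from the free count.  */
#if 0
{
  unsigned long in_use = 0x5;   /* Marks from the current cycle.  */
  unsigned long saved = 0x9;    /* Live in an outer context.  */
  in_use |= saved;              /* == 0xd; its popcount of 3 is the
                                   number of allocated objects.  */
}
#endif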
/* Unmark all objects.  */

static void
clear_marks (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects = OBJECTS_IN_PAGE (p);
          size_t bitmap_size = BITMAP_SIZE (num_objects + 1);

          /* The data should be page-aligned.  */
          gcc_assert (!((size_t) p->page & (G.pagesize - 1)));

          /* Pages that aren't in the topmost context are not collected;
             nevertheless, we need their in-use bit vectors to store GC
             marks.  So, back them up first.  */
          if (p->context_depth < G.context_depth)
            {
              if (! save_in_use_p (p))
                save_in_use_p (p) = xmalloc (bitmap_size);
              memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
            }

          /* Reset the number of free objects and clear the
             in-use bits.  These will be adjusted by mark_obj.  */
          p->num_free_objects = num_objects;
          memset (p->in_use_p, 0, bitmap_size);

          /* Make sure the one-past-the-end bit is always set.  */
          p->in_use_p[num_objects / HOST_BITS_PER_LONG]
            = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
        }
    }
}
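/* Example of the one-past-the-end bit set at the end of clear_marks
   (an illustrative sketch; the 64-objects-per-page figure and the
   32-bit long are assumptions).  With num_objects == 64, the
   sentinel lands in word 64 / 32 == 2 under mask 1 << 0; keeping it
   set means scans of in_use_p for a free slot always terminate, and
   ggc_recalculate_in_use_p compensates by counting one extra
   object.  */
#if 0
{
  size_t num_objects = 64;
  size_t word = num_objects / 32;                      /* == 2 */
  unsigned long sentinel = 1UL << (num_objects % 32);  /* == 1 */
}
#endif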
/* Free all empty pages.  Partially empty pages need no attention
   because the `mark' bit doubles as an `unused' bit.  */

static void
sweep_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      /* The last page-entry to consider, regardless of entries
         placed at the end of the list.  */
      page_entry * const last = G.page_tails[order];

      size_t num_objects;
      size_t live_objects;
      page_entry *p, *previous;
      int done;

      p = G.pages[order];
      if (p == NULL)
        continue;

      previous = NULL;
      do
        {
          page_entry *next = p->next;

          /* Loop until all entries have been examined.  */
          done = (p == last);

          num_objects = OBJECTS_IN_PAGE (p);

          /* Add all live objects on this page to the count of
             allocated memory.  */
          live_objects = num_objects - p->num_free_objects;

          G.allocated += OBJECT_SIZE (order) * live_objects;

          /* Only objects on pages in the topmost context should get
             collected.  */
          if (p->context_depth < G.context_depth)
            ;

          /* Remove the page if it's empty.  */
          else if (live_objects == 0)
            {
              /* If P was the first page in the list, then NEXT
                 becomes the new first page in the list, otherwise
                 splice P out of the forward pointers.  */
              if (! previous)
                G.pages[order] = next;
              else
                previous->next = next;

              /* Splice P out of the back pointers too.  */
              if (next)
                next->prev = previous;

              /* Are we removing the last element?  */
              if (p == G.page_tails[order])
                G.page_tails[order] = previous;
              free_page (p);
              p = previous;
            }

          /* If the page is full, move it to the end.  */
          else if (p->num_free_objects == 0)
            {
              /* Don't move it if it's already at the end.  */
              if (p != G.page_tails[order])
                {
                  /* Move p to the end of the list.  */
                  p->next = NULL;
                  p->prev = G.page_tails[order];
                  G.page_tails[order]->next = p;

                  /* Update the tail pointer...  */
                  G.page_tails[order] = p;

                  /* ... and the head pointer, if necessary.  */
                  if (! previous)
                    G.pages[order] = next;
                  else
                    previous->next = next;

                  /* And update the backpointer in NEXT if necessary.  */
                  if (next)
                    next->prev = previous;

                  p = previous;
                }
            }

          /* If we've fallen through to here, it's a page in the
             topmost context that is neither full nor empty.  Such a
             page must precede pages at lesser context depth in the
             list, so move it to the head.  */
          else if (p != G.pages[order])
            {
              previous->next = p->next;

              /* Update the backchain in the next node if it exists.  */
              if (p->next)
                p->next->prev = previous;

              /* Move P to the head of the list.  */
              p->next = G.pages[order];
              p->prev = NULL;
              G.pages[order]->prev = p;

              /* Update the head pointer.  */
              G.pages[order] = p;

              /* Are we moving the last element?  */
              if (G.page_tails[order] == p)
                G.page_tails[order] = previous;
              p = previous;
            }

          previous = p;
          p = next;
        }
      while (! done);

      /* Now, restore the in_use_p vectors for any pages from contexts
         other than the current one.  */
      for (p = G.pages[order]; p; p = p->next)
        if (p->context_depth != G.context_depth)
          ggc_recalculate_in_use_p (p);
    }
}

#ifdef ENABLE_GC_CHECKING
/* Clobber all free objects.  */

static void
poison_pages (void)
{
  unsigned order;

  for (order = 2; order < NUM_ORDERS; order++)
    {
      size_t size = OBJECT_SIZE (order);
      page_entry *p;

      for (p = G.pages[order]; p != NULL; p = p->next)
        {
          size_t num_objects;
          size_t i;

          if (p->context_depth != G.context_depth)
            /* Since we don't do any collection for pages in pushed
               contexts, there's no need to do any poisoning.  And
               besides, the IN_USE_P array isn't valid until we pop
               contexts.  */
            continue;

          num_objects = OBJECTS_IN_PAGE (p);
          for (i = 0; i < num_objects; i++)
            {
              size_t word, bit;
              word = i / HOST_BITS_PER_LONG;
              bit = i % HOST_BITS_PER_LONG;
              if (((p->in_use_p[word] >> bit) & 1) == 0)
                {
                  char *object = p->page + i * size;

                  /* Keep poison-by-write when we expect to use Valgrind,
                     so the exact same memory semantics are kept, in case
                     there are memory errors.  We override this request
                     below.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
                  memset (object, 0xa5, size);

                  /* Drop the handle to avoid handle leak.  */
                  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
                }
            }
        }
    }
}
#else
#define poison_pages()
#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* Validate that the reportedly free objects actually are.  */

static void
validate_free_objects (void)
{
  struct free_object *f, *next, *still_free = NULL;

  for (f = G.free_object_list; f ; f = next)
    {
      page_entry *pe = lookup_page_table_entry (f->object);
      size_t bit, word;

      bit = OFFSET_TO_BIT ((char *) f->object - pe->page, pe->order);
      word = bit / HOST_BITS_PER_LONG;
      bit = bit % HOST_BITS_PER_LONG;
      next = f->next;

      /* Make certain it isn't visible from any root.  Notice that we
         do this check before sweep_pages merges save_in_use_p.  */
      gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));

      /* If the object comes from an outer context, then retain the
         free_object entry, so that we can verify that the address
         isn't live on the stack in some outer context.  */
      if (pe->context_depth != G.context_depth)
        {
          f->next = still_free;
          still_free = f;
        }
      else
        free (f);
    }

  G.free_object_list = still_free;
}
#else
#define validate_free_objects()
#endif
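/* Debugging aid (an illustrative note, not part of the collector):
   after poison_pages, every dead object is filled with 0xa5, so a
   stale pointer dereferenced after collection tends to surface as
   the telltale pattern below rather than as silent corruption.  */
#if 0
{
  unsigned long stale_word = 0xa5a5a5a5;   /* What a dangling 32-bit
                                              read would return.  */
}
#endif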
/* Top level mark-and-sweep routine.  */

void
ggc_collect (void)
{
  /* Avoid frequent unnecessary work by skipping collection if the
     total allocations haven't expanded much since the last
     collection.  */
  float allocated_last_gc =
    MAX (G.allocated_last_gc, (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);

  float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;

  if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
    return;

  timevar_push (TV_GC);
  if (!quiet_flag)
    fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "BEGIN COLLECTING\n");

  /* Zero the total allocated bytes.  This will be recalculated in the
     sweep phase.  */
  G.allocated = 0;

  /* Release the pages we freed the last time we collected, but didn't
     reuse in the interim.  */
  release_pages ();

  /* Indicate that we've seen collections at this context depth.  */
  G.context_depth_collections = ((unsigned long) 1 << (G.context_depth + 1)) - 1;

  clear_marks ();
  ggc_mark_roots ();
#ifdef GATHER_STATISTICS
  ggc_prune_overhead_list ();
#endif
  poison_pages ();
  validate_free_objects ();
  sweep_pages ();

  G.allocated_last_gc = G.allocated;

  timevar_pop (TV_GC);

  if (!quiet_flag)
    fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file, "END COLLECTING\n");
}
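/* Example of the trigger arithmetic at the top of ggc_collect (a
   sketch; the parameter values are hypothetical, not the built-in
   defaults).  With --param ggc-min-heapsize=65536 and --param
   ggc-min-expand=30, the floor is 64 MB, min_expand is 19.2 MB, and
   ggc_collect returns immediately until G.allocated reaches about
   83 MB.  */
#if 0
{
  float last = 65536.0f * 1024;    /* 64 MB floor from the heapsize param.  */
  float expand = last * 30 / 100;  /* 19.2 MB.  */
  /* Collection proceeds once G.allocated >= last + expand.  */
}
#endif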
/* Print allocation statistics.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

void
ggc_print_statistics (void)
{
  struct ggc_statistics stats;
  unsigned int i;
  size_t total_overhead = 0;

  /* Clear the statistics.  */
  memset (&stats, 0, sizeof (stats));

  /* Make sure collection will really occur.  */
  G.allocated_last_gc = 0;

  /* Collect and print the statistics common across collectors.  */
  ggc_print_common_statistics (stderr, &stats);

  /* Release free pages so that we will not count the bytes allocated
     there as part of the total allocated memory.  */
  release_pages ();

  /* Collect some information about the various sizes of
     allocation.  */
  fprintf (stderr,
           "Memory still allocated at the end of the compilation process\n");
  fprintf (stderr, "%-5s %10s %10s %10s\n",
           "Size", "Allocated", "Used", "Overhead");
  for (i = 0; i < NUM_ORDERS; ++i)
    {
      page_entry *p;
      size_t allocated;
      size_t in_use;
      size_t overhead;

      /* Skip empty entries.  */
      if (!G.pages[i])
        continue;

      overhead = allocated = in_use = 0;

      /* Figure out the total number of bytes allocated for objects of
         this size, and how many of them are actually in use.  Also figure
         out how much memory the page table is using.  */
      for (p = G.pages[i]; p; p = p->next)
        {
          allocated += p->bytes;
          in_use +=
            (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);

          overhead += (sizeof (page_entry) - sizeof (long)
                       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
        }
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
               (unsigned long) OBJECT_SIZE (i),
               SCALE (allocated), STAT_LABEL (allocated),
               SCALE (in_use), STAT_LABEL (in_use),
               SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
           SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
           SCALE (G.allocated), STAT_LABEL (G.allocated),
           SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:             %10lld\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:            %10lld\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B: %10lld\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B: %10lld\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B: %10lld\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B: %10lld\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B: %10lld\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B: %10lld\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          /* OBJECT_SIZE is a size_t, so cast it for the %7lu format.  */
          fprintf (stderr, "Total Overhead  page size %7lu: %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu: %10lld\n",
                   (unsigned long) OBJECT_SIZE (i),
                   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}

struct ggc_pch_data
{
  struct ggc_pch_ondisk
  {
    unsigned totals[NUM_ORDERS];
  } d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  d->d.totals[order]++;
}

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += ROUND_UP (d->d.totals[i] * OBJECT_SIZE (i), G.pagesize);
    }
}
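/* Layout example for ggc_pch_total_size and ggc_pch_this_base above
   (illustrative numbers; the 4 KB page size and object counts are
   assumptions).  100 sixteen-byte objects and 10 sixty-four-byte
   objects each round up to one page, so the PCH region is 8 KB and
   each order's objects run contiguously from its base.  */
#if 0
{
  size_t total = ROUND_UP (100 * 16, 4096)    /* == 4096 */
                 + ROUND_UP (10 * 64, 4096);  /* == 4096; total == 8192.  */
  /* base[] for the 16-byte order is the region start; base[] for the
     64-byte order starts one page later.  */
}
#endif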
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED,
                      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
                      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
                      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256];

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
        order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can't write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad the
     object out to OBJECT_SIZE (order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can't write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can't write PCH file");
        }
    }

  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
                                   G.pagesize),
                SEEK_CUR) != 0)
    fatal_error ("can't write PCH file: %m");
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can't write PCH file: %m");
  free (d);
}
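/* Padding example for ggc_pch_write_object above (a sketch with
   assumed sizes): a 13-byte string assigned to a 16-byte order
   needs 3 bytes of padding, which is written from emptyBytes with
   one extra fwrite; only pads larger than sizeof (emptyBytes),
   i.e. 256 bytes, fall back to fseek.  */
#if 0
{
  size_t size = 13;
  unsigned padding = 16 - size;   /* == 3, well under sizeof (emptyBytes).  */
}
#endif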
/* Move the PCH PTE entries just added to the end of by_depth, to the
   front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
          &G.by_depth[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
          &G.by_depth[0],
          count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
          &G.save_in_use[count_old_page_tables],
          count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
          &G.save_in_use[0],
          count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index
     0, so there is nothing to update in the first slot.  We need a
     second slot, only if we have old ptes, and if we do, they start
     at index count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
        p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
        continue;

      bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
      num_objs = bytes / OBJECT_SIZE (i);
      entry = xcalloc (1, (sizeof (struct page_entry)
                           - sizeof (long)
                           + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
           j + HOST_BITS_PER_LONG <= num_objs + 1;
           j += HOST_BITS_PER_LONG)
        entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
        entry->in_use_p[j / HOST_BITS_PER_LONG]
          |= 1L << (j % HOST_BITS_PER_LONG);

      for (pte = entry->page;
           pte < entry->page + entry->bytes;
           pte += G.pagesize)
        set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
        G.page_tails[i]->next = entry;
      else
        G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the
         end of the varrays; later, we will move the new information
         to the front of the varrays, as the PCH page tables are at
         context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *) addr;
}
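/* Example of the in_use_p initialization in ggc_pch_read above
   (illustrative, assuming 32-bit longs): a PCH page holding 40
   objects gets word 0 set to ~0UL by the first loop, and the second
   loop sets bits 0-8 of word 1 -- objects 32-39 plus the
   one-past-the-end sentinel at bit 40.  Every object read from the
   PCH is live, so num_free_objects stays 0.  */
#if 0
{
  unsigned long word0 = ~0UL;    /* Objects 0-31.  */
  unsigned long word1 = 0x1ff;   /* Objects 32-39 + the sentinel.  */
}
#endif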