ggc-common.c revision 132718
/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hashtab.h"
#include "ggc.h"
#include "toplev.h"
#include "params.h"
#include "hosthooks.h"

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#ifdef HAVE_MMAP_FILE
# include <sys/mman.h>
# ifdef HAVE_MINCORE
/* This is on Solaris.  */
#  include <sys/types.h>
# endif
#endif

#ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
#endif

#ifdef ENABLE_VALGRIND_CHECKING
# ifdef HAVE_VALGRIND_MEMCHECK_H
#  include <valgrind/memcheck.h>
# elif defined HAVE_MEMCHECK_H
#  include <memcheck.h>
# else
#  include <valgrind.h>
# endif
#else
/* Avoid #ifdef:s when we can help it.  */
#define VALGRIND_DISCARD(x)
#endif

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int ggc_htab_delete (void **, void *);
static hashval_t saving_htab_hash (const void *);
static int saving_htab_eq (const void *, const void *);
static int call_count (void **, void *);
static int call_alloc (void **, void *);
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);
static double ggc_rlimit_bound (double);

/* Maintain global roots that are preserved during GC.  */

/* Process a slot of an htab by deleting it if it has not been marked.  */

static int
ggc_htab_delete (void **slot, void *info)
{
  const struct ggc_cache_tab *r = (const struct ggc_cache_tab *) info;

  if (! (*r->marked_p) (*slot))
    htab_clear_slot (*r->base, slot);
  else
    (*r->cb) (*slot);

  return 1;
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  const struct ggc_cache_tab *const *ct;
  const struct ggc_cache_tab *cti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->cb)(*(void **)((char *)rti->base + rti->stride * i));

  ggc_mark_stringpool ();

  /* Now scan all hash tables that have objects which are to be deleted if
     they are not already marked.  */
  for (ct = gt_ggc_cache_rtab; *ct; ct++)
    for (cti = *ct; cti->base != NULL; cti++)
      if (*cti->base)
        {
          ggc_set_mark (*cti->base);
          htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
          ggc_set_mark ((*cti->base)->entries);
        }
}
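
/* For illustration (not part of the original file): an entry that gengtype
   might emit for a hypothetical root declared as

     static GTY(()) tree example_root;

   would have base == &example_root, nelt == 1, stride == sizeof (tree) and
   cb pointing at the marker routine for trees, so the loop above ends up
   calling the marker on *(void **) &example_root.  */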

/* Allocate a block of memory, then clear it.  */
void *
ggc_alloc_cleared (size_t size)
{
  void *buf = ggc_alloc (size);
  memset (buf, 0, size);
  return buf;
}
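
/* Illustrative use (a sketch, not taken from this file): a caller wanting
   zero-initialized GC memory for some hypothetical `struct foo' might write

     struct foo *p = ggc_alloc_cleared (sizeof (struct foo));

   which behaves like xcalloc (1, size) except that the block is owned by
   the collector rather than freed explicitly.  */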

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_alloc (size);

  old_size = ggc_get_size (x);
  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
                                                old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = ggc_alloc (size);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));

  return r;
}
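
/* Illustrative use (a sketch, not from this file): growing a GC-allocated
   buffer might look like

     buf = ggc_realloc (buf, new_len * sizeof (*buf));

   When the block has to move, the old copy is not freed here; it simply
   becomes unreferenced and is reclaimed by a later ggc_collect.  */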

/* Like ggc_alloc_cleared, but performs a multiplication.  */
void *
ggc_calloc (size_t s1, size_t s2)
{
  return ggc_alloc_cleared (s1 * s2);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  if (nl != NULL)
    abort ();
  return ggc_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  if (nl != NULL)
    abort ();
}

/* Print statistics that are independent of the collector in use.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
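
/* Worked example of the two macros above: for x == 5*1024*1024 the value
   is at least 10240 but below 10485760, so SCALE (x) == 5120 and
   LABEL (x) == 'k', printing as "5120k"; for x == 64*1024*1024,
   SCALE (x) == 64 and LABEL (x) == 'M'.  */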

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

static htab_t saving_htab;

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((long)x >> 3)
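
/* Expository note (added comment, not in the original): the shift by 3
   discards the low address bits, which are normally zero because GC
   objects are at least 8-byte aligned; the cast through `long' assumes a
   pointer fits in a long on the host.  */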

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    htab_find_slot_with_hash (saving_htab, obj, POINTER_HASH (obj),
                              INSERT);
  if (*slot != NULL)
    {
      if ((*slot)->note_ptr_fn != note_ptr_fn
          || (*slot)->note_ptr_cookie != note_ptr_cookie)
        abort ();
      return 0;
    }

  *slot = xcalloc (sizeof (struct ptr_data), 1);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen (obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}
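
/* Illustrative call (a sketch of what gengtype-generated walkers typically
   do; the walker name is hypothetical):

     if (gt_pch_note_object (x, x, gt_pch_p_9tree_node))
       ...note the pointers inside *x...

   The fields of X are only walked when the call returns 1, i.e. the first
   time X is seen.  */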

/* Register a reorder callback for OBJ, which must already have been
   registered with gt_pch_note_object.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
  if (data == NULL
      || data->note_ptr_cookie != note_ptr_cookie)
    abort ();

  data->reorder_fn = reorder_fn;
}

/* Hash and equality functions for saving_htab, callbacks for htab_create.  */

static hashval_t
saving_htab_hash (const void *p)
{
  return POINTER_HASH (((struct ptr_data *)p)->obj);
}

static int
saving_htab_eq (const void *p1, const void *p2)
{
  return ((struct ptr_data *)p1)->obj == p2;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

static int
call_count (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

static int
call_alloc (void **slot, void *state_p)
{
  struct ptr_data *d = (struct ptr_data *)*slot;
  struct traversal_state *state = (struct traversal_state *)state_p;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  struct ptr_data *p1 = *(struct ptr_data *const *)p1_p;
  struct ptr_data *p2 = *(struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
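
/* The (a > b) - (a < b) idiom above yields -1, 0 or 1 directly; a plain
   subtraction of the two size_t values could overflow the int return type.
   For example, new_addr values 0x1000 and 0x2000 compare as 0 - 1 == -1.  */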

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
  if (result == NULL)
    abort ();
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
          else
            {
              new_ptr = htab_find_with_hash (saving_htab, ptr,
                                             POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error ("can't write PCH file: %m");
            }
        }
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  size_t page_size = getpagesize();

  gt_pch_save_stringpool ();

  saving_htab = htab_create (50000, saving_htab_hash, saving_htab_eq, free);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch();
  state.count = 0;
  htab_traverse (saving_htab, call_count, &state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size);

#if HAVE_MMAP_FILE
  if (mmi.preferred_base == NULL)
    {
      mmi.preferred_base = mmap (NULL, mmi.size,
                                 PROT_READ | PROT_WRITE, MAP_PRIVATE,
                                 fileno (state.f), 0);
      if (mmi.preferred_base == (void *) MAP_FAILED)
        mmi.preferred_base = NULL;
      else
        munmap (mmi.preferred_base, mmi.size);
    }
#endif /* HAVE_MMAP_FILE */

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = xmalloc (state.count * sizeof (*state.ptrs));
  state.ptrs_i = 0;
  htab_traverse (saving_htab, call_alloc, &state);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);
  write_pch_globals (gt_pch_cache_rtab, &state);

  ggc_pch_prepare_write (state.d, state.f);

  /* Pad the PCH file so that the mmapped area starts on a page boundary.  */
  {
    /* Check ftell's result before adding sizeof (mmi); otherwise a failed
       ftell (which returns -1) could never be detected.  */
    long o = ftell (state.f);
    if (o == -1)
      fatal_error ("can't get position in PCH file: %m");
    o += sizeof (mmi);
    mmi.offset = page_size - o % page_size;
    if (mmi.offset == page_size)
      mmi.offset = 0;
    mmi.offset += o;
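
    /* Worked example (illustrative numbers, not from the source): with a
       4096-byte page, sizeof (mmi) == 24 and ftell reporting 9000, o is
       9024, o % page_size is 832, and mmi.offset becomes
       4096 - 832 + 9024 == 12288, the first page boundary at or beyond
       the end of the header.  */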
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error ("can't write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error ("can't write padding to PCH file: %m");

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = xrealloc (this_object, this_object_size);
        }
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
    }
  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  free (state.ptrs);
  htab_delete (saving_htab);
}
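
/* For reference, the write order produced above (and mirrored by
   gt_pch_restore below) is: the scalar roots, the translated global
   pointers, whatever ggc_pch_prepare_write chooses to emit, the mmap_info
   record, padding up to mmi.offset (a page boundary), and finally the
   object data itself.  */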

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  void *addr;
  bool needs_read;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  for (rt = gt_pch_cache_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          fatal_error ("can't read PCH file: %m");

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    fatal_error ("can't read PCH file: %m");

  if (host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size))
    {
#if HAVE_MMAP_FILE
      void *mmap_result;

      mmap_result = mmap (mmi.preferred_base, mmi.size,
                          PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
                          fileno (f), mmi.offset);

      /* The file might not be mmap-able.  */
      needs_read = mmap_result == (void *) MAP_FAILED;

      /* Sanity check for broken MAP_FIXED.  */
      if (! needs_read && mmap_result != mmi.preferred_base)
        abort ();
#else
      needs_read = true;
#endif
      addr = mmi.preferred_base;
    }
  else
    {
#if HAVE_MMAP_FILE
      addr = mmap (mmi.preferred_base, mmi.size,
                   PROT_READ | PROT_WRITE, MAP_PRIVATE,
                   fileno (f), mmi.offset);

#if HAVE_MINCORE
      if (addr != mmi.preferred_base)
        {
          size_t page_size = getpagesize();
          char one_byte;

          if (addr != (void *) MAP_FAILED)
            munmap (addr, mmi.size);

          /* We really want to be mapped at mmi.preferred_base
             so we're going to resort to MAP_FIXED.  But before,
             make sure that we can do so without destroying a
             previously mapped area, by looping over all pages
             that would be affected by the fixed mapping.  */
          errno = 0;

          for (i = 0; i < mmi.size; i+= page_size)
            if (mincore ((char *)mmi.preferred_base + i, page_size,
                         (void *)&one_byte) == -1
                && errno == ENOMEM)
              continue; /* The page is not mapped.  */
            else
              break;

          if (i >= mmi.size)
            addr = mmap (mmi.preferred_base, mmi.size,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
                         fileno (f), mmi.offset);
        }
#endif /* HAVE_MINCORE */

      needs_read = addr == (void *) MAP_FAILED;

#else /* HAVE_MMAP_FILE */
      needs_read = true;
#endif /* HAVE_MMAP_FILE */
      if (needs_read)
        addr = xmalloc (mmi.size);
    }

  if (needs_read)
    {
      /* Read the object data into the buffer at ADDR, not into the
         mmap_info record itself.  */
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (addr, mmi.size, 1, f) != 1)
        fatal_error ("can't read PCH file: %m");
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    fatal_error ("can't read PCH file: %m");

  ggc_pch_read (f, addr);

  if (addr != mmi.preferred_base)
    {
      for (rt = gt_ggc_rtab; *rt; rt++)
        for (rti = *rt; rti->base != NULL; rti++)
          for (i = 0; i < rti->nelt; i++)
            {
              char **ptr = (char **)((char *)rti->base + rti->stride * i);
              if (*ptr != NULL)
                *ptr += (size_t)addr - (size_t)mmi.preferred_base;
            }

      for (rt = gt_pch_cache_rtab; *rt; rt++)
        for (rti = *rt; rti->base != NULL; rti++)
          for (i = 0; i < rti->nelt; i++)
            {
              char **ptr = (char **)((char *)rti->base + rti->stride * i);
              if (*ptr != NULL)
                *ptr += (size_t)addr - (size_t)mmi.preferred_base;
            }

      sorry ("had to relocate PCH");
    }

  gt_pch_restore_stringpool ();
}

/* Modify the bound based on rlimits.  Keep the smallest number found.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# ifdef RLIMIT_RSS
  if (getrlimit (RLIMIT_RSS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_DATA
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
# ifdef RLIMIT_AS
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# endif
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
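
/* Worked example (illustrative numbers): on a host reporting 512MB of
   physical memory and no tighter rlimit, min_expand is 0.5 * 70 + 30 == 65,
   i.e. the heap may grow about 65% between collections; at 1GB or more the
   result saturates at 100.  */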

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
int
ggc_min_heapsize_heuristic (void)
{
  double min_heap_kbytes = physmem_total();

  /* Adjust for rlimits.  */
  min_heap_kbytes = ggc_rlimit_bound (min_heap_kbytes);

  min_heap_kbytes /= 1024; /* Convert to Kbytes.  */

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  min_heap_kbytes /= 8;
  min_heap_kbytes = MAX (min_heap_kbytes, 4 * 1024);
  min_heap_kbytes = MIN (min_heap_kbytes, 128 * 1024);

  return min_heap_kbytes;
}
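
/* Worked example (illustrative numbers): with 256MB of RAM the result is
   262144 / 8 == 32768 kB (32MB); hosts with less than 32MB of RAM are
   clamped up to the 4MB floor, and 1GB or more is clamped to the 128MB
   ceiling.  */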

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_param_value ("ggc-min-expand", ggc_min_expand_heuristic());
  set_param_value ("ggc-min-heapsize", ggc_min_heapsize_heuristic());
#endif
}
763