g1CollectedHeap.hpp revision 11915:3d026957cd98
1/*
2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
26#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
27
28#include "gc/g1/evacuationInfo.hpp"
29#include "gc/g1/g1AllocationContext.hpp"
30#include "gc/g1/g1BiasedArray.hpp"
31#include "gc/g1/g1CollectionSet.hpp"
32#include "gc/g1/g1CollectorState.hpp"
33#include "gc/g1/g1ConcurrentMark.hpp"
34#include "gc/g1/g1EdenRegions.hpp"
35#include "gc/g1/g1EvacFailure.hpp"
36#include "gc/g1/g1EvacStats.hpp"
37#include "gc/g1/g1HeapVerifier.hpp"
38#include "gc/g1/g1HRPrinter.hpp"
39#include "gc/g1/g1InCSetState.hpp"
40#include "gc/g1/g1MonitoringSupport.hpp"
41#include "gc/g1/g1SATBCardTableModRefBS.hpp"
42#include "gc/g1/g1SurvivorRegions.hpp"
43#include "gc/g1/g1YCTypes.hpp"
44#include "gc/g1/hSpaceCounters.hpp"
45#include "gc/g1/heapRegionManager.hpp"
46#include "gc/g1/heapRegionSet.hpp"
47#include "gc/shared/barrierSet.hpp"
48#include "gc/shared/collectedHeap.hpp"
49#include "gc/shared/plab.hpp"
50#include "gc/shared/preservedMarks.hpp"
51#include "memory/memRegion.hpp"
52#include "utilities/stack.hpp"
53
54// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
55// It uses the "Garbage First" heap organization and algorithm, which
56// may combine concurrent marking with parallel, incremental compaction of
57// heap subsets that will yield large amounts of garbage.
58
59// Forward declarations
60class HeapRegion;
61class HRRSCleanupTask;
62class GenerationSpec;
63class OopsInHeapRegionClosure;
64class G1ParScanThreadState;
65class G1ParScanThreadStateSet;
66class G1KlassScanClosure;
68class ObjectClosure;
69class SpaceClosure;
70class CompactibleSpaceClosure;
71class Space;
72class G1CollectionSet;
73class G1CollectorPolicy;
74class G1Policy;
75class G1HotCardCache;
76class G1RemSet;
77class HeapRegionRemSetIterator;
78class G1ConcurrentMark;
79class ConcurrentMarkThread;
80class ConcurrentG1Refine;
81class GenerationCounters;
82class STWGCTimer;
83class G1NewTracer;
84class EvacuationFailedInfo;
85class nmethod;
86class Ticks;
87class WorkGang;
88class G1Allocator;
89class G1ArchiveAllocator;
90class G1HeapVerifier;
91class G1HeapSizingPolicy;
92
93typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
94typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
95
96typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
97typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
98
99// The G1 STW is alive closure.
100// An instance is embedded into the G1CH and used as the
101// (optional) _is_alive_non_header closure in the STW
102// reference processor. It is also used extensively during
103// reference processing in STW evacuation pauses.
104class G1STWIsAliveClosure: public BoolObjectClosure {
105  G1CollectedHeap* _g1;
106public:
107  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
108  bool do_object_b(oop p);
109};
110
111class RefineCardTableEntryClosure;
112
113class G1RegionMappingChangedListener : public G1MappingChangedListener {
114 private:
115  void reset_from_card_cache(uint start_idx, size_t num_regions);
116 public:
117  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
118};
119
120class G1CollectedHeap : public CollectedHeap {
121  friend class G1FreeCollectionSetTask;
122  friend class VM_CollectForMetadataAllocation;
123  friend class VM_G1CollectForAllocation;
124  friend class VM_G1CollectFull;
125  friend class VM_G1IncCollectionPause;
126  friend class VMStructs;
127  friend class MutatorAllocRegion;
128  friend class G1GCAllocRegion;
129  friend class G1HeapVerifier;
130
131  // Closures used in implementation.
132  friend class G1ParScanThreadState;
133  friend class G1ParScanThreadStateSet;
134  friend class G1ParTask;
135  friend class G1PLABAllocator;
136  friend class G1PrepareCompactClosure;
137
138  // Other related classes.
139  friend class HeapRegionClaimer;
140
141  // Testing classes.
142  friend class G1CheckCSetFastTableClosure;
143
144private:
145  WorkGang* _workers;
146  G1CollectorPolicy* _collector_policy;
147
148  static size_t _humongous_object_threshold_in_words;
149
150  // The secondary free list which contains regions that have been
151  // freed up during the cleanup process. This will be appended to
152  // the master free list when appropriate.
153  FreeRegionList _secondary_free_list;
154
155  // It keeps track of the old regions.
156  HeapRegionSet _old_set;
157
158  // It keeps track of the humongous regions.
159  HeapRegionSet _humongous_set;
160
161  void eagerly_reclaim_humongous_regions();
162
163  // The number of regions we could create by expansion.
164  uint _expansion_regions;
165
166  // The block offset table for the G1 heap.
167  G1BlockOffsetTable* _bot;
168
169  // Tears down the region sets / lists so that they are empty and the
170  // regions on the heap do not belong to a region set / list. The
171  // only exception is the humongous set which we leave unaltered. If
172  // free_list_only is true, it will only tear down the master free
173  // list. It is called before a Full GC (free_list_only == false) or
174  // before heap shrinking (free_list_only == true).
175  void tear_down_region_sets(bool free_list_only);
176
177  // Rebuilds the region sets / lists so that they are repopulated to
178  // reflect the contents of the heap. The only exception is the
179  // humongous set which was not torn down in the first place. If
180  // free_list_only is true, it will only rebuild the master free
181  // list. It is called after a Full GC (free_list_only == false) or
182  // after heap shrinking (free_list_only == true).
183  void rebuild_region_sets(bool free_list_only);
184
185  // Callback for region mapping changed events.
186  G1RegionMappingChangedListener _listener;
187
188  // The sequence of all heap regions in the heap.
189  HeapRegionManager _hrm;
190
191  // Manages all allocations within regions except humongous object allocations.
192  G1Allocator* _allocator;
193
194  // Manages all heap verification.
195  G1HeapVerifier* _verifier;
196
197  // Outside of GC pauses, the number of bytes used in all regions other
198  // than the current allocation region(s).
199  size_t _summary_bytes_used;
200
201  void increase_used(size_t bytes);
202  void decrease_used(size_t bytes);
203
204  void set_used(size_t bytes);
205
206  // Class that handles archive allocation ranges.
207  G1ArchiveAllocator* _archive_allocator;
208
209  // Statistics for each allocation context
210  AllocationContextStats _allocation_context_stats;
211
212  // GC allocation statistics policy for survivors.
213  G1EvacStats _survivor_evac_stats;
214
215  // GC allocation statistics policy for tenured objects.
216  G1EvacStats _old_evac_stats;
217
218  // It specifies whether we should attempt to expand the heap after a
219  // region allocation failure. If heap expansion fails we set this to
220  // false so that we don't re-attempt the heap expansion (it's likely
221  // that subsequent expansion attempts will also fail if one fails).
222  // Currently, it is only consulted during GC and it's reset at the
223  // start of each GC.
224  bool _expand_heap_after_alloc_failure;
225
226  // Helper for monitoring and management support.
227  G1MonitoringSupport* _g1mm;
228
229  // Records whether the region at the given index is (still) a
230  // candidate for eager reclaim.  Only valid for humongous start
231  // regions; other regions have unspecified values.  Humongous start
232  // regions are initialized at start of collection pause, with
233  // candidates removed from the set as they are found reachable from
234  // roots or the young generation.
235  class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
236   protected:
237    bool default_value() const { return false; }
238   public:
239    void clear() { G1BiasedMappedArray<bool>::clear(); }
240    void set_candidate(uint region, bool value) {
241      set_by_index(region, value);
242    }
243    bool is_candidate(uint region) {
244      return get_by_index(region);
245    }
246  };
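
  // Usage sketch (illustrative only, not part of this interface): the array
  // is indexed by region number, so typical use during humongous object
  // registration and eager reclaim looks roughly like this (the call sites
  // shown are hypothetical, given purely as a hint):
  //
  //   uint region_idx = hr->hrm_index();
  //   _humongous_reclaim_candidates.set_candidate(region_idx, true);
  //   ...
  //   if (_humongous_reclaim_candidates.is_candidate(region_idx)) {
  //     // the region may still be eagerly reclaimed at the end of the pause
  //   }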
247
248  HumongousReclaimCandidates _humongous_reclaim_candidates;
249  // Stores whether during humongous object registration we found candidate regions.
250  // If not, we can skip a few steps.
251  bool _has_humongous_reclaim_candidates;
252
253  volatile uint _gc_time_stamp;
254
255  G1HRPrinter _hr_printer;
256
257  // It decides whether an explicit GC should start a concurrent cycle
258  // instead of doing an STW GC. Currently, a concurrent cycle is
259  // explicitly started if:
260  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
261  // (b) cause == _g1_humongous_allocation,
262  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
263  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
264  // (e) cause == _update_allocation_context_stats_inc, or
265  // (f) cause == _wb_conc_mark.
266  bool should_do_concurrent_full_gc(GCCause::Cause cause);
267
268  // Indicates whether we are in young or mixed GC mode.
269  G1CollectorState _collector_state;
270
271  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
272  // concurrent cycles) we have started.
273  volatile uint _old_marking_cycles_started;
274
275  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
276  // concurrent cycles) we have completed.
277  volatile uint _old_marking_cycles_completed;
278
279  // This is a non-product method that is helpful for testing. It is
280  // called at the end of a GC and artificially expands the heap by
281  // allocating a number of dead regions. This way we can induce very
282  // frequent marking cycles and stress the cleanup / concurrent
283  // cleanup code more (as all the regions that will be allocated by
284  // this method will be found dead by the marking cycle).
285  void allocate_dummy_regions() PRODUCT_RETURN;
286
287  // Clear RSets after a compaction. It also resets the GC time stamps.
288  void clear_rsets_post_compaction();
289
290  // If the HR printer is active, dump the state of the regions in the
291  // heap after a compaction.
292  void print_hrm_post_compaction();
293
294  // Create a memory mapper for auxiliary data structures of the given size and
295  // translation factor.
296  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
297                                                         size_t size,
298                                                         size_t translation_factor);
299
300  static G1Policy* create_g1_policy();
301
302  void trace_heap(GCWhen::Type when, const GCTracer* tracer);
303
304  void process_weak_jni_handles();
305
306  // These are macros so that, if the assert fires, we get the correct
307  // line number, file, etc.
308
309#define heap_locking_asserts_params(_extra_message_)                          \
310  "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
311  (_extra_message_),                                                          \
312  BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
313  BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
314  BOOL_TO_STR(Thread::current()->is_VM_thread())
315
316#define assert_heap_locked()                                                  \
317  do {                                                                        \
318    assert(Heap_lock->owned_by_self(),                                        \
319           heap_locking_asserts_params("should be holding the Heap_lock"));   \
320  } while (0)
321
322#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
323  do {                                                                        \
324    assert(Heap_lock->owned_by_self() ||                                      \
325           (SafepointSynchronize::is_at_safepoint() &&                        \
326             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
327           heap_locking_asserts_params("should be holding the Heap_lock or "  \
328                                        "should be at a safepoint"));         \
329  } while (0)
330
331#define assert_heap_locked_and_not_at_safepoint()                             \
332  do {                                                                        \
333    assert(Heap_lock->owned_by_self() &&                                      \
334                                    !SafepointSynchronize::is_at_safepoint(), \
335          heap_locking_asserts_params("should be holding the Heap_lock and "  \
336                                       "should not be at a safepoint"));      \
337  } while (0)
338
339#define assert_heap_not_locked()                                              \
340  do {                                                                        \
341    assert(!Heap_lock->owned_by_self(),                                       \
342        heap_locking_asserts_params("should not be holding the Heap_lock"));  \
343  } while (0)
344
345#define assert_heap_not_locked_and_not_at_safepoint()                         \
346  do {                                                                        \
347    assert(!Heap_lock->owned_by_self() &&                                     \
348                                    !SafepointSynchronize::is_at_safepoint(), \
349      heap_locking_asserts_params("should not be holding the Heap_lock and "  \
350                                   "should not be at a safepoint"));          \
351  } while (0)
352
353#define assert_at_safepoint(_should_be_vm_thread_)                            \
354  do {                                                                        \
355    assert(SafepointSynchronize::is_at_safepoint() &&                         \
356              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
357           heap_locking_asserts_params("should be at a safepoint"));          \
358  } while (0)
359
360#define assert_not_at_safepoint()                                             \
361  do {                                                                        \
362    assert(!SafepointSynchronize::is_at_safepoint(),                          \
363           heap_locking_asserts_params("should not be at a safepoint"));      \
364  } while (0)
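
// Usage sketch (illustrative only, not part of this header): these macros
// are meant to sit at the top of allocation and GC entry points to document
// and check their locking expectations, along the lines of:
//
//   HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, ...) {
//     // caller must not hold the Heap_lock and must not be at a safepoint
//     assert_heap_not_locked_and_not_at_safepoint();
//     ...
//   }
//
//   void G1CollectedHeap::collect(GCCause::Cause cause) {
//     assert_heap_not_locked();
//     ...
//   }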
365
366protected:
367
368  // The young region list.
369  G1EdenRegions _eden;
370  G1SurvivorRegions _survivor;
371
372  // The current policy object for the collector.
373  G1Policy* _g1_policy;
374  G1HeapSizingPolicy* _heap_sizing_policy;
375
376  G1CollectionSet _collection_set;
377
378  // This is the second level of trying to allocate a new region. If
379  // new_region() didn't find a region on the free_list, this call will
380  // check whether there's anything available on the
381  // secondary_free_list and/or wait for more regions to appear on
382  // that list, if _free_regions_coming is set.
383  HeapRegion* new_region_try_secondary_free_list(bool is_old);
384
385  // Try to allocate a single non-humongous HeapRegion sufficient for
386  // an allocation of the given word_size. If do_expand is true,
387  // attempt to expand the heap if necessary to satisfy the allocation
388  // request. If the region is to be used as an old region or for a
389  // humongous object, set is_old to true; otherwise set it to false.
390  HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
391
392  // Initialize a contiguous set of free regions of length num_regions
393  // and starting at index first so that they appear as a single
394  // humongous region.
395  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
396                                                      uint num_regions,
397                                                      size_t word_size,
398                                                      AllocationContext_t context);
399
400  // Attempt to allocate a humongous object of the given size. Return
401  // NULL if unsuccessful.
402  HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
403
404  // The following two methods, allocate_new_tlab() and
405  // mem_allocate(), are the two main entry points from the runtime
406  // into the G1's allocation routines. They have the following
407  // assumptions:
408  //
409  // * They should both be called outside safepoints.
410  //
411  // * They should both be called without holding the Heap_lock.
412  //
413  // * All allocation requests for new TLABs should go to
414  //   allocate_new_tlab().
415  //
416  // * All non-TLAB allocation requests should go to mem_allocate().
417  //
418  // * If either call cannot satisfy the allocation request using the
419  //   current allocating region, they will try to get a new one. If
420  //   this fails, they will attempt to do an evacuation pause and
421  //   retry the allocation.
422  //
423  // * If all allocation attempts fail, even after trying to schedule
424  //   an evacuation pause, allocate_new_tlab() will return NULL,
425  //   whereas mem_allocate() will attempt a heap expansion and/or
426  //   schedule a Full GC.
427  //
428  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
429  //   should never be called with word_size being humongous. All
430  //   humongous allocation requests should go to mem_allocate() which
431  //   will satisfy them with a special path.
432
433  virtual HeapWord* allocate_new_tlab(size_t word_size);
434
435  virtual HeapWord* mem_allocate(size_t word_size,
436                                 bool*  gc_overhead_limit_was_exceeded);
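
  // Illustrative sketch only (not part of this header): given the assumptions
  // documented above, a caller in the runtime ends up routing requests
  // roughly as follows (the helper below is hypothetical and only summarizes
  // the contract):
  //
  //   HeapWord* route_allocation(G1CollectedHeap* g1h, size_t word_size, bool is_tlab) {
  //     assert(!SafepointSynchronize::is_at_safepoint(), "must be outside a safepoint");
  //     if (is_tlab) {
  //       // TLAB requests are never humongous-sized (see above).
  //       return g1h->allocate_new_tlab(word_size);
  //     }
  //     bool gc_overhead_limit_was_exceeded = false;
  //     return g1h->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
  //   }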
437
438  // The following three methods take a gc_count_before_ret
439  // parameter which is used to return the GC count if the method
440  // returns NULL. Given that we are required to read the GC count
441  // while holding the Heap_lock, and these paths will take the
442  // Heap_lock at some point, it's easier to get them to read the GC
443  // count while holding the Heap_lock before they return NULL instead
444  // of the caller (namely: mem_allocate()) having to also take the
445  // Heap_lock just to read the GC count.
446
447  // First-level mutator allocation attempt: try to allocate out of
448  // the mutator alloc region without taking the Heap_lock. This
449  // should only be used for non-humongous allocations.
450  inline HeapWord* attempt_allocation(size_t word_size,
451                                      uint* gc_count_before_ret,
452                                      uint* gclocker_retry_count_ret);
453
454  // Second-level mutator allocation attempt: take the Heap_lock and
455  // retry the allocation attempt, potentially scheduling a GC
456  // pause. This should only be used for non-humongous allocations.
457  HeapWord* attempt_allocation_slow(size_t word_size,
458                                    AllocationContext_t context,
459                                    uint* gc_count_before_ret,
460                                    uint* gclocker_retry_count_ret);
461
462  // Takes the Heap_lock and attempts a humongous allocation. It can
463  // potentially schedule a GC pause.
464  HeapWord* attempt_allocation_humongous(size_t word_size,
465                                         uint* gc_count_before_ret,
466                                         uint* gclocker_retry_count_ret);
467
468  // Allocation attempt that should be called during safepoints (e.g.,
469  // at the end of a successful GC). expect_null_mutator_alloc_region
470  // specifies whether the mutator alloc region is expected to be NULL
471  // or not.
472  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
473                                            AllocationContext_t context,
474                                            bool expect_null_mutator_alloc_region);
475
476  // These methods are the "callbacks" from the G1AllocRegion class.
477
478  // For mutator alloc regions.
479  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
480  void retire_mutator_alloc_region(HeapRegion* alloc_region,
481                                   size_t allocated_bytes);
482
483  // For GC alloc regions.
484  bool has_more_regions(InCSetState dest);
485  HeapRegion* new_gc_alloc_region(size_t word_size, InCSetState dest);
486  void retire_gc_alloc_region(HeapRegion* alloc_region,
487                              size_t allocated_bytes, InCSetState dest);
488
489  // - if explicit_gc is true, the GC is for an explicit request such as System.gc(),
490  //   otherwise it's for a failed allocation.
491  // - if clear_all_soft_refs is true, all soft references should be
492  //   cleared during the GC.
493  // - it returns false if it is unable to do the collection due to the
494  //   GC locker being active, true otherwise.
495  bool do_full_collection(bool explicit_gc,
496                          bool clear_all_soft_refs);
497
498  // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
499  virtual void do_full_collection(bool clear_all_soft_refs);
500
501  // Resize the heap if necessary after a full collection.
502  void resize_if_necessary_after_full_collection();
503
504  // Callback from VM_G1CollectForAllocation operation.
505  // This function does everything necessary/possible to satisfy a
506  // failed allocation request (including collection, expansion, etc.)
507  HeapWord* satisfy_failed_allocation(size_t word_size,
508                                      AllocationContext_t context,
509                                      bool* succeeded);
510private:
511  // Helper method for satisfy_failed_allocation()
512  HeapWord* satisfy_failed_allocation_helper(size_t word_size,
513                                             AllocationContext_t context,
514                                             bool do_gc,
515                                             bool clear_all_soft_refs,
516                                             bool expect_null_mutator_alloc_region,
517                                             bool* gc_succeeded);
518
519protected:
520  // Attempt to expand the heap sufficiently
521  // to support an allocation of the given "word_size".  If
522  // successful, perform the allocation and return the address of the
523  // allocated block, or else "NULL".
524  HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
525
526  // Preserve any referents discovered by concurrent marking that have not yet been
527  // copied by the STW pause.
528  void preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states);
529  // Process any reference objects discovered during
530  // an incremental evacuation pause.
531  void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);
532
533  // Enqueue any remaining discovered references
534  // after processing.
535  void enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states);
536
537  // Merges the information gathered on a per-thread basis for all worker threads
538  // during GC into global variables.
539  void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
540public:
541  WorkGang* workers() const { return _workers; }
542
543  G1Allocator* allocator() {
544    return _allocator;
545  }
546
547  G1HeapVerifier* verifier() {
548    return _verifier;
549  }
550
551  G1MonitoringSupport* g1mm() {
552    assert(_g1mm != NULL, "should have been initialized");
553    return _g1mm;
554  }
555
556  // Expand the garbage-first heap by at least the given size (in bytes!).
557  // Returns true if the heap was expanded by the requested amount;
558  // false otherwise.
559  // (Rounds up to a HeapRegion boundary.)
560  bool expand(size_t expand_bytes, double* expand_time_ms = NULL);
561
562  // Returns the PLAB statistics for a given destination.
563  inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
564
565  // Determines PLAB size for a given destination.
566  inline size_t desired_plab_sz(InCSetState dest);
567
568  inline AllocationContextStats& allocation_context_stats();
569
570  // Do anything common to GCs.
571  void gc_prologue(bool full);
572  void gc_epilogue(bool full);
573
574  // Modify the reclaim candidate set and test for presence.
575  // These are only valid for starts_humongous regions.
576  inline void set_humongous_reclaim_candidate(uint region, bool value);
577  inline bool is_humongous_reclaim_candidate(uint region);
578
579  // Remove from the reclaim candidate set.  Also remove from the
580  // collection set so that later encounters avoid the slow path.
581  inline void set_humongous_is_live(oop obj);
582
583  // Register the given region to be part of the collection set.
584  inline void register_humongous_region_with_cset(uint index);
585  // Register regions with humongous objects (actually on the start region) in
586  // the in_cset_fast_test table.
587  void register_humongous_regions_with_cset();
588  // We register a region with the fast "in collection set" test. We
589  // simply set to true the array slot corresponding to this region.
590  void register_young_region_with_cset(HeapRegion* r) {
591    _in_cset_fast_test.set_in_young(r->hrm_index());
592  }
593  void register_old_region_with_cset(HeapRegion* r) {
594    _in_cset_fast_test.set_in_old(r->hrm_index());
595  }
596  inline void register_ext_region_with_cset(HeapRegion* r) {
597    _in_cset_fast_test.set_ext(r->hrm_index());
598  }
599  void clear_in_cset(const HeapRegion* hr) {
600    _in_cset_fast_test.clear(hr);
601  }
602
603  void clear_cset_fast_test() {
604    _in_cset_fast_test.clear();
605  }
606
607  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
608
609  // This is called at the start of either a concurrent cycle or a Full
610  // GC to update the number of old marking cycles started.
611  void increment_old_marking_cycles_started();
612
613  // This is called at the end of either a concurrent cycle or a Full
614  // GC to update the number of old marking cycles completed. Those two
615  // can happen in a nested fashion, i.e., we start a concurrent
616  // cycle, a Full GC happens half-way through it which ends first,
617  // and then the cycle notices that a Full GC happened and ends
618  // too. The concurrent parameter is a boolean to help us do a bit
619  // tighter consistency checking in the method. If concurrent is
620  // false, the caller is the inner caller in the nesting (i.e., the
621  // Full GC). If concurrent is true, the caller is the outer caller
622  // in this nesting (i.e., the concurrent cycle). Further nesting is
623  // not currently supported. The end of this call also notifies
624  // the FullGCCount_lock in case a Java thread is waiting for a full
625  // GC to happen (e.g., it called System.gc() with
626  // +ExplicitGCInvokesConcurrent).
627  void increment_old_marking_cycles_completed(bool concurrent);
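
  // Illustrative timeline (not part of this header) for the nested case
  // described above:
  //
  //   concurrent cycle starts  -> increment_old_marking_cycles_started()
  //   Full GC starts (nested)  -> increment_old_marking_cycles_started()
  //   Full GC ends             -> increment_old_marking_cycles_completed(false /* concurrent */)
  //   concurrent cycle notices the Full GC and ends
  //                            -> increment_old_marking_cycles_completed(true /* concurrent */)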
628
629  uint old_marking_cycles_completed() {
630    return _old_marking_cycles_completed;
631  }
632
633  G1HRPrinter* hr_printer() { return &_hr_printer; }
634
635  // Allocates a new heap region instance.
636  HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
637
638  // Allocate the highest free region in the reserved heap. This will commit
639  // regions as necessary.
640  HeapRegion* alloc_highest_free_region();
641
642  // Frees a non-humongous region by initializing its contents and
643  // adding it to the free list that's passed as a parameter (this is
644  // usually a local list which will be appended to the master free
645  // list later). If skip_remset is true, the region's RSet will not be freed
646  // up. If skip_hot_card_cache is true, the region's hot card cache will not
647  // be freed up. The assumption is that this will be done later.
649  // The locked parameter indicates if the caller has already taken
650  // care of proper synchronization. This may allow some optimizations.
651  void free_region(HeapRegion* hr,
652                   FreeRegionList* free_list,
653                   bool skip_remset,
654                   bool skip_hot_card_cache = false,
655                   bool locked = false);
656
657  // It dirties the cards that cover the block so that the post
658  // write barrier never queues anything when updating objects on this
659  // block. It is assumed (and in fact we assert) that the block
660  // belongs to a young region.
661  inline void dirty_young_block(HeapWord* start, size_t word_size);
662
663  // Frees a humongous region by collapsing it into individual regions
664  // and calling free_region() for each of them. The freed regions
665  // will be added to the free list that's passed as a parameter (this
666  // is usually a local list which will be appended to the master free
667  // list later). If skip_remset is true, the region's RSet will not be freed
668  // up. The assumption is that this will be done later.
670  void free_humongous_region(HeapRegion* hr,
671                             FreeRegionList* free_list,
672                             bool skip_remset);
673
674  // Facility for allocating in 'archive' regions in high heap memory and
675  // recording the allocated ranges. These should all be called from the
676  // VM thread at safepoints, without the heap lock held. They can be used
677  // to create and archive a set of heap regions which can be mapped at the
678  // same fixed addresses in a subsequent JVM invocation.
679  void begin_archive_alloc_range();
680
681  // Check if the requested size would be too large for an archive allocation.
682  bool is_archive_alloc_too_large(size_t word_size);
683
684  // Allocate memory of the requested size from the archive region. This will
685  // return NULL if the size is too large or if no memory is available. It
686  // does not trigger a garbage collection.
687  HeapWord* archive_mem_allocate(size_t word_size);
688
689  // Optionally aligns the end address and returns the allocated ranges in
690  // an array of MemRegions in order of ascending addresses.
691  void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
692                               size_t end_alignment_in_bytes = 0);
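
  // Usage sketch (illustrative only; the locals below are hypothetical): the
  // archive allocation calls above are meant to be used as a bracketed
  // sequence from the VM thread at a safepoint, e.g.:
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   g1h->begin_archive_alloc_range();
  //   if (!g1h->is_archive_alloc_too_large(word_size)) {
  //     HeapWord* p = g1h->archive_mem_allocate(word_size);
  //     // ... copy the object(s) being archived into p ...
  //   }
  //   GrowableArray<MemRegion> ranges(2);
  //   g1h->end_archive_alloc_range(&ranges, end_alignment);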
693
694  // Facility for allocating a fixed range within the heap and marking
695  // the containing regions as 'archive'. For use at JVM init time, when the
696  // caller may mmap archived heap data at the specified range(s).
697  // Verify that the MemRegions specified in the argument array are within the
698  // reserved heap.
699  bool check_archive_addresses(MemRegion* range, size_t count);
700
701  // Commit the appropriate G1 regions containing the specified MemRegions
702  // and mark them as 'archive' regions. The regions in the array must be
703  // non-overlapping and in order of ascending address.
704  bool alloc_archive_regions(MemRegion* range, size_t count);
705
706  // Insert any required filler objects in the G1 regions around the specified
707  // ranges to make the regions parseable. This must be called after
708  // alloc_archive_regions, and after class loading has occurred.
709  void fill_archive_regions(MemRegion* range, size_t count);
710
711  // For each of the specified MemRegions, uncommit the containing G1 regions
712  // which had been allocated by alloc_archive_regions. This should be called
713  // rather than fill_archive_regions at JVM init time if the archive file
714  // mapping failed, with the same non-overlapping and sorted MemRegion array.
715  void dealloc_archive_regions(MemRegion* range, size_t count);
716
717protected:
718
719  // Shrink the garbage-first heap by at most the given size (in bytes!).
720  // (Rounds down to a HeapRegion boundary.)
721  virtual void shrink(size_t expand_bytes);
722  void shrink_helper(size_t expand_bytes);
723
724  #if TASKQUEUE_STATS
725  static void print_taskqueue_stats_hdr(outputStream* const st);
726  void print_taskqueue_stats() const;
727  void reset_taskqueue_stats();
728  #endif // TASKQUEUE_STATS
729
730  // Schedule the VM operation that will do an evacuation pause to
731  // satisfy an allocation request of word_size. *succeeded will
732  // return whether the VM operation was successful (it did do an
733  // evacuation pause) or not (another thread beat us to it or the GC
734  // locker was active). Given that we should not be holding the
735  // Heap_lock when we enter this method, we will pass the
736  // gc_count_before (i.e., total_collections()) as a parameter since
737  // it has to be read while holding the Heap_lock. Currently, both
738  // methods that call do_collection_pause() release the Heap_lock
739  // before the call, so it's easy to read gc_count_before just before.
740  HeapWord* do_collection_pause(size_t         word_size,
741                                uint           gc_count_before,
742                                bool*          succeeded,
743                                GCCause::Cause gc_cause);
744
745  void wait_for_root_region_scanning();
746
747  // The guts of the incremental collection pause, executed by the VM
748  // thread. It returns false if it is unable to do the collection due
749  // to the GC locker being active, true otherwise.
750  bool do_collection_pause_at_safepoint(double target_pause_time_ms);
751
752  // Actually do the work of evacuating the collection set.
753  virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
754
755  void pre_evacuate_collection_set();
756  void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
757
758  // Print the header for the per-thread termination statistics.
759  static void print_termination_stats_hdr();
760  // Print actual per-thread termination statistics.
761  void print_termination_stats(uint worker_id,
762                               double elapsed_ms,
763                               double strong_roots_ms,
764                               double term_ms,
765                               size_t term_attempts,
766                               size_t alloc_buffer_waste,
767                               size_t undo_waste) const;
768  // Update object copying statistics.
769  void record_obj_copy_mem_stats();
770
771  // The hot card cache for remembered set insertion optimization.
772  G1HotCardCache* _hot_card_cache;
773
774  // The g1 remembered set of the heap.
775  G1RemSet* _g1_rem_set;
776
777  // A set of cards that cover the objects for which the Rsets should be updated
778  // concurrently after the collection.
779  DirtyCardQueueSet _dirty_card_queue_set;
780
781  // The closure used to refine a single card.
782  RefineCardTableEntryClosure* _refine_cte_cl;
783
784  // After a collection pause, convert the regions in the collection set into free
785  // regions.
786  void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
787
788  // Abandon the current collection set without recording policy
789  // statistics or updating free lists.
790  void abandon_collection_set(G1CollectionSet* collection_set);
791
792  // The concurrent marker (and the thread it runs in.)
793  G1ConcurrentMark* _cm;
794  ConcurrentMarkThread* _cmThread;
795
796  // The concurrent refiner.
797  ConcurrentG1Refine* _cg1r;
798
799  // The parallel task queues
800  RefToScanQueueSet *_task_queues;
801
802  // True iff an evacuation has failed in the current collection.
803  bool _evacuation_failed;
804
805  EvacuationFailedInfo* _evacuation_failed_info_array;
806
807  // Failed evacuations cause some logical from-space objects to have
808  // forwarding pointers to themselves.  Reset them.
809  void remove_self_forwarding_pointers();
810
811  // Restore the objects in the regions in the collection set after an
812  // evacuation failure.
813  void restore_after_evac_failure();
814
815  PreservedMarksSet _preserved_marks_set;
816
817  // Preserve the mark of "obj", if necessary, in preparation for its mark
818  // word being overwritten with a self-forwarding-pointer.
819  void preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m);
820
821#ifndef PRODUCT
822  // Support for forcing evacuation failures. Analogous to
823  // PromotionFailureALot for the other collectors.
824
825  // Records whether G1EvacuationFailureALot should be in effect
826  // for the current GC
827  bool _evacuation_failure_alot_for_current_gc;
828
829  // Used to record the GC number for interval checking when
830  // determining whether G1EvacuationFailureALot is in effect
831  // for the current GC.
832  size_t _evacuation_failure_alot_gc_number;
833
834  // Count of the number of evacuations between failures.
835  volatile size_t _evacuation_failure_alot_count;
836
837  // Set whether G1EvacuationFailureALot should be in effect
838  // for the current GC (based upon the type of GC and which
839  // command line flags are set).
840  inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
841                                                  bool during_initial_mark,
842                                                  bool during_marking);
843
844  inline void set_evacuation_failure_alot_for_current_gc();
845
846  // Return true if it's time to cause an evacuation failure.
847  inline bool evacuation_should_fail();
848
849  // Reset the G1EvacuationFailureALot counters.  Should be called at
850  // the end of an evacuation pause in which an evacuation failure occurred.
851  inline void reset_evacuation_should_fail();
852#endif // !PRODUCT
853
854  // ("Weak") Reference processing support.
855  //
856  // G1 has 2 instances of the reference processor class. One
857  // (_ref_processor_cm) handles reference object discovery
858  // and subsequent processing during concurrent marking cycles.
859  //
860  // The other (_ref_processor_stw) handles reference object
861  // discovery and processing during full GCs and incremental
862  // evacuation pauses.
863  //
864  // During an incremental pause, reference discovery will be
865  // temporarily disabled for _ref_processor_cm and will be
866  // enabled for _ref_processor_stw. At the end of the evacuation
867  // pause references discovered by _ref_processor_stw will be
868  // processed and discovery will be disabled. The previous
869  // setting for reference object discovery for _ref_processor_cm
870  // will be reinstated.
871  //
872  // At the start of marking:
873  //  * Discovery by the CM ref processor is verified to be inactive
874  //    and its discovered lists are empty.
875  //  * Discovery by the CM ref processor is then enabled.
876  //
877  // At the end of marking:
878  //  * Any references on the CM ref processor's discovered
879  //    lists are processed (possibly MT).
880  //
881  // At the start of full GC we:
882  //  * Disable discovery by the CM ref processor and
883  //    empty CM ref processor's discovered lists
884  //    (without processing any entries).
885  //  * Verify that the STW ref processor is inactive and its
886  //    discovered lists are empty.
887  //  * Temporarily set STW ref processor discovery as single threaded.
888  //  * Temporarily clear the STW ref processor's _is_alive_non_header
889  //    field.
890  //  * Finally enable discovery by the STW ref processor.
891  //
892  // The STW ref processor is used to record any discovered
893  // references during the full GC.
894  //
895  // At the end of a full GC we:
896  //  * Enqueue any reference objects discovered by the STW ref processor
897  //    that have non-live referents. This has the side-effect of
898  //    making the STW ref processor inactive by disabling discovery.
899  //  * Verify that the CM ref processor is still inactive
900  //    and no references have been placed on its discovered
901  //    lists (also checked as a precondition during initial marking).
902
903  // The (stw) reference processor...
904  ReferenceProcessor* _ref_processor_stw;
905
906  STWGCTimer* _gc_timer_stw;
907
908  G1NewTracer* _gc_tracer_stw;
909
910  // During reference object discovery, the _is_alive_non_header
911  // closure (if non-null) is applied to the referent object to
912  // determine whether the referent is live. If so then the
913  // reference object does not need to be 'discovered' and can
914  // be treated as a regular oop. This has the benefit of reducing
915  // the number of 'discovered' reference objects that need to
916  // be processed.
917  //
918  // Instance of the is_alive closure for embedding into the
919  // STW reference processor as the _is_alive_non_header field.
920  // Supplying a value for the _is_alive_non_header field is
921  // optional but doing so prevents unnecessary additions to
922  // the discovered lists during reference discovery.
923  G1STWIsAliveClosure _is_alive_closure_stw;
924
925  // The (concurrent marking) reference processor...
926  ReferenceProcessor* _ref_processor_cm;
927
928  // Instance of the concurrent mark is_alive closure for embedding
929  // into the Concurrent Marking reference processor as the
930  // _is_alive_non_header field. Supplying a value for the
931  // _is_alive_non_header field is optional but doing so prevents
932  // unnecessary additions to the discovered lists during reference
933  // discovery.
934  G1CMIsAliveClosure _is_alive_closure_cm;
935
936  volatile bool _free_regions_coming;
937
938public:
939
940  void set_refine_cte_cl_concurrency(bool concurrent);
941
942  RefToScanQueue *task_queue(uint i) const;
943
944  uint num_task_queues() const;
945
946  // A set of cards where updates happened during the GC
947  DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
948
949  // Create a G1CollectedHeap with the specified policy.
950  // Must call the initialize method afterwards.
951  // May not return if something goes wrong.
952  G1CollectedHeap(G1CollectorPolicy* policy);
953
954  // Initialize the G1CollectedHeap to have the initial and
955  // maximum sizes and remembered and barrier sets
956  // specified by the policy object.
957  jint initialize();
958
959  virtual void stop();
960
961  // Return the (conservative) maximum heap alignment for any G1 heap
962  static size_t conservative_max_heap_alignment();
963
964  // Does operations required after initialization has been done.
965  void post_initialize();
966
967  // Initialize weak reference processing.
968  void ref_processing_init();
969
970  virtual Name kind() const {
971    return CollectedHeap::G1CollectedHeap;
972  }
973
974  virtual const char* name() const {
975    return "G1";
976  }
977
978  const G1CollectorState* collector_state() const { return &_collector_state; }
979  G1CollectorState* collector_state() { return &_collector_state; }
980
981  // The current policy object for the collector.
982  G1Policy* g1_policy() const { return _g1_policy; }
983
984  const G1CollectionSet* collection_set() const { return &_collection_set; }
985  G1CollectionSet* collection_set() { return &_collection_set; }
986
987  virtual CollectorPolicy* collector_policy() const;
988
989  // Adaptive size policy.  No such thing for G1.
990  virtual AdaptiveSizePolicy* size_policy() { return NULL; }
991
992  // The rem set and barrier set.
993  G1RemSet* g1_rem_set() const { return _g1_rem_set; }
994
995  // Try to minimize the remembered set.
996  void scrub_rem_set();
997
998  uint get_gc_time_stamp() {
999    return _gc_time_stamp;
1000  }
1001
1002  inline void reset_gc_time_stamp();
1003
1004  void check_gc_time_stamps() PRODUCT_RETURN;
1005
1006  inline void increment_gc_time_stamp();
1007
1008  // Reset the given region's GC timestamp. If the region is starts-humongous,
1009  // also reset the GC timestamps of its corresponding
1010  // continues-humongous regions.
1011  void reset_gc_time_stamps(HeapRegion* hr);
1012
1013  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
1014  void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
1015
1016  // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
1017  void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
1018
1019  // The shared block offset table array.
1020  G1BlockOffsetTable* bot() const { return _bot; }
1021
1022  // Reference Processing accessors
1023
1024  // The STW reference processor....
1025  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1026
1027  G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
1028
1029  // The Concurrent Marking reference processor...
1030  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1031
1032  virtual size_t capacity() const;
1033  virtual size_t used() const;
1034  // This should be called when we're not holding the heap lock. The
1035  // result might be a bit inaccurate.
1036  size_t used_unlocked() const;
1037  size_t recalculate_used() const;
1038
1039  // These virtual functions do the actual allocation.
1040  // Some heaps may offer a contiguous region for shared non-blocking
1041  // allocation, via inlined code (by exporting the address of the top and
1042  // end fields defining the extent of the contiguous allocation region.)
1043  // But G1CollectedHeap doesn't yet support this.
1044
1045  virtual bool is_maximal_no_gc() const {
1046    return _hrm.available() == 0;
1047  }
1048
1049  // The current number of regions in the heap.
1050  uint num_regions() const { return _hrm.length(); }
1051
1052  // The max number of regions in the heap.
1053  uint max_regions() const { return _hrm.max_length(); }
1054
1055  // The number of regions that are completely free.
1056  uint num_free_regions() const { return _hrm.num_free_regions(); }
1057
1058  MemoryUsage get_auxiliary_data_memory_usage() const {
1059    return _hrm.get_auxiliary_data_memory_usage();
1060  }
1061
1062  // The number of regions that are not completely free.
1063  uint num_used_regions() const { return num_regions() - num_free_regions(); }
1064
1065#ifdef ASSERT
1066  bool is_on_master_free_list(HeapRegion* hr) {
1067    return _hrm.is_free(hr);
1068  }
1069#endif // ASSERT
1070
1071  // Wrapper for the region list operations that can be called from
1072  // methods outside this class.
1073
1074  void secondary_free_list_add(FreeRegionList* list) {
1075    _secondary_free_list.add_ordered(list);
1076  }
1077
1078  void append_secondary_free_list() {
1079    _hrm.insert_list_into_free_list(&_secondary_free_list);
1080  }
1081
1082  void append_secondary_free_list_if_not_empty_with_lock() {
1083    // If the secondary free list looks empty there's no reason to
1084    // take the lock and then try to append it.
1085    if (!_secondary_free_list.is_empty()) {
1086      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1087      append_secondary_free_list();
1088    }
1089  }
1090
1091  inline void old_set_add(HeapRegion* hr);
1092  inline void old_set_remove(HeapRegion* hr);
1093
1094  size_t non_young_capacity_bytes() {
1095    return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
1096  }
1097
1098  void set_free_regions_coming();
1099  void reset_free_regions_coming();
1100  bool free_regions_coming() { return _free_regions_coming; }
1101  void wait_while_free_regions_coming();
1102
1103  // Determine whether the given region is one that we are using as an
1104  // old GC alloc region.
1105  bool is_old_gc_alloc_region(HeapRegion* hr);
1106
1107  // Perform a collection of the heap; intended for use in implementing
1108  // "System.gc".  This probably implies as full a collection as the
1109  // "CollectedHeap" supports.
1110  virtual void collect(GCCause::Cause cause);
1111
1112  virtual bool copy_allocation_context_stats(const jint* contexts,
1113                                             jlong* totals,
1114                                             jbyte* accuracy,
1115                                             jint len);
1116
1117  // True iff an evacuation has failed in the most-recent collection.
1118  bool evacuation_failed() { return _evacuation_failed; }
1119
1120  void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);
1121  void prepend_to_freelist(FreeRegionList* list);
1122  void decrement_summary_bytes(size_t bytes);
1123
1124  virtual bool is_in(const void* p) const;
1125#ifdef ASSERT
1126  // Returns whether p is in one of the available areas of the heap. Slow but
1127  // extensive version.
1128  bool is_in_exact(const void* p) const;
1129#endif
1130
1131  // Return "TRUE" iff the given object address is within the collection
1132  // set. Slow implementation.
1133  bool obj_in_cs(oop obj);
1134
1135  inline bool is_in_cset(const HeapRegion *hr);
1136  inline bool is_in_cset(oop obj);
1137
1138  inline bool is_in_cset_or_humongous(const oop obj);
1139
1140 private:
1141  // This array is used for a quick test on whether a reference points into
1142  // the collection set or not. Each of the array's elements denotes whether the
1143  // corresponding region is in the collection set or not.
1144  G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
1145
1146 public:
1147
1148  inline InCSetState in_cset_state(const oop obj);
1149
1150  // Return "TRUE" iff the given object address is in the reserved
1151  // region of g1.
1152  bool is_in_g1_reserved(const void* p) const {
1153    return _hrm.reserved().contains(p);
1154  }
1155
1156  // Returns a MemRegion that corresponds to the space that has been
1157  // reserved for the heap
1158  MemRegion g1_reserved() const {
1159    return _hrm.reserved();
1160  }
1161
1162  virtual bool is_in_closed_subset(const void* p) const;
1163
1164  G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
1165    return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
1166  }
1167
1168  // Iteration functions.
1169
1170  // Iterate over all objects, calling "cl.do_object" on each.
1171  virtual void object_iterate(ObjectClosure* cl);
1172
1173  virtual void safe_object_iterate(ObjectClosure* cl) {
1174    object_iterate(cl);
1175  }
1176
1177  // Iterate over heap regions, in address order, terminating the
1178  // iteration early if the "doHeapRegion" method returns "true".
1179  void heap_region_iterate(HeapRegionClosure* blk) const;
1180
1181  // Return the region with the given index. It assumes the index is valid.
1182  inline HeapRegion* region_at(uint index) const;
1183
1184  // Return the next region (by index) that is part of the same
1185  // humongous object that hr is part of.
1186  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1187
1188  // Calculate the region index of the given address. Given address must be
1189  // within the heap.
1190  inline uint addr_to_region(HeapWord* addr) const;
1191
1192  inline HeapWord* bottom_addr_for_region(uint index) const;
1193
1194  // Iterate over the heap regions in parallel. Assumes that this will be called
1195  // in parallel by ParallelGCThreads worker threads with distinct worker ids
1196  // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
1197  // to each of the regions, by attempting to claim the region using the
1198  // HeapRegionClaimer and, if successful, applying the closure to the claimed
1199  // region. The concurrent argument should be set to true if iteration is
1200  // performed concurrently, during which no assumptions are made for consistent
1201  // attributes of the heap regions (as they might be modified while iterating).
1202  void heap_region_par_iterate(HeapRegionClosure* cl,
1203                               uint worker_id,
1204                               HeapRegionClaimer* hrclaimer,
1205                               bool concurrent = false) const;
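
  // Illustrative sketch only (not part of this header): closures for the
  // iteration functions above subclass HeapRegionClosure; returning true from
  // doHeapRegion() terminates the iteration early. For example, a
  // (hypothetical) closure counting old regions:
  //
  //   class CountOldRegionsClosure : public HeapRegionClosure {
  //     size_t _count;
  //    public:
  //     CountOldRegionsClosure() : _count(0) {}
  //     bool doHeapRegion(HeapRegion* r) {
  //       if (r->is_old()) {
  //         _count++;
  //       }
  //       return false;  // false == keep iterating
  //     }
  //     size_t count() const { return _count; }
  //   };
  //
  //   CountOldRegionsClosure cl;
  //   G1CollectedHeap::heap()->heap_region_iterate(&cl);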
1206
1207  // Iterate over the regions (if any) in the current collection set.
1208  void collection_set_iterate(HeapRegionClosure* blk);
1209
1210  // Iterate over the regions (if any) in the current collection set. Starts the
1211  // iteration at an offset based on the given worker_id so that the starting
1212  // regions of the active_workers workers are spread evenly across the
1213  // collection set regions.
1214  void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
1215
1216  HeapRegion* next_compaction_region(const HeapRegion* from) const;
1217
1218  // Returns the HeapRegion that contains addr. addr must not be NULL.
1219  template <class T>
1220  inline HeapRegion* heap_region_containing(const T addr) const;
1221
1222  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1223  // each address in the (reserved) heap is a member of exactly
1224  // one block.  The defining characteristic of a block is that it is
1225  // possible to find its size, and thus to progress forward to the next
1226  // block.  (Blocks may be of different sizes.)  Thus, blocks may
1227  // represent Java objects, or they might be free blocks in a
1228  // free-list-based heap (or subheap), as long as the two kinds are
1229  // distinguishable and the size of each is determinable.
1230
1231  // Returns the address of the start of the "block" that contains the
1232  // address "addr".  We say "blocks" instead of "object" since some heaps
1233  // may not pack objects densely; a chunk may either be an object or a
1234  // non-object.
1235  virtual HeapWord* block_start(const void* addr) const;
1236
1237  // Requires "addr" to be the start of a chunk, and returns its size.
1238  // "addr + size" is required to be the start of a new chunk, or the end
1239  // of the active area of the heap.
1240  virtual size_t block_size(const HeapWord* addr) const;
1241
1242  // Requires "addr" to be the start of a block, and returns "TRUE" iff
1243  // the block is an object.
1244  virtual bool block_is_obj(const HeapWord* addr) const;
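
  // Illustrative sketch only (not part of this header): together, the three
  // block functions above support walking a region one block at a time
  // (g1h and region below are hypothetical locals; bounds and synchronization
  // concerns are omitted):
  //
  //   HeapWord* cur = region->bottom();
  //   while (cur < region->top()) {
  //     if (g1h->block_is_obj(cur)) {
  //       oop obj = oop(cur);
  //       // ... examine obj ...
  //     }
  //     cur += g1h->block_size(cur);
  //   }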
1245
1246  // Section on thread-local allocation buffers (TLABs)
1247  // See CollectedHeap for semantics.
1248
1249  bool supports_tlab_allocation() const;
1250  size_t tlab_capacity(Thread* ignored) const;
1251  size_t tlab_used(Thread* ignored) const;
1252  size_t max_tlab_size() const;
1253  size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1254
1255  // Can a compiler initialize a new object without store barriers?
1256  // This permission only extends from the creation of a new object
1257  // via a TLAB up to the first subsequent safepoint. If such permission
1258  // is granted for this heap type, the compiler promises to call
1259  // defer_store_barrier() below on any slow path allocation of
1260  // a new object for which such initializing store barriers will
1261  // have been elided. G1, like CMS, allows this, but should be
1262  // ready to provide a compensating write barrier as necessary
1263  // if that storage came out of a non-young region. The efficiency
1264  // of this implementation depends crucially on being able to
1265  // answer very efficiently in constant time whether a piece of
1266  // storage in the heap comes from a young region or not.
1267  // See ReduceInitialCardMarks.
1268  virtual bool can_elide_tlab_store_barriers() const {
1269    return true;
1270  }
1271
1272  virtual bool card_mark_must_follow_store() const {
1273    return true;
1274  }
1275
1276  inline bool is_in_young(const oop obj);
1277
1278  virtual bool is_scavengable(const void* addr);
1279
1280  // We don't need barriers for initializing stores to objects
1281  // in the young gen: for the SATB pre-barrier, there is no
1282  // pre-value that needs to be remembered; for the remembered-set
1283  // update logging post-barrier, we don't maintain remembered set
1284  // information for young gen objects.
1285  virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
1286
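  // A minimal sketch of the rule described above, assuming the check simply
  // reduces to an is_in_young() test (the inline definition lives elsewhere):
  //
  //   bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  //     return is_in_young(new_obj);
  //   }
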
1287  // Returns "true" iff the given word_size is "very large".
1288  static bool is_humongous(size_t word_size) {
1289    // Note this has to be strictly greater-than as the TLABs
1290    // are capped at the humongous threshold and we want to
1291    // ensure that we don't try to allocate a TLAB as
1292    // humongous and that we don't allocate a humongous
1293    // object in a TLAB.
1294    return word_size > _humongous_object_threshold_in_words;
1295  }
1296
1297  // Returns the humongous threshold for a specific region size
1298  static size_t humongous_threshold_for(size_t region_size) {
1299    return (region_size / 2);
1300  }
1301
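  // Worked example of the threshold above: with 1 MB regions and 8-byte
  // HeapWords, region_size is 131072 words, so the humongous threshold is
  // 65536 words (512 KB). Per is_humongous(), a request of 65537 words or
  // more is treated as humongous; a request of exactly 65536 words is not.
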
1302  // Returns the number of regions that a humongous object of the given word
1303  // size requires.
1304  static size_t humongous_obj_size_in_regions(size_t word_size);
1305
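  // Illustrative sketch (not the actual definition): the result is presumably
  // the ceiling of word_size divided by the region size in words, e.g. with
  // 131072-word regions a 200000-word object requires 2 regions. Here
  // region_words is a stand-in for the region size in words.
  //
  //   size_t regions = (word_size + region_words - 1) / region_words;
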
1306  // Returns the maximum heap capacity.
1307  virtual size_t max_capacity() const;
1308
1309  virtual jlong millis_since_last_gc();
1310
1311
1312  // Convenience function to be used in situations where the heap type can be
1313  // asserted to be this type.
1314  static G1CollectedHeap* heap();
1315
1316  void set_region_short_lived_locked(HeapRegion* hr);
1317  // add appropriate methods for any other surv rate groups
1318
1319  const G1SurvivorRegions* survivor() const { return &_survivor; }
1320
1321  uint survivor_regions_count() const {
1322    return _survivor.length();
1323  }
1324
1325  uint eden_regions_count() const {
1326    return _eden.length();
1327  }
1328
1329  uint young_regions_count() const {
1330    return _eden.length() + _survivor.length();
1331  }
1332
1333  uint old_regions_count() const { return _old_set.length(); }
1334
1335  uint humongous_regions_count() const { return _humongous_set.length(); }
1336
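  // Illustrative usage sketch (not part of this interface): combining the
  // static heap() accessor and the region counts declared above.
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   uint young = g1h->eden_regions_count() + g1h->survivor_regions_count();
  //   assert(young == g1h->young_regions_count(), "breakdown must match");
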
1337#ifdef ASSERT
1338  bool check_young_list_empty();
1339#endif
1340
1341  // *** Support for concurrent marking. It is not clear that so many of
1342  // these need to be public.
1343
1344  // The functions below are helper functions that a subclass of
1345  // "CollectedHeap" can use in the implementation of its virtual
1346  // functions.
1347  // doConcurrentMark() below performs a concurrent marking of the live
1348  // objects, recording them in a bitmap kept off to the side.
1349  void doConcurrentMark();
1350
1351  bool isMarkedPrev(oop obj) const;
1352  bool isMarkedNext(oop obj) const;
1353
1354  // Determine if an object is dead, given the object and also
1355  // the region to which the object belongs. An object is dead
1356  // iff a) it was not allocated since the last mark, b) it
1357  // is not marked, and c) it is not in an archive region.
1358  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1359    return
1360      !hr->obj_allocated_since_prev_marking(obj) &&
1361      !isMarkedPrev(obj) &&
1362      !hr->is_archive();
1363  }
1364
1365  // This function returns true when an object has been
1366  // around since the previous marking and hasn't yet
1367  // been marked during this marking, and is not in an archive region.
1368  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1369    return
1370      !hr->obj_allocated_since_next_marking(obj) &&
1371      !isMarkedNext(obj) &&
1372      !hr->is_archive();
1373  }
1374
1375  // Determine if an object is dead, given only the object itself.
1376  // This will find the region to which the object belongs and
1377  // then call the region version of the same function.
1378
1379  // Note that a NULL object is not considered dead.
1380
1381  inline bool is_obj_dead(const oop obj) const;
1382
1383  inline bool is_obj_ill(const oop obj) const;
1384
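  // A minimal sketch of the object-only overload above, assuming it follows
  // the description given: a NULL object is never dead, otherwise dispatch to
  // the region version (the inline definition lives elsewhere).
  //
  //   bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  //     if (obj == NULL) return false;
  //     return is_obj_dead(obj, heap_region_containing(obj));
  //   }
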
1385  G1ConcurrentMark* concurrent_mark() const { return _cm; }
1386
1387  // Refinement
1388
1389  ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1390
1391  // Optimized nmethod scanning support routines
1392
1393  // Register the given nmethod with the G1 heap.
1394  virtual void register_nmethod(nmethod* nm);
1395
1396  // Unregister the given nmethod from the G1 heap.
1397  virtual void unregister_nmethod(nmethod* nm);
1398
1399  // Free up superfluous code root memory.
1400  void purge_code_root_memory();
1401
1402  // Rebuild the strong code root lists for each region
1403  // after a full GC.
1404  void rebuild_strong_code_roots();
1405
1406  // Delete entries for dead interned strings and clean up unreferenced symbols
1407  // in symbol table, possibly in parallel.
1408  void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
1409
1410  // Parallel phase of unloading/cleaning after G1 concurrent mark.
1411  void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred);
1412
1413  // Redirty logged cards in the refinement queue.
1414  void redirty_logged_cards();
1415  // Verification
1416
1417  // Perform any cleanup actions necessary before allowing a verification.
1418  virtual void prepare_for_verify();
1419
1420  // Perform verification.
1421
1422  // vo == UsePrevMarking -> use "prev" marking information,
1423  // vo == UseNextMarking -> use "next" marking information,
1424  // vo == UseMarkWord    -> use the mark word in the object header.
1425  //
1426  // NOTE: Only the "prev" marking information is guaranteed to be
1427  // consistent most of the time, so most calls to this should use
1428  // vo == UsePrevMarking.
1429  // Currently, there is only one case where this is called with
1430  // vo == UseNextMarking, which is to verify the "next" marking
1431  // information at the end of remark.
1432  // Currently there is only one place where this is called with
1433  // vo == UseMarkWord, which is to verify the marking during a
1434  // full GC.
1435  void verify(VerifyOption vo);
1436
1437  // The methods below are here for convenience and dispatch the
1438  // appropriate method depending on the value of the given VerifyOption
1439  // parameter. The values for that parameter, and their meanings,
1440  // are the same as those above.
1441
1442  bool is_obj_dead_cond(const oop obj,
1443                        const HeapRegion* hr,
1444                        const VerifyOption vo) const;
1445
1446  bool is_obj_dead_cond(const oop obj,
1447                        const VerifyOption vo) const;
1448
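  // Illustrative sketch of the dispatch described above, assuming each
  // VerifyOption maps onto the corresponding predicate and that UseMarkWord
  // consults the object's GC mark bit:
  //
  //   bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
  //                                          const HeapRegion* hr,
  //                                          const VerifyOption vo) const {
  //     switch (vo) {
  //       case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
  //       case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
  //       case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  //       default:                            ShouldNotReachHere();
  //     }
  //     return false; // unreachable; keeps some compilers happy
  //   }
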
1449  G1HeapSummary create_g1_heap_summary();
1450  G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1451
1452  // Printing
1453private:
1454  void print_heap_regions() const;
1455  void print_regions_on(outputStream* st) const;
1456
1457public:
1458  virtual void print_on(outputStream* st) const;
1459  virtual void print_extended_on(outputStream* st) const;
1460  virtual void print_on_error(outputStream* st) const;
1461
1462  virtual void print_gc_threads_on(outputStream* st) const;
1463  virtual void gc_threads_do(ThreadClosure* tc) const;
1464
1465  // Override
1466  void print_tracing_info() const;
1467
1468  // The following two methods are helpful for debugging RSet issues.
1469  void print_cset_rsets() PRODUCT_RETURN;
1470  void print_all_rsets() PRODUCT_RETURN;
1471
1472public:
1473  size_t pending_card_num();
1474
1475protected:
1476  size_t _max_heap_capacity;
1477};
1478
1479class G1ParEvacuateFollowersClosure : public VoidClosure {
1480private:
1481  double _start_term;
1482  double _term_time;
1483  size_t _term_attempts;
1484
1485  void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1486  void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
1487protected:
1488  G1CollectedHeap*              _g1h;
1489  G1ParScanThreadState*         _par_scan_state;
1490  RefToScanQueueSet*            _queues;
1491  ParallelTaskTerminator*       _terminator;
1492
1493  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1494  RefToScanQueueSet*      queues()         { return _queues; }
1495  ParallelTaskTerminator* terminator()     { return _terminator; }
1496
1497public:
1498  G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1499                                G1ParScanThreadState* par_scan_state,
1500                                RefToScanQueueSet* queues,
1501                                ParallelTaskTerminator* terminator)
1502    : _g1h(g1h), _par_scan_state(par_scan_state),
1503      _queues(queues), _terminator(terminator),
1504      _start_term(0.0), _term_time(0.0), _term_attempts(0) {}
1505
1506  void do_void();
1507
1508  double term_time() const { return _term_time; }
1509  size_t term_attempts() const { return _term_attempts; }
1510
1511private:
1512  inline bool offer_termination();
1513};
1514
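// A minimal sketch of how offer_termination() above is presumably wired to the
// termination bookkeeping: the terminator call is bracketed by
// start_term_time()/end_term_time(), so term_time() and term_attempts() report
// the time spent and attempts made waiting for termination.
//
//   inline bool G1ParEvacuateFollowersClosure::offer_termination() {
//     start_term_time();
//     const bool res = terminator()->offer_termination();
//     end_term_time();
//     return res;
//   }
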
1515#endif // SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
1516