1/*
2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/metadataOnStackMark.hpp"
27#include "classfile/stringTable.hpp"
28#include "classfile/symbolTable.hpp"
29#include "code/codeCache.hpp"
30#include "code/icBuffer.hpp"
31#include "gc/g1/bufferingOopClosure.hpp"
32#include "gc/g1/concurrentG1Refine.hpp"
33#include "gc/g1/concurrentG1RefineThread.hpp"
34#include "gc/g1/concurrentMarkThread.inline.hpp"
35#include "gc/g1/g1Allocator.inline.hpp"
36#include "gc/g1/g1CollectedHeap.inline.hpp"
37#include "gc/g1/g1CollectionSet.hpp"
38#include "gc/g1/g1CollectorPolicy.hpp"
39#include "gc/g1/g1CollectorState.hpp"
40#include "gc/g1/g1EvacStats.inline.hpp"
41#include "gc/g1/g1GCPhaseTimes.hpp"
42#include "gc/g1/g1HeapSizingPolicy.hpp"
43#include "gc/g1/g1HeapTransition.hpp"
44#include "gc/g1/g1HeapVerifier.hpp"
45#include "gc/g1/g1HotCardCache.hpp"
46#include "gc/g1/g1MarkSweep.hpp"
47#include "gc/g1/g1OopClosures.inline.hpp"
48#include "gc/g1/g1ParScanThreadState.inline.hpp"
49#include "gc/g1/g1Policy.hpp"
50#include "gc/g1/g1RegionToSpaceMapper.hpp"
51#include "gc/g1/g1RemSet.inline.hpp"
52#include "gc/g1/g1RootClosures.hpp"
53#include "gc/g1/g1RootProcessor.hpp"
54#include "gc/g1/g1StringDedup.hpp"
55#include "gc/g1/g1YCTypes.hpp"
56#include "gc/g1/heapRegion.inline.hpp"
57#include "gc/g1/heapRegionRemSet.hpp"
58#include "gc/g1/heapRegionSet.inline.hpp"
59#include "gc/g1/suspendibleThreadSet.hpp"
60#include "gc/g1/vm_operations_g1.hpp"
61#include "gc/shared/gcHeapSummary.hpp"
62#include "gc/shared/gcId.hpp"
63#include "gc/shared/gcLocker.inline.hpp"
64#include "gc/shared/gcTimer.hpp"
65#include "gc/shared/gcTrace.hpp"
66#include "gc/shared/gcTraceTime.inline.hpp"
67#include "gc/shared/generationSpec.hpp"
68#include "gc/shared/isGCActiveMark.hpp"
69#include "gc/shared/preservedMarks.inline.hpp"
70#include "gc/shared/referenceProcessor.inline.hpp"
71#include "gc/shared/taskqueue.inline.hpp"
72#include "logging/log.hpp"
73#include "memory/allocation.hpp"
74#include "memory/iterator.hpp"
75#include "memory/resourceArea.hpp"
76#include "oops/oop.inline.hpp"
77#include "runtime/atomic.hpp"
78#include "runtime/init.hpp"
79#include "runtime/orderAccess.inline.hpp"
80#include "runtime/vmThread.hpp"
81#include "utilities/globalDefinitions.hpp"
82#include "utilities/stack.inline.hpp"
83
84size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
85
// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the Heap_lock.  This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM.  (Note that this does not
// apply to allocation within TLABs, which is not part of this interface:
// it is done directly by clients of this interface.)
94
95// Local to this file.
96
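// Closure applied to card table entries by the refinement code. Each card is
// passed on to G1RemSet::refine_card(); when running concurrently, the closure
// returns false if the suspendible thread set asks us to yield, so that the
// caller can do so.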
97class RefineCardTableEntryClosure: public CardTableEntryClosure {
98  bool _concurrent;
99public:
100  RefineCardTableEntryClosure() : _concurrent(true) { }
101
102  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
103    bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, NULL);
    // This path is executed by the concurrent refinement or mutator threads,
    // and so we do not care if card_ptr contains references that point into
    // the collection set.
107    assert(!oops_into_cset, "should be");
108
109    if (_concurrent && SuspendibleThreadSet::should_yield()) {
110      // Caller will actually yield.
111      return false;
112    }
113    // Otherwise, we finished successfully; return true.
114    return true;
115  }
116
117  void set_concurrent(bool b) { _concurrent = b; }
118};
119
120
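// Closure used to re-dirty logged card table entries. A card is only
// re-dirtied if its region will survive the freeing of the collection set
// (i.e. the region is not in the collection set, or it failed evacuation);
// the number of cards re-dirtied is recorded.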
121class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
122 private:
123  size_t _num_dirtied;
124  G1CollectedHeap* _g1h;
125  G1SATBCardTableLoggingModRefBS* _g1_bs;
126
127  HeapRegion* region_for_card(jbyte* card_ptr) const {
128    return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
129  }
130
131  bool will_become_free(HeapRegion* hr) const {
132    // A region will be freed by free_collection_set if the region is in the
133    // collection set and has not had an evacuation failure.
134    return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
135  }
136
137 public:
138  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
139    _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
140
141  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
142    HeapRegion* hr = region_for_card(card_ptr);
143
144    // Should only dirty cards in regions that won't be freed.
145    if (!will_become_free(hr)) {
146      *card_ptr = CardTableModRefBS::dirty_card_val();
147      _num_dirtied++;
148    }
149
150    return true;
151  }
152
153  size_t num_dirtied()   const { return _num_dirtied; }
154};
155
156
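// Listener invoked when the region-to-space mapper commits heap regions;
// newly (re)committed regions must have their "from card cache" entries
// invalidated before they are used again.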
157void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
158  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
159}
160
161void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
162  // The from card cache is not the memory that is actually committed. So we cannot
163  // take advantage of the zero_filled parameter.
164  reset_from_card_cache(start_idx, num_regions);
165}
166
167// Returns true if the reference points to an object that
168// can move in an incremental collection.
169bool G1CollectedHeap::is_scavengable(const void* p) {
170  HeapRegion* hr = heap_region_containing(p);
171  return !hr->is_pinned();
172}
173
174// Private methods.
175
176HeapRegion*
177G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
178  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
179  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
180    if (!_secondary_free_list.is_empty()) {
181      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
182                                      "secondary_free_list has %u entries",
183                                      _secondary_free_list.length());
184      // It looks as if there are free regions available on the
185      // secondary_free_list. Let's move them to the free_list and try
186      // again to allocate from it.
187      append_secondary_free_list();
188
189      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
190             "empty we should have moved at least one entry to the free_list");
191      HeapRegion* res = _hrm.allocate_free_region(is_old);
192      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
193                                      "allocated " HR_FORMAT " from secondary_free_list",
194                                      HR_FORMAT_PARAMS(res));
195      return res;
196    }
197
    // Wait here until we get notified either when (a) there are no
    // more free regions coming or (b) some regions have been moved onto
    // the secondary_free_list.
201    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
202  }
203
204  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
205                                  "could not allocate from secondary_free_list");
206  return NULL;
207}
208
209HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
210  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
211         "the only time we use this to allocate a humongous region is "
212         "when we are allocating a single humongous region");
213
214  HeapRegion* res;
215  if (G1StressConcRegionFreeing) {
216    if (!_secondary_free_list.is_empty()) {
217      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
218                                      "forced to look at the secondary_free_list");
219      res = new_region_try_secondary_free_list(is_old);
220      if (res != NULL) {
221        return res;
222      }
223    }
224  }
225
226  res = _hrm.allocate_free_region(is_old);
227
228  if (res == NULL) {
229    log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
230                                    "res == NULL, trying the secondary_free_list");
231    res = new_region_try_secondary_free_list(is_old);
232  }
233  if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
234    // Currently, only attempts to allocate GC alloc regions set
235    // do_expand to true. So, we should only reach here during a
236    // safepoint. If this assumption changes we might have to
237    // reconsider the use of _expand_heap_after_alloc_failure.
238    assert(SafepointSynchronize::is_at_safepoint(), "invariant");
239
240    log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
241                              word_size * HeapWordSize);
242
243    if (expand(word_size * HeapWordSize)) {
244      // Given that expand() succeeded in expanding the heap, and we
245      // always expand the heap by an amount aligned to the heap
246      // region size, the free list should in theory not be empty.
      // In any case, allocate_free_region() will check for NULL.
248      res = _hrm.allocate_free_region(is_old);
249    } else {
250      _expand_heap_after_alloc_failure = false;
251    }
252  }
253  return res;
254}
255
256HeapWord*
257G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
258                                                           uint num_regions,
259                                                           size_t word_size,
260                                                           AllocationContext_t context) {
261  assert(first != G1_NO_HRM_INDEX, "pre-condition");
262  assert(is_humongous(word_size), "word_size should be humongous");
263  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
264
265  // Index of last region in the series.
266  uint last = first + num_regions - 1;
267
268  // We need to initialize the region(s) we just discovered. This is
269  // a bit tricky given that it can happen concurrently with
270  // refinement threads refining cards on these regions and
271  // potentially wanting to refine the BOT as they are scanning
272  // those cards (this can happen shortly after a cleanup; see CR
273  // 6991377). So we have to set up the region(s) carefully and in
274  // a specific order.
275
276  // The word size sum of all the regions we will allocate.
277  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
278  assert(word_size <= word_size_sum, "sanity");
279
280  // This will be the "starts humongous" region.
281  HeapRegion* first_hr = region_at(first);
282  // The header of the new object will be placed at the bottom of
283  // the first region.
284  HeapWord* new_obj = first_hr->bottom();
285  // This will be the new top of the new object.
286  HeapWord* obj_top = new_obj + word_size;
287
288  // First, we need to zero the header of the space that we will be
289  // allocating. When we update top further down, some refinement
290  // threads might try to scan the region. By zeroing the header we
291  // ensure that any thread that will try to scan the region will
292  // come across the zero klass word and bail out.
293  //
294  // NOTE: It would not have been correct to have used
295  // CollectedHeap::fill_with_object() and make the space look like
296  // an int array. The thread that is doing the allocation will
297  // later update the object header to a potentially different array
298  // type and, for a very short period of time, the klass and length
299  // fields will be inconsistent. This could cause a refinement
300  // thread to calculate the object size incorrectly.
301  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
302
303  // Next, pad out the unused tail of the last region with filler
304  // objects, for improved usage accounting.
305  // How many words we use for filler objects.
306  size_t word_fill_size = word_size_sum - word_size;
307
  // How many words of memory we "waste" because they cannot hold a filler object.
309  size_t words_not_fillable = 0;
310
311  if (word_fill_size >= min_fill_size()) {
312    fill_with_objects(obj_top, word_fill_size);
313  } else if (word_fill_size > 0) {
314    // We have space to fill, but we cannot fit an object there.
315    words_not_fillable = word_fill_size;
316    word_fill_size = 0;
317  }
318
319  // We will set up the first region as "starts humongous". This
320  // will also update the BOT covering all the regions to reflect
321  // that there is a single object that starts at the bottom of the
322  // first region.
323  first_hr->set_starts_humongous(obj_top, word_fill_size);
324  first_hr->set_allocation_context(context);
325  // Then, if there are any, we will set up the "continues
326  // humongous" regions.
327  HeapRegion* hr = NULL;
328  for (uint i = first + 1; i <= last; ++i) {
329    hr = region_at(i);
330    hr->set_continues_humongous(first_hr);
331    hr->set_allocation_context(context);
332  }
333
334  // Up to this point no concurrent thread would have been able to
335  // do any scanning on any region in this series. All the top
336  // fields still point to bottom, so the intersection between
337  // [bottom,top] and [card_start,card_end] will be empty. Before we
338  // update the top fields, we'll do a storestore to make sure that
339  // no thread sees the update to top before the zeroing of the
340  // object header and the BOT initialization.
341  OrderAccess::storestore();
342
  // Now, we will update the top fields of all the regions in the series
  // except the last one.
345  for (uint i = first; i < last; ++i) {
346    hr = region_at(i);
347    hr->set_top(hr->end());
348  }
349
350  hr = region_at(last);
351  // If we cannot fit a filler object, we must set top to the end
352  // of the humongous object, otherwise we cannot iterate the heap
353  // and the BOT will not be complete.
354  hr->set_top(hr->end() - words_not_fillable);
355
356  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
357         "obj_top should be in last region");
358
359  _verifier->check_bitmaps("Humongous Region Allocation", first_hr);
360
361  assert(words_not_fillable == 0 ||
362         first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
363         "Miscalculation in humongous allocation");
364
365  increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
366
367  for (uint i = first; i <= last; ++i) {
368    hr = region_at(i);
369    _humongous_set.add(hr);
370    _hr_printer.alloc(hr);
371  }
372
373  return new_obj;
374}
375
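// Number of regions needed to hold a humongous object of the given word size:
// the size is simply rounded up to a whole number of regions. For example,
// with a region size of 1M words, an object of 2.5M words needs 3 regions.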
376size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
377  assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
378  return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
379}
380
// If the allocation could fit into the free regions without expansion, try that.
// Otherwise, if the heap can be expanded, do so.
// Otherwise, if using ex regions might help, try with ex given back.
384HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
385  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
386
387  _verifier->verify_region_sets_optional();
388
389  uint first = G1_NO_HRM_INDEX;
390  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
391
392  if (obj_regions == 1) {
393    // Only one region to allocate, try to use a fast path by directly allocating
394    // from the free lists. Do not try to expand here, we will potentially do that
395    // later.
396    HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
397    if (hr != NULL) {
398      first = hr->hrm_index();
399    }
400  } else {
    // We can't allocate a humongous object spanning more than one region while
    // cleanupComplete() is running, since some of the regions we find to be
    // empty might not yet be added to the free list. It is not straightforward
    // to know which list they are on so that we can remove them. We only
    // need to do this if we need to allocate more than one region to satisfy the
    // current humongous allocation request. If we are only allocating one region
    // we use the one-region allocation code (see above), which already
    // potentially waits for regions from the secondary free list.
409    wait_while_free_regions_coming();
410    append_secondary_free_list_if_not_empty_with_lock();
411
    // Policy: Try only empty (i.e. already committed) regions first. Maybe we
    // are lucky enough to find some.
414    first = _hrm.find_contiguous_only_empty(obj_regions);
415    if (first != G1_NO_HRM_INDEX) {
416      _hrm.allocate_free_regions_starting_at(first, obj_regions);
417    }
418  }
419
420  if (first == G1_NO_HRM_INDEX) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If we find such a mix, try expansion.
424    first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
425    if (first != G1_NO_HRM_INDEX) {
426      // We found something. Make sure these regions are committed, i.e. expand
427      // the heap. Alternatively we could do a defragmentation GC.
428      log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
429                                    word_size * HeapWordSize);
430
431      _hrm.expand_at(first, obj_regions, workers());
432      g1_policy()->record_new_heap_size(num_regions());
433
434#ifdef ASSERT
435      for (uint i = first; i < first + obj_regions; ++i) {
436        HeapRegion* hr = region_at(i);
437        assert(hr->is_free(), "sanity");
438        assert(hr->is_empty(), "sanity");
439        assert(is_on_master_free_list(hr), "sanity");
440      }
441#endif
442      _hrm.allocate_free_regions_starting_at(first, obj_regions);
443    } else {
444      // Policy: Potentially trigger a defragmentation GC.
445    }
446  }
447
448  HeapWord* result = NULL;
449  if (first != G1_NO_HRM_INDEX) {
450    result = humongous_obj_allocate_initialize_regions(first, obj_regions,
451                                                       word_size, context);
452    assert(result != NULL, "it should always return a valid result");
453
454    // A successful humongous object allocation changes the used space
455    // information of the old generation so we need to recalculate the
456    // sizes and update the jstat counters here.
457    g1mm()->update_sizes();
458  }
459
460  _verifier->verify_region_sets_optional();
461
462  return result;
463}
464
465HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
466  assert_heap_not_locked_and_not_at_safepoint();
467  assert(!is_humongous(word_size), "we do not allow humongous TLABs");
468
469  uint dummy_gc_count_before;
470  uint dummy_gclocker_retry_count = 0;
471  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
472}
473
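// Entry point for ordinary (non-TLAB) object allocation from the rest of the
// JVM. Loops between allocation attempts and, when those fail, schedules a
// VM_G1CollectForAllocation safepoint operation to collect and then retries.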
474HeapWord*
475G1CollectedHeap::mem_allocate(size_t word_size,
476                              bool*  gc_overhead_limit_was_exceeded) {
477  assert_heap_not_locked_and_not_at_safepoint();
478
479  // Loop until the allocation is satisfied, or unsatisfied after GC.
480  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
481    uint gc_count_before;
482
483    HeapWord* result = NULL;
484    if (!is_humongous(word_size)) {
485      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
486    } else {
487      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
488    }
489    if (result != NULL) {
490      return result;
491    }
492
493    // Create the garbage collection operation...
494    VM_G1CollectForAllocation op(gc_count_before, word_size);
495    op.set_allocation_context(AllocationContext::current());
496
497    // ...and get the VM thread to execute it.
498    VMThread::execute(&op);
499
500    if (op.prologue_succeeded() && op.pause_succeeded()) {
501      // If the operation was successful we'll return the result even
502      // if it is NULL. If the allocation attempt failed immediately
503      // after a Full GC, it's unlikely we'll be able to allocate now.
504      HeapWord* result = op.result();
505      if (result != NULL && !is_humongous(word_size)) {
        // Allocations performed during VM operations do not do any
        // card dirtying, so we have to do it here. We only have to do
        // this for non-humongous allocations, though.
509        dirty_young_block(result, word_size);
510      }
511      return result;
512    } else {
513      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
514        return NULL;
515      }
516      assert(op.result() == NULL,
517             "the result should be NULL if the VM op did not succeed");
518    }
519
520    // Give a warning if we seem to be looping forever.
521    if ((QueuedAllocationWarningCount > 0) &&
522        (try_count % QueuedAllocationWarningCount == 0)) {
523      log_warning(gc)("G1CollectedHeap::mem_allocate retries %d times", try_count);
524    }
525  }
526
527  ShouldNotReachHere();
528  return NULL;
529}
530
531HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
532                                                   AllocationContext_t context,
533                                                   uint* gc_count_before_ret,
534                                                   uint* gclocker_retry_count_ret) {
535  // Make sure you read the note in attempt_allocation_humongous().
536
537  assert_heap_not_locked_and_not_at_safepoint();
538  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
539         "be called for humongous allocation requests");
540
541  // We should only get here after the first-level allocation attempt
542  // (attempt_allocation()) failed to allocate.
543
544  // We will loop until a) we manage to successfully perform the
545  // allocation or b) we successfully schedule a collection which
546  // fails to perform the allocation. b) is the only case when we'll
547  // return NULL.
548  HeapWord* result = NULL;
549  for (int try_count = 1; /* we'll return */; try_count += 1) {
550    bool should_try_gc;
551    uint gc_count_before;
552
553    {
554      MutexLockerEx x(Heap_lock);
555      result = _allocator->attempt_allocation_locked(word_size, context);
556      if (result != NULL) {
557        return result;
558      }
559
560      if (GCLocker::is_active_and_needs_gc()) {
561        if (g1_policy()->can_expand_young_list()) {
          // No need for an ergo verbose message here;
          // can_expand_young_list() does this when it returns true.
564          result = _allocator->attempt_allocation_force(word_size, context);
565          if (result != NULL) {
566            return result;
567          }
568        }
569        should_try_gc = false;
570      } else {
571        // The GCLocker may not be active but the GCLocker initiated
572        // GC may not yet have been performed (GCLocker::needs_gc()
573        // returns true). In this case we do not try this GC and
574        // wait until the GCLocker initiated GC is performed, and
575        // then retry the allocation.
576        if (GCLocker::needs_gc()) {
577          should_try_gc = false;
578        } else {
579          // Read the GC count while still holding the Heap_lock.
580          gc_count_before = total_collections();
581          should_try_gc = true;
582        }
583      }
584    }
585
586    if (should_try_gc) {
587      bool succeeded;
588      result = do_collection_pause(word_size, gc_count_before, &succeeded,
589                                   GCCause::_g1_inc_collection_pause);
590      if (result != NULL) {
591        assert(succeeded, "only way to get back a non-NULL result");
592        return result;
593      }
594
595      if (succeeded) {
596        // If we get here we successfully scheduled a collection which
597        // failed to allocate. No point in trying to allocate
598        // further. We'll just return NULL.
599        MutexLockerEx x(Heap_lock);
600        *gc_count_before_ret = total_collections();
601        return NULL;
602      }
603    } else {
604      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
605        MutexLockerEx x(Heap_lock);
606        *gc_count_before_ret = total_collections();
607        return NULL;
608      }
609      // The GCLocker is either active or the GCLocker initiated
610      // GC has not yet been performed. Stall until it is and
611      // then retry the allocation.
612      GCLocker::stall_until_clear();
613      (*gclocker_retry_count_ret) += 1;
614    }
615
616    // We can reach here if we were unsuccessful in scheduling a
617    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
619    // allocation attempt in case another thread successfully
620    // performed a collection and reclaimed enough space. We do the
621    // first attempt (without holding the Heap_lock) here and the
622    // follow-on attempt will be at the start of the next loop
623    // iteration (after taking the Heap_lock).
624    result = _allocator->attempt_allocation(word_size, context);
625    if (result != NULL) {
626      return result;
627    }
628
629    // Give a warning if we seem to be looping forever.
630    if ((QueuedAllocationWarningCount > 0) &&
631        (try_count % QueuedAllocationWarningCount == 0)) {
632      log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
633                      "retries %d times", try_count);
634    }
635  }
636
637  ShouldNotReachHere();
638  return NULL;
639}
640
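// Support for allocating "archive" regions. These are intended to hold heap
// data that is mapped back in at JVM init time (see alloc_archive_regions and
// fill_archive_regions below); such regions are marked as archive and treated
// as pinned, so their objects are not moved by collections.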
641void G1CollectedHeap::begin_archive_alloc_range() {
642  assert_at_safepoint(true /* should_be_vm_thread */);
643  if (_archive_allocator == NULL) {
644    _archive_allocator = G1ArchiveAllocator::create_allocator(this);
645  }
646}
647
648bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
649  // Allocations in archive regions cannot be of a size that would be considered
650  // humongous even for a minimum-sized region, because G1 region sizes/boundaries
651  // may be different at archive-restore time.
652  return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
653}
654
655HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
656  assert_at_safepoint(true /* should_be_vm_thread */);
657  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
658  if (is_archive_alloc_too_large(word_size)) {
659    return NULL;
660  }
661  return _archive_allocator->archive_mem_allocate(word_size);
662}
663
664void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
665                                              size_t end_alignment_in_bytes) {
666  assert_at_safepoint(true /* should_be_vm_thread */);
667  assert(_archive_allocator != NULL, "_archive_allocator not initialized");
668
669  // Call complete_archive to do the real work, filling in the MemRegion
670  // array with the archive regions.
671  _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
672  delete _archive_allocator;
673  _archive_allocator = NULL;
674}
675
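// Check that every given range lies completely within the reserved heap.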
676bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
677  assert(ranges != NULL, "MemRegion array NULL");
678  assert(count != 0, "No MemRegions provided");
679  MemRegion reserved = _hrm.reserved();
680  for (size_t i = 0; i < count; i++) {
681    if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
682      return false;
683    }
684  }
685  return true;
686}
687
688bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
689  assert(!is_init_completed(), "Expect to be called at JVM init time");
690  assert(ranges != NULL, "MemRegion array NULL");
691  assert(count != 0, "No MemRegions provided");
692  MutexLockerEx x(Heap_lock);
693
694  MemRegion reserved = _hrm.reserved();
695  HeapWord* prev_last_addr = NULL;
696  HeapRegion* prev_last_region = NULL;
697
698  // Temporarily disable pretouching of heap pages. This interface is used
699  // when mmap'ing archived heap data in, so pre-touching is wasted.
700  FlagSetting fs(AlwaysPreTouch, false);
701
702  // Enable archive object checking in G1MarkSweep. We have to let it know
703  // about each archive range, so that objects in those ranges aren't marked.
704  G1MarkSweep::enable_archive_object_check();
705
706  // For each specified MemRegion range, allocate the corresponding G1
707  // regions and mark them as archive regions. We expect the ranges in
708  // ascending starting address order, without overlap.
709  for (size_t i = 0; i < count; i++) {
710    MemRegion curr_range = ranges[i];
711    HeapWord* start_address = curr_range.start();
712    size_t word_size = curr_range.word_size();
713    HeapWord* last_address = curr_range.last();
714    size_t commits = 0;
715
716    guarantee(reserved.contains(start_address) && reserved.contains(last_address),
717              "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
718              p2i(start_address), p2i(last_address));
719    guarantee(start_address > prev_last_addr,
720              "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
721              p2i(start_address), p2i(prev_last_addr));
722    prev_last_addr = last_address;
723
724    // Check for ranges that start in the same G1 region in which the previous
725    // range ended, and adjust the start address so we don't try to allocate
726    // the same region again. If the current range is entirely within that
727    // region, skip it, just adjusting the recorded top.
728    HeapRegion* start_region = _hrm.addr_to_region(start_address);
729    if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
730      start_address = start_region->end();
731      if (start_address > last_address) {
732        increase_used(word_size * HeapWordSize);
733        start_region->set_top(last_address + 1);
734        continue;
735      }
736      start_region->set_top(start_address);
737      curr_range = MemRegion(start_address, last_address + 1);
738      start_region = _hrm.addr_to_region(start_address);
739    }
740
741    // Perform the actual region allocation, exiting if it fails.
742    // Then note how much new space we have allocated.
743    if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
744      return false;
745    }
746    increase_used(word_size * HeapWordSize);
747    if (commits != 0) {
748      log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
749                                HeapRegion::GrainWords * HeapWordSize * commits);
750
751    }
752
753    // Mark each G1 region touched by the range as archive, add it to the old set,
754    // and set the allocation context and top.
755    HeapRegion* curr_region = _hrm.addr_to_region(start_address);
756    HeapRegion* last_region = _hrm.addr_to_region(last_address);
757    prev_last_region = last_region;
758
759    while (curr_region != NULL) {
760      assert(curr_region->is_empty() && !curr_region->is_pinned(),
761             "Region already in use (index %u)", curr_region->hrm_index());
762      curr_region->set_allocation_context(AllocationContext::system());
763      curr_region->set_archive();
764      _hr_printer.alloc(curr_region);
765      _old_set.add(curr_region);
766      if (curr_region != last_region) {
767        curr_region->set_top(curr_region->end());
768        curr_region = _hrm.next_region_in_heap(curr_region);
769      } else {
770        curr_region->set_top(last_address + 1);
771        curr_region = NULL;
772      }
773    }
774
775    // Notify mark-sweep of the archive range.
776    G1MarkSweep::set_range_archive(curr_range, true);
777  }
778  return true;
779}
780
781void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
782  assert(!is_init_completed(), "Expect to be called at JVM init time");
783  assert(ranges != NULL, "MemRegion array NULL");
784  assert(count != 0, "No MemRegions provided");
785  MemRegion reserved = _hrm.reserved();
786  HeapWord *prev_last_addr = NULL;
787  HeapRegion* prev_last_region = NULL;
788
789  // For each MemRegion, create filler objects, if needed, in the G1 regions
790  // that contain the address range. The address range actually within the
791  // MemRegion will not be modified. That is assumed to have been initialized
792  // elsewhere, probably via an mmap of archived heap data.
793  MutexLockerEx x(Heap_lock);
794  for (size_t i = 0; i < count; i++) {
795    HeapWord* start_address = ranges[i].start();
796    HeapWord* last_address = ranges[i].last();
797
798    assert(reserved.contains(start_address) && reserved.contains(last_address),
799           "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
800           p2i(start_address), p2i(last_address));
801    assert(start_address > prev_last_addr,
802           "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
803           p2i(start_address), p2i(prev_last_addr));
804
805    HeapRegion* start_region = _hrm.addr_to_region(start_address);
806    HeapRegion* last_region = _hrm.addr_to_region(last_address);
807    HeapWord* bottom_address = start_region->bottom();
808
809    // Check for a range beginning in the same region in which the
810    // previous one ended.
811    if (start_region == prev_last_region) {
812      bottom_address = prev_last_addr + 1;
813    }
814
815    // Verify that the regions were all marked as archive regions by
816    // alloc_archive_regions.
817    HeapRegion* curr_region = start_region;
818    while (curr_region != NULL) {
819      guarantee(curr_region->is_archive(),
820                "Expected archive region at index %u", curr_region->hrm_index());
821      if (curr_region != last_region) {
822        curr_region = _hrm.next_region_in_heap(curr_region);
823      } else {
824        curr_region = NULL;
825      }
826    }
827
828    prev_last_addr = last_address;
829    prev_last_region = last_region;
830
831    // Fill the memory below the allocated range with dummy object(s),
832    // if the region bottom does not match the range start, or if the previous
833    // range ended within the same G1 region, and there is a gap.
834    if (start_address != bottom_address) {
835      size_t fill_size = pointer_delta(start_address, bottom_address);
836      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
837      increase_used(fill_size * HeapWordSize);
838    }
839  }
840}
841
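// First-level allocation attempt for non-humongous requests: try the current
// mutator alloc region without taking the Heap_lock and, if that fails, fall
// back to attempt_allocation_slow(). Successful allocations have their
// covering cards dirtied via dirty_young_block().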
842inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
843                                                     uint* gc_count_before_ret,
844                                                     uint* gclocker_retry_count_ret) {
845  assert_heap_not_locked_and_not_at_safepoint();
846  assert(!is_humongous(word_size), "attempt_allocation() should not "
847         "be called for humongous allocation requests");
848
849  AllocationContext_t context = AllocationContext::current();
850  HeapWord* result = _allocator->attempt_allocation(word_size, context);
851
852  if (result == NULL) {
853    result = attempt_allocation_slow(word_size,
854                                     context,
855                                     gc_count_before_ret,
856                                     gclocker_retry_count_ret);
857  }
858  assert_heap_not_locked();
859  if (result != NULL) {
860    dirty_young_block(result, word_size);
861  }
862  return result;
863}
864
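// Undo archive allocation: for each given range, remove the covering regions
// from the old set, reset them to free, uncommit them, and tell mark-sweep
// that the range is no longer an archive range.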
865void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
866  assert(!is_init_completed(), "Expect to be called at JVM init time");
867  assert(ranges != NULL, "MemRegion array NULL");
868  assert(count != 0, "No MemRegions provided");
869  MemRegion reserved = _hrm.reserved();
870  HeapWord* prev_last_addr = NULL;
871  HeapRegion* prev_last_region = NULL;
872  size_t size_used = 0;
873  size_t uncommitted_regions = 0;
874
  // For each MemRegion, free the G1 regions that constitute it, and
876  // notify mark-sweep that the range is no longer to be considered 'archive.'
877  MutexLockerEx x(Heap_lock);
878  for (size_t i = 0; i < count; i++) {
879    HeapWord* start_address = ranges[i].start();
880    HeapWord* last_address = ranges[i].last();
881
882    assert(reserved.contains(start_address) && reserved.contains(last_address),
883           "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
884           p2i(start_address), p2i(last_address));
885    assert(start_address > prev_last_addr,
886           "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
887           p2i(start_address), p2i(prev_last_addr));
888    size_used += ranges[i].byte_size();
889    prev_last_addr = last_address;
890
891    HeapRegion* start_region = _hrm.addr_to_region(start_address);
892    HeapRegion* last_region = _hrm.addr_to_region(last_address);
893
894    // Check for ranges that start in the same G1 region in which the previous
895    // range ended, and adjust the start address so we don't try to free
896    // the same region again. If the current range is entirely within that
897    // region, skip it.
898    if (start_region == prev_last_region) {
899      start_address = start_region->end();
900      if (start_address > last_address) {
901        continue;
902      }
903      start_region = _hrm.addr_to_region(start_address);
904    }
905    prev_last_region = last_region;
906
907    // After verifying that each region was marked as an archive region by
908    // alloc_archive_regions, set it free and empty and uncommit it.
909    HeapRegion* curr_region = start_region;
910    while (curr_region != NULL) {
911      guarantee(curr_region->is_archive(),
912                "Expected archive region at index %u", curr_region->hrm_index());
913      uint curr_index = curr_region->hrm_index();
914      _old_set.remove(curr_region);
915      curr_region->set_free();
916      curr_region->set_top(curr_region->bottom());
917      if (curr_region != last_region) {
918        curr_region = _hrm.next_region_in_heap(curr_region);
919      } else {
920        curr_region = NULL;
921      }
922      _hrm.shrink_at(curr_index, 1);
923      uncommitted_regions++;
924    }
925
926    // Notify mark-sweep that this is no longer an archive range.
927    G1MarkSweep::set_range_archive(ranges[i], false);
928  }
929
930  if (uncommitted_regions != 0) {
931    log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
932                              HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
933  }
934  decrease_used(size_used);
935}
936
937HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
938                                                        uint* gc_count_before_ret,
939                                                        uint* gclocker_retry_count_ret) {
940  // The structure of this method has a lot of similarities to
941  // attempt_allocation_slow(). The reason these two were not merged
942  // into a single one is that such a method would require several "if
943  // allocation is not humongous do this, otherwise do that"
944  // conditional paths which would obscure its flow. In fact, an early
945  // version of this code did use a unified method which was harder to
946  // follow and, as a result, it had subtle bugs that were hard to
947  // track down. So keeping these two methods separate allows each to
948  // be more readable. It will be good to keep these two in sync as
949  // much as possible.
950
951  assert_heap_not_locked_and_not_at_safepoint();
952  assert(is_humongous(word_size), "attempt_allocation_humongous() "
953         "should only be called for humongous allocations");
954
955  // Humongous objects can exhaust the heap quickly, so we should check if we
956  // need to start a marking cycle at each humongous object allocation. We do
957  // the check before we do the actual allocation. The reason for doing it
958  // before the allocation is that we avoid having to keep track of the newly
959  // allocated memory while we do a GC.
960  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
961                                           word_size)) {
962    collect(GCCause::_g1_humongous_allocation);
963  }
964
965  // We will loop until a) we manage to successfully perform the
966  // allocation or b) we successfully schedule a collection which
967  // fails to perform the allocation. b) is the only case when we'll
968  // return NULL.
969  HeapWord* result = NULL;
970  for (int try_count = 1; /* we'll return */; try_count += 1) {
971    bool should_try_gc;
972    uint gc_count_before;
973
974    {
975      MutexLockerEx x(Heap_lock);
976
977      // Given that humongous objects are not allocated in young
978      // regions, we'll first try to do the allocation without doing a
979      // collection hoping that there's enough space in the heap.
980      result = humongous_obj_allocate(word_size, AllocationContext::current());
981      if (result != NULL) {
982        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
983        g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
984        return result;
985      }
986
987      if (GCLocker::is_active_and_needs_gc()) {
988        should_try_gc = false;
989      } else {
        // The GCLocker may not be active but the GCLocker initiated
991        // GC may not yet have been performed (GCLocker::needs_gc()
992        // returns true). In this case we do not try this GC and
993        // wait until the GCLocker initiated GC is performed, and
994        // then retry the allocation.
995        if (GCLocker::needs_gc()) {
996          should_try_gc = false;
997        } else {
998          // Read the GC count while still holding the Heap_lock.
999          gc_count_before = total_collections();
1000          should_try_gc = true;
1001        }
1002      }
1003    }
1004
1005    if (should_try_gc) {
1006      // If we failed to allocate the humongous object, we should try to
1007      // do a collection pause (if we're allowed) in case it reclaims
1008      // enough space for the allocation to succeed after the pause.
1009
1010      bool succeeded;
1011      result = do_collection_pause(word_size, gc_count_before, &succeeded,
1012                                   GCCause::_g1_humongous_allocation);
1013      if (result != NULL) {
1014        assert(succeeded, "only way to get back a non-NULL result");
1015        return result;
1016      }
1017
1018      if (succeeded) {
1019        // If we get here we successfully scheduled a collection which
1020        // failed to allocate. No point in trying to allocate
1021        // further. We'll just return NULL.
1022        MutexLockerEx x(Heap_lock);
1023        *gc_count_before_ret = total_collections();
1024        return NULL;
1025      }
1026    } else {
1027      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1028        MutexLockerEx x(Heap_lock);
1029        *gc_count_before_ret = total_collections();
1030        return NULL;
1031      }
1032      // The GCLocker is either active or the GCLocker initiated
1033      // GC has not yet been performed. Stall until it is and
1034      // then retry the allocation.
1035      GCLocker::stall_until_clear();
1036      (*gclocker_retry_count_ret) += 1;
1037    }
1038
1039    // We can reach here if we were unsuccessful in scheduling a
1040    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
1042    // allocation attempt in case another thread successfully
1043    // performed a collection and reclaimed enough space.  Give a
1044    // warning if we seem to be looping forever.
1045
1046    if ((QueuedAllocationWarningCount > 0) &&
1047        (try_count % QueuedAllocationWarningCount == 0)) {
1048      log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() "
1049                      "retries %d times", try_count);
1050    }
1051  }
1052
1053  ShouldNotReachHere();
1054  return NULL;
1055}
1056
1057HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1058                                                           AllocationContext_t context,
1059                                                           bool expect_null_mutator_alloc_region) {
1060  assert_at_safepoint(true /* should_be_vm_thread */);
1061  assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
1062         "the current alloc region was unexpectedly found to be non-NULL");
1063
1064  if (!is_humongous(word_size)) {
1065    return _allocator->attempt_allocation_locked(word_size, context);
1066  } else {
1067    HeapWord* result = humongous_obj_allocate(word_size, context);
1068    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1069      collector_state()->set_initiate_conc_mark_if_possible(true);
1070    }
1071    return result;
1072  }
1073
1074  ShouldNotReachHere();
1075}
1076
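// Closure used after a full compaction: since surviving objects may have
// moved, each region's remembered set is cleared (or asserted empty for
// continues-humongous regions) and the card table under the whole region is
// cleared as well, so that remembered sets can be rebuilt from scratch.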
1077class PostMCRemSetClearClosure: public HeapRegionClosure {
1078  G1CollectedHeap* _g1h;
1079  ModRefBarrierSet* _mr_bs;
1080public:
1081  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1082    _g1h(g1h), _mr_bs(mr_bs) {}
1083
1084  bool doHeapRegion(HeapRegion* r) {
1085    HeapRegionRemSet* hrrs = r->rem_set();
1086
1087    _g1h->reset_gc_time_stamps(r);
1088
1089    if (r->is_continues_humongous()) {
      // We'll assert that the strong code root list and RSet are empty.
1091      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
1092      assert(hrrs->occupied() == 0, "RSet should be empty");
1093    } else {
1094      hrrs->clear();
1095    }
1096    // You might think here that we could clear just the cards
1097    // corresponding to the used region.  But no: if we leave a dirty card
1098    // in a region we might allocate into, then it would prevent that card
1099    // from being enqueued, and cause it to be missed.
1100    // Re: the performance cost: we shouldn't be doing full GC anyway!
1101    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
1102
1103    return false;
1104  }
1105};
1106
1107void G1CollectedHeap::clear_rsets_post_compaction() {
1108  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
1109  heap_region_iterate(&rs_clear);
1110}
1111
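// Closure that scans a single region and records, via UpdateRSOopClosure,
// the remembered set entries that its references give rise to. Used to
// rebuild remembered sets after a full GC.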
1112class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1113  G1CollectedHeap*   _g1h;
1114  UpdateRSOopClosure _cl;
1115public:
1116  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
1117    _cl(g1->g1_rem_set(), worker_i),
1118    _g1h(g1)
1119  { }
1120
1121  bool doHeapRegion(HeapRegion* r) {
1122    if (!r->is_continues_humongous()) {
1123      _cl.set_from(r);
1124      r->oop_iterate(&_cl);
1125    }
1126    return false;
1127  }
1128};
1129
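// Gang task that rebuilds remembered sets for the whole heap in parallel;
// each worker claims regions through the HeapRegionClaimer and applies
// RebuildRSOutOfRegionClosure to them.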
1130class ParRebuildRSTask: public AbstractGangTask {
1131  G1CollectedHeap* _g1;
1132  HeapRegionClaimer _hrclaimer;
1133
1134public:
1135  ParRebuildRSTask(G1CollectedHeap* g1) :
1136      AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
1137
1138  void work(uint worker_id) {
1139    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
1140    _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
1141  }
1142};
1143
1144class PostCompactionPrinterClosure: public HeapRegionClosure {
1145private:
1146  G1HRPrinter* _hr_printer;
1147public:
1148  bool doHeapRegion(HeapRegion* hr) {
1149    assert(!hr->is_young(), "not expecting to find young regions");
1150    _hr_printer->post_compaction(hr);
1151    return false;
1152  }
1153
1154  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1155    : _hr_printer(hr_printer) { }
1156};
1157
1158void G1CollectedHeap::print_hrm_post_compaction() {
1159  if (_hr_printer.is_active()) {
1160    PostCompactionPrinterClosure cl(hr_printer());
1161    heap_region_iterate(&cl);
  }
}
1165
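// Performs a stop-the-world full collection using G1MarkSweep. Returns false
// if the GC locker is active and the collection could not be started;
// otherwise performs the collection and returns true.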
1166bool G1CollectedHeap::do_full_collection(bool explicit_gc,
1167                                         bool clear_all_soft_refs) {
1168  assert_at_safepoint(true /* should_be_vm_thread */);
1169
1170  if (GCLocker::check_active_before_gc()) {
1171    return false;
1172  }
1173
1174  STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
1175  gc_timer->register_gc_start();
1176
1177  SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
1178  GCIdMark gc_id_mark;
1179  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
1180
1181  SvcGCMarker sgcm(SvcGCMarker::FULL);
1182  ResourceMark rm;
1183
1184  print_heap_before_gc();
1185  print_heap_regions();
1186  trace_heap_before_gc(gc_tracer);
1187
1188  size_t metadata_prev_used = MetaspaceAux::used_bytes();
1189
1190  _verifier->verify_region_sets_optional();
1191
1192  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
1193                           collector_policy()->should_clear_all_soft_refs();
1194
1195  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
1196
1197  {
1198    IsGCActiveMark x;
1199
1200    // Timing
1201    assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
1202    GCTraceCPUTime tcpu;
1203
1204    {
1205      GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
1206      TraceCollectorStats tcs(g1mm()->full_collection_counters());
1207      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
1208
1209      G1HeapTransition heap_transition(this);
1210      g1_policy()->record_full_collection_start();
1211
1212      // Note: When we have a more flexible GC logging framework that
1213      // allows us to add optional attributes to a GC log record we
1214      // could consider timing and reporting how long we wait in the
1215      // following two methods.
1216      wait_while_free_regions_coming();
1217      // If we start the compaction before the CM threads finish
1218      // scanning the root regions we might trip them over as we'll
1219      // be moving objects / updating references. So let's wait until
1220      // they are done. By telling them to abort, they should complete
1221      // early.
1222      _cm->root_regions()->abort();
1223      _cm->root_regions()->wait_until_scan_finished();
1224      append_secondary_free_list_if_not_empty_with_lock();
1225
1226      gc_prologue(true);
1227      increment_total_collections(true /* full gc */);
1228      increment_old_marking_cycles_started();
1229
1230      assert(used() == recalculate_used(), "Should be equal");
1231
1232      _verifier->verify_before_gc();
1233
1234      _verifier->check_bitmaps("Full GC Start");
1235      pre_full_gc_dump(gc_timer);
1236
1237#if defined(COMPILER2) || INCLUDE_JVMCI
1238      DerivedPointerTable::clear();
1239#endif
1240
1241      // Disable discovery and empty the discovered lists
1242      // for the CM ref processor.
1243      ref_processor_cm()->disable_discovery();
1244      ref_processor_cm()->abandon_partial_discovery();
1245      ref_processor_cm()->verify_no_references_recorded();
1246
1247      // Abandon current iterations of concurrent marking and concurrent
1248      // refinement, if any are in progress.
1249      concurrent_mark()->abort();
1250
1251      // Make sure we'll choose a new allocation region afterwards.
1252      _allocator->release_mutator_alloc_region();
1253      _allocator->abandon_gc_alloc_regions();
1254      g1_rem_set()->cleanupHRRS();
1255
1256      // We may have added regions to the current incremental collection
1257      // set between the last GC or pause and now. We need to clear the
1258      // incremental collection set and then start rebuilding it afresh
1259      // after this full GC.
1260      abandon_collection_set(collection_set());
1261
1262      tear_down_region_sets(false /* free_list_only */);
1263      collector_state()->set_gcs_are_young(true);
1264
1265      // See the comments in g1CollectedHeap.hpp and
1266      // G1CollectedHeap::ref_processing_init() about
1267      // how reference processing currently works in G1.
1268
1269      // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1270      ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1271
1272      // Temporarily clear the STW ref processor's _is_alive_non_header field.
1273      ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1274
1275      ref_processor_stw()->enable_discovery();
1276      ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1277
1278      // Do collection work
1279      {
1280        HandleMark hm;  // Discard invalid handles created during gc
1281        G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1282      }
1283
1284      assert(num_free_regions() == 0, "we should not have added any free regions");
1285      rebuild_region_sets(false /* free_list_only */);
1286
1287      // Enqueue any discovered reference objects that have
1288      // not been removed from the discovered lists.
1289      ref_processor_stw()->enqueue_discovered_references();
1290
1291#if defined(COMPILER2) || INCLUDE_JVMCI
1292      DerivedPointerTable::update_pointers();
1293#endif
1294
1295      MemoryService::track_memory_usage();
1296
1297      assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1298      ref_processor_stw()->verify_no_references_recorded();
1299
1300      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1301      ClassLoaderDataGraph::purge();
1302      MetaspaceAux::verify_metrics();
1303
1304      // Note: since we've just done a full GC, concurrent
1305      // marking is no longer active. Therefore we need not
1306      // re-enable reference discovery for the CM ref processor.
1307      // That will be done at the start of the next marking cycle.
1308      assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1309      ref_processor_cm()->verify_no_references_recorded();
1310
1311      reset_gc_time_stamp();
1312      // Since everything potentially moved, we will clear all remembered
1313      // sets, and clear all cards.  Later we will rebuild remembered
1314      // sets. We will also reset the GC time stamps of the regions.
1315      clear_rsets_post_compaction();
1316      check_gc_time_stamps();
1317
1318      resize_if_necessary_after_full_collection();
1319
1320      // We should do this after we potentially resize the heap so
1321      // that all the COMMIT / UNCOMMIT events are generated before
1322      // the compaction events.
1323      print_hrm_post_compaction();
1324
1325      if (_hot_card_cache->use_cache()) {
1326        _hot_card_cache->reset_card_counts();
1327        _hot_card_cache->reset_hot_cache();
1328      }
1329
1330      // Rebuild remembered sets of all regions.
1331      uint n_workers =
1332        AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
1333                                                workers()->active_workers(),
1334                                                Threads::number_of_non_daemon_threads());
1335      workers()->update_active_workers(n_workers);
1336      log_info(gc,task)("Using %u workers of %u to rebuild remembered set", n_workers, workers()->total_workers());
1337
1338      ParRebuildRSTask rebuild_rs_task(this);
1339      workers()->run_task(&rebuild_rs_task);
1340
1341      // Rebuild the strong code root lists for each region
1342      rebuild_strong_code_roots();
1343
1344      if (true) { // FIXME
1345        MetaspaceGC::compute_new_size();
1346      }
1347
1348#ifdef TRACESPINNING
1349      ParallelTaskTerminator::print_termination_counts();
1350#endif
1351
1352      // Discard all rset updates
1353      JavaThread::dirty_card_queue_set().abandon_logs();
1354      assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1355
1356      // At this point there should be no regions in the
1357      // entire heap tagged as young.
1358      assert(check_young_list_empty(), "young list should be empty at this point");
1359
1360      // Update the number of full collections that have been completed.
1361      increment_old_marking_cycles_completed(false /* concurrent */);
1362
1363      _hrm.verify_optional();
1364      _verifier->verify_region_sets_optional();
1365
1366      _verifier->verify_after_gc();
1367
1368      // Clear the previous marking bitmap, if needed for bitmap verification.
1369      // Note we cannot do this when we clear the next marking bitmap in
1370      // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1371      // objects marked during a full GC against the previous bitmap.
1372      // But we need to clear it before calling check_bitmaps below since
1373      // the full GC has compacted objects and updated TAMS but not updated
1374      // the prev bitmap.
1375      if (G1VerifyBitmaps) {
1376        GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1377        _cm->clear_prev_bitmap(workers());
1378      }
1379      _verifier->check_bitmaps("Full GC End");
1380
1381      // Start a new incremental collection set for the next pause
1382      collection_set()->start_incremental_building();
1383
1384      clear_cset_fast_test();
1385
1386      _allocator->init_mutator_alloc_region();
1387
1388      g1_policy()->record_full_collection_end();
1389
1390      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1391      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1392      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1393      // before any GC notifications are raised.
1394      g1mm()->update_sizes();
1395
1396      gc_epilogue(true);
1397
1398      heap_transition.print();
1399
1400      print_heap_after_gc();
1401      print_heap_regions();
1402      trace_heap_after_gc(gc_tracer);
1403
1404      post_full_gc_dump(gc_timer);
1405    }
1406
1407    gc_timer->register_gc_end();
1408    gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1409  }
1410
1411  return true;
1412}
1413
1414void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
1415  // Currently, there is no facility in the do_full_collection(bool) API to notify
1416  // the caller that the collection did not succeed (e.g., because it was locked
1417  // out by the GC locker). So, right now, we'll ignore the return value.
1418  bool dummy = do_full_collection(true,                /* explicit_gc */
1419                                  clear_all_soft_refs);
1420}
1421
1422void G1CollectedHeap::resize_if_necessary_after_full_collection() {
1423  // Include bytes that will be pre-allocated to support collections, as "used".
1424  const size_t used_after_gc = used();
1425  const size_t capacity_after_gc = capacity();
1426  const size_t free_after_gc = capacity_after_gc - used_after_gc;
1427
1428  // This is enforced in arguments.cpp.
1429  assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
1430         "otherwise the code below doesn't make sense");
1431
1432  // We don't have floating point command-line arguments
1433  const double minimum_free_percentage = (double) MinHeapFreeRatio / 100.0;
1434  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
1435  const double maximum_free_percentage = (double) MaxHeapFreeRatio / 100.0;
1436  const double minimum_used_percentage = 1.0 - maximum_free_percentage;
1437
1438  const size_t min_heap_size = collector_policy()->min_heap_byte_size();
1439  const size_t max_heap_size = collector_policy()->max_heap_byte_size();
1440
1441  // We have to be careful here as these two calculations can overflow
1442  // 32-bit size_t's.
1443  double used_after_gc_d = (double) used_after_gc;
1444  double minimum_desired_capacity_d = used_after_gc_d / maximum_used_percentage;
1445  double maximum_desired_capacity_d = used_after_gc_d / minimum_used_percentage;
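      // For illustration: with MinHeapFreeRatio = 40 and MaxHeapFreeRatio = 70,
      // maximum_used_percentage is 0.60 and minimum_used_percentage is 0.30, so
      // 600MB used after GC gives a minimum desired capacity of 1000MB and a
      // maximum desired capacity of 2000MB.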
1446
1447  // Let's make sure that they are both under the max heap size, which
1448  // by default will make them fit into a size_t.
1449  double desired_capacity_upper_bound = (double) max_heap_size;
1450  minimum_desired_capacity_d = MIN2(minimum_desired_capacity_d,
1451                                    desired_capacity_upper_bound);
1452  maximum_desired_capacity_d = MIN2(maximum_desired_capacity_d,
1453                                    desired_capacity_upper_bound);
1454
1455  // We can now safely turn them into size_t's.
1456  size_t minimum_desired_capacity = (size_t) minimum_desired_capacity_d;
1457  size_t maximum_desired_capacity = (size_t) maximum_desired_capacity_d;
1458
1459  // This assert only makes sense here, before we adjust them
1460  // with respect to the min and max heap size.
1461  assert(minimum_desired_capacity <= maximum_desired_capacity,
1462         "minimum_desired_capacity = " SIZE_FORMAT ", "
1463         "maximum_desired_capacity = " SIZE_FORMAT,
1464         minimum_desired_capacity, maximum_desired_capacity);
1465
1466  // Should not be greater than the heap max size. No need to adjust
1467  // it with respect to the heap min size as it's a lower bound (i.e.,
1468  // we'll try to make the capacity larger than it, not smaller).
1469  minimum_desired_capacity = MIN2(minimum_desired_capacity, max_heap_size);
1470  // Should not be less than the heap min size. No need to adjust it
1471  // with respect to the heap max size as it's an upper bound (i.e.,
1472  // we'll try to make the capacity smaller than it, not greater).
1473  maximum_desired_capacity =  MAX2(maximum_desired_capacity, min_heap_size);
1474
1475  if (capacity_after_gc < minimum_desired_capacity) {
1476    // Don't expand unless it's significant
1477    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1478
1479    log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
1480                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1481                              capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
1482
1483    expand(expand_bytes, _workers);
1484
1485    // No expansion, now see if we want to shrink
1486  } else if (capacity_after_gc > maximum_desired_capacity) {
1487    // Capacity too large, compute shrinking size
1488    size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1489
1490    log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
1491                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1492                              capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio);
1493
1494    shrink(shrink_bytes);
1495  }
1496}
1497
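    // Attempts to satisfy a failed allocation request: first by allocating at the
    // safepoint, then by expanding the heap and retrying, and finally (if do_gc is
    // set) by doing a Full GC. Returns NULL if the allocation could not be satisfied.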
1498HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1499                                                            AllocationContext_t context,
1500                                                            bool do_gc,
1501                                                            bool clear_all_soft_refs,
1502                                                            bool expect_null_mutator_alloc_region,
1503                                                            bool* gc_succeeded) {
1504  *gc_succeeded = true;
1505  // Let's attempt the allocation first.
1506  HeapWord* result =
1507    attempt_allocation_at_safepoint(word_size,
1508                                    context,
1509                                    expect_null_mutator_alloc_region);
1510  if (result != NULL) {
1511    assert(*gc_succeeded, "sanity");
1512    return result;
1513  }
1514
1515  // In a G1 heap, we're supposed to keep allocation from failing by
1516  // incremental pauses.  Therefore, at least for now, we'll favor
1517  // expansion over collection.  (This might change in the future if we can
1518  // do something smarter than full collection to satisfy a failed alloc.)
1519  result = expand_and_allocate(word_size, context);
1520  if (result != NULL) {
1521    assert(*gc_succeeded, "sanity");
1522    return result;
1523  }
1524
1525  if (do_gc) {
1526    // Expansion didn't work, we'll try to do a Full GC.
1527    *gc_succeeded = do_full_collection(false, /* explicit_gc */
1528                                       clear_all_soft_refs);
1529  }
1530
1531  return NULL;
1532}
1533
1534HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1535                                                     AllocationContext_t context,
1536                                                     bool* succeeded) {
1537  assert_at_safepoint(true /* should_be_vm_thread */);
1538
1539  // Attempts to allocate followed by Full GC.
1540  HeapWord* result =
1541    satisfy_failed_allocation_helper(word_size,
1542                                     context,
1543                                     true,  /* do_gc */
1544                                     false, /* clear_all_soft_refs */
1545                                     false, /* expect_null_mutator_alloc_region */
1546                                     succeeded);
1547
1548  if (result != NULL || !*succeeded) {
1549    return result;
1550  }
1551
1552  // Attempts to allocate followed by Full GC that will collect all soft references.
1553  result = satisfy_failed_allocation_helper(word_size,
1554                                            context,
1555                                            true, /* do_gc */
1556                                            true, /* clear_all_soft_refs */
1557                                            true, /* expect_null_mutator_alloc_region */
1558                                            succeeded);
1559
1560  if (result != NULL || !*succeeded) {
1561    return result;
1562  }
1563
1564  // Attempts to allocate, no GC
1565  result = satisfy_failed_allocation_helper(word_size,
1566                                            context,
1567                                            false, /* do_gc */
1568                                            false, /* clear_all_soft_refs */
1569                                            true,  /* expect_null_mutator_alloc_region */
1570                                            succeeded);
1571
1572  if (result != NULL) {
1573    assert(*succeeded, "sanity");
1574    return result;
1575  }
1576
1577  assert(!collector_policy()->should_clear_all_soft_refs(),
1578         "Flag should have been handled and cleared prior to this point");
1579
1580  // What else?  We might try synchronous finalization later.  If the total
1581  // space available is large enough for the allocation, then a more
1582  // complete compaction phase than we've tried so far might be
1583  // appropriate.
1584  assert(*succeeded, "sanity");
1585  return NULL;
1586}
1587
1588// Attempts to expand the heap sufficiently
1589// to support an allocation of the given "word_size". If
1590// successful, performs the allocation and returns the address of the
1591// allocated block, or else NULL.
1592
1593HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1594  assert_at_safepoint(true /* should_be_vm_thread */);
1595
1596  _verifier->verify_region_sets_optional();
1597
1598  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1599  log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1600                            word_size * HeapWordSize);
1601
1602
1603  if (expand(expand_bytes, _workers)) {
1604    _hrm.verify_optional();
1605    _verifier->verify_region_sets_optional();
1606    return attempt_allocation_at_safepoint(word_size,
1607                                           context,
1608                                           false /* expect_null_mutator_alloc_region */);
1609  }
1610  return NULL;
1611}
1612
1613bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
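      // The request is rounded up to the OS page size and then to a whole number of
      // regions, since the heap can only grow by complete HeapRegions.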
1614  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1615  aligned_expand_bytes = align_size_up(aligned_expand_bytes,
1616                                       HeapRegion::GrainBytes);
1617
1618  log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B aligned expansion amount: " SIZE_FORMAT "B",
1619                            expand_bytes, aligned_expand_bytes);
1620
1621  if (is_maximal_no_gc()) {
1622    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1623    return false;
1624  }
1625
1626  double expand_heap_start_time_sec = os::elapsedTime();
1627  uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1628  assert(regions_to_expand > 0, "Must expand by at least one region");
1629
1630  uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);
1631  if (expand_time_ms != NULL) {
1632    *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1633  }
1634
1635  if (expanded_by > 0) {
1636    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1637    assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1638    g1_policy()->record_new_heap_size(num_regions());
1639  } else {
1640    log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1641
1642    // The expansion of the virtual storage space was unsuccessful.
1643    // Let's see if it was because we ran out of swap.
1644    if (G1ExitOnExpansionFailure &&
1645        _hrm.available() >= regions_to_expand) {
1646      // We had head room...
1647      vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1648    }
1649  }
1650  return regions_to_expand > 0;
1651}
1652
1653void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1654  size_t aligned_shrink_bytes =
1655    ReservedSpace::page_align_size_down(shrink_bytes);
1656  aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
1657                                         HeapRegion::GrainBytes);
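      // The heap can only shrink by complete HeapRegions, so work out how many
      // whole regions the request corresponds to.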
1658  uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
1659
1660  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
1661  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1662
1663
1664  log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1665                            shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1666  if (num_regions_removed > 0) {
1667    g1_policy()->record_new_heap_size(num_regions());
1668  } else {
1669    log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
1670  }
1671}
1672
1673void G1CollectedHeap::shrink(size_t shrink_bytes) {
1674  _verifier->verify_region_sets_optional();
1675
1676  // We should only reach here at the end of a Full GC which means we
1677  // should not be holding on to any GC alloc regions. The method
1678  // below will make sure of that and do any remaining clean up.
1679  _allocator->abandon_gc_alloc_regions();
1680
1681  // Instead of tearing down / rebuilding the free lists here, we
1682  // could instead use the remove_all_pending() method on free_list to
1683  // remove only the ones that we need to remove.
1684  tear_down_region_sets(true /* free_list_only */);
1685  shrink_helper(shrink_bytes);
1686  rebuild_region_sets(true /* free_list_only */);
1687
1688  _hrm.verify_optional();
1689  _verifier->verify_region_sets_optional();
1690}
1691
1692// Public methods.
1693
1694G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1695  CollectedHeap(),
1696  _collector_policy(collector_policy),
1697  _g1_policy(create_g1_policy()),
1698  _collection_set(this, _g1_policy),
1699  _dirty_card_queue_set(false),
1700  _is_alive_closure_cm(this),
1701  _is_alive_closure_stw(this),
1702  _ref_processor_cm(NULL),
1703  _ref_processor_stw(NULL),
1704  _bot(NULL),
1705  _hot_card_cache(NULL),
1706  _g1_rem_set(NULL),
1707  _cg1r(NULL),
1708  _g1mm(NULL),
1709  _refine_cte_cl(NULL),
1710  _preserved_marks_set(true /* in_c_heap */),
1711  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1712  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1713  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1714  _humongous_reclaim_candidates(),
1715  _has_humongous_reclaim_candidates(false),
1716  _archive_allocator(NULL),
1717  _free_regions_coming(false),
1718  _gc_time_stamp(0),
1719  _summary_bytes_used(0),
1720  _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1721  _old_evac_stats("Old", OldPLABSize, PLABWeight),
1722  _expand_heap_after_alloc_failure(true),
1723  _old_marking_cycles_started(0),
1724  _old_marking_cycles_completed(0),
1725  _in_cset_fast_test(),
1726  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1727  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1728
1729  _workers = new WorkGang("GC Thread", ParallelGCThreads,
1730                          /* are_GC_task_threads */true,
1731                          /* are_ConcurrentGC_threads */false);
1732  _workers->initialize_workers();
1733  _verifier = new G1HeapVerifier(this);
1734
1735  _allocator = G1Allocator::create_allocator(this);
1736
1737  _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1738
1739  _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1740
1741  // Override the default _filler_array_max_size so that no humongous filler
1742  // objects are created.
1743  _filler_array_max_size = _humongous_object_threshold_in_words;
1744
1745  uint n_queues = ParallelGCThreads;
1746  _task_queues = new RefToScanQueueSet(n_queues);
1747
1748  _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1749
1750  for (uint i = 0; i < n_queues; i++) {
1751    RefToScanQueue* q = new RefToScanQueue();
1752    q->initialize();
1753    _task_queues->register_queue(i, q);
1754    ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1755  }
1756
1757  // Initialize the G1EvacuationFailureALot counters and flags.
1758  NOT_PRODUCT(reset_evacuation_should_fail();)
1759
1760  guarantee(_task_queues != NULL, "task_queues allocation failure.");
1761}
1762
1763G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1764                                                                 size_t size,
1765                                                                 size_t translation_factor) {
1766  size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1767  // Allocate a new reserved space, preferring to use large pages.
1768  ReservedSpace rs(size, preferred_page_size);
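      // The mapper commits and uncommits the auxiliary structure's storage in sync
      // with the heap regions it covers; translation_factor is the number of heap
      // bytes represented by each byte of the structure.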
1769  G1RegionToSpaceMapper* result  =
1770    G1RegionToSpaceMapper::create_mapper(rs,
1771                                         size,
1772                                         rs.alignment(),
1773                                         HeapRegion::GrainBytes,
1774                                         translation_factor,
1775                                         mtGC);
1776
1777  os::trace_page_sizes_for_requested_size(description,
1778                                          size,
1779                                          preferred_page_size,
1780                                          rs.alignment(),
1781                                          rs.base(),
1782                                          rs.size());
1783
1784  return result;
1785}
1786
1787jint G1CollectedHeap::initialize() {
1788  CollectedHeap::pre_initialize();
1789  os::enable_vtime();
1790
1791  // Necessary to satisfy locking discipline assertions.
1792
1793  MutexLocker x(Heap_lock);
1794
1795  // While there are no constraints in the GC code that HeapWordSize
1796  // be any particular value, there are multiple other areas in the
1797  // system which believe this to be true (e.g. oop->object_size in some
1798  // cases incorrectly returns the size in wordSize units rather than
1799  // HeapWordSize).
1800  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1801
1802  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1803  size_t max_byte_size = collector_policy()->max_heap_byte_size();
1804  size_t heap_alignment = collector_policy()->heap_alignment();
1805
1806  // Ensure that the sizes are properly aligned.
1807  Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1808  Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1809  Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1810
1811  _refine_cte_cl = new RefineCardTableEntryClosure();
1812
1813  jint ecode = JNI_OK;
1814  _cg1r = ConcurrentG1Refine::create(_refine_cte_cl, &ecode);
1815  if (_cg1r == NULL) {
1816    return ecode;
1817  }
1818
1819  // Reserve the maximum.
1820
1821  // When compressed oops are enabled, the preferred heap base
1822  // is calculated by subtracting the requested size from the
1823  // 32Gb boundary and using the result as the base address for
1824  // heap reservation. If the requested size is not aligned to
1825  // HeapRegion::GrainBytes (i.e. the alignment that is passed
1826  // into the ReservedHeapSpace constructor) then the actual
1827  // base of the reserved heap may end up differing from the
1828  // address that was requested (i.e. the preferred heap base).
1829  // If this happens then we could end up using a non-optimal
1830  // compressed oops mode.
1831
1832  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1833                                                 heap_alignment);
1834
1835  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1836
1837  // Create the barrier set for the entire reserved region.
1838  G1SATBCardTableLoggingModRefBS* bs
1839    = new G1SATBCardTableLoggingModRefBS(reserved_region());
1840  bs->initialize();
1841  assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
1842  set_barrier_set(bs);
1843
1844  // Create the hot card cache.
1845  _hot_card_cache = new G1HotCardCache(this);
1846
1847  // Also create a G1 rem set.
1848  _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
1849
1850  // Carve out the G1 part of the heap.
1851  ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1852  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
1853  G1RegionToSpaceMapper* heap_storage =
1854    G1RegionToSpaceMapper::create_mapper(g1_rs,
1855                                         g1_rs.size(),
1856                                         page_size,
1857                                         HeapRegion::GrainBytes,
1858                                         1,
1859                                         mtJavaHeap);
1860  os::trace_page_sizes("Heap",
1861                       collector_policy()->min_heap_byte_size(),
1862                       max_byte_size,
1863                       page_size,
1864                       heap_rs.base(),
1865                       heap_rs.size());
1866  heap_storage->set_mapping_changed_listener(&_listener);
1867
1868  // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
1869  G1RegionToSpaceMapper* bot_storage =
1870    create_aux_memory_mapper("Block Offset Table",
1871                             G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
1872                             G1BlockOffsetTable::heap_map_factor());
1873
1874  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
1875  G1RegionToSpaceMapper* cardtable_storage =
1876    create_aux_memory_mapper("Card Table",
1877                             G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
1878                             G1SATBCardTableLoggingModRefBS::heap_map_factor());
1879
1880  G1RegionToSpaceMapper* card_counts_storage =
1881    create_aux_memory_mapper("Card Counts Table",
1882                             G1CardCounts::compute_size(g1_rs.size() / HeapWordSize),
1883                             G1CardCounts::heap_map_factor());
1884
1885  size_t bitmap_size = G1CMBitMap::compute_size(g1_rs.size());
1886  G1RegionToSpaceMapper* prev_bitmap_storage =
1887    create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1888  G1RegionToSpaceMapper* next_bitmap_storage =
1889    create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1890
1891  _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1892  g1_barrier_set()->initialize(cardtable_storage);
1893  // Do later initialization work for concurrent refinement.
1894  _hot_card_cache->initialize(card_counts_storage);
1895
1896  // 6843694 - ensure that the maximum region index can fit
1897  // in the remembered set structures.
1898  const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1899  guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1900
1901  g1_rem_set()->initialize(max_capacity(), max_regions());
1902
1903  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1904  guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1905  guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1906            "too many cards per region");
1907
1908  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1909
1910  _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1911
1912  {
1913    HeapWord* start = _hrm.reserved().start();
1914    HeapWord* end = _hrm.reserved().end();
1915    size_t granularity = HeapRegion::GrainBytes;
1916
1917    _in_cset_fast_test.initialize(start, end, granularity);
1918    _humongous_reclaim_candidates.initialize(start, end, granularity);
1919  }
1920
1921  // Create the G1ConcurrentMark data structure and thread.
1922  // (Must do this late, so that "max_regions" is defined.)
1923  _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1924  if (_cm == NULL || !_cm->completed_initialization()) {
1925    vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1926    return JNI_ENOMEM;
1927  }
1928  _cmThread = _cm->cmThread();
1929
1930  // Now expand into the initial heap size.
1931  if (!expand(init_byte_size, _workers)) {
1932    vm_shutdown_during_initialization("Failed to allocate initial heap.");
1933    return JNI_ENOMEM;
1934  }
1935
1936  // Perform any initialization actions delegated to the policy.
1937  g1_policy()->init(this, &_collection_set);
1938
1939  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1940                                               SATB_Q_FL_lock,
1941                                               G1SATBProcessCompletedThreshold,
1942                                               Shared_SATB_Q_lock);
1943
1944  JavaThread::dirty_card_queue_set().initialize(_refine_cte_cl,
1945                                                DirtyCardQ_CBL_mon,
1946                                                DirtyCardQ_FL_lock,
1947                                                (int)concurrent_g1_refine()->yellow_zone(),
1948                                                (int)concurrent_g1_refine()->red_zone(),
1949                                                Shared_DirtyCardQ_lock,
1950                                                NULL,  // fl_owner
1951                                                true); // init_free_ids
1952
1953  dirty_card_queue_set().initialize(NULL, // Should never be called by the Java code
1954                                    DirtyCardQ_CBL_mon,
1955                                    DirtyCardQ_FL_lock,
1956                                    -1, // never trigger processing
1957                                    -1, // no limit on length
1958                                    Shared_DirtyCardQ_lock,
1959                                    &JavaThread::dirty_card_queue_set());
1960
1961  // Here we allocate the dummy HeapRegion that is required by the
1962  // G1AllocRegion class.
1963  HeapRegion* dummy_region = _hrm.get_dummy_region();
1964
1965  // We'll re-use the same region whether the alloc region will
1966  // require BOT updates or not and, if it doesn't, then a non-young
1967  // region will complain that it cannot support allocations without
1968  // BOT updates. So we'll tag the dummy region as eden to avoid that.
1969  dummy_region->set_eden();
1970  // Make sure it's full.
1971  dummy_region->set_top(dummy_region->end());
1972  G1AllocRegion::setup(this, dummy_region);
1973
1974  _allocator->init_mutator_alloc_region();
1975
1976  // Create the monitoring and management support now that the
1977  // values in the heap have been properly initialized.
1978  _g1mm = new G1MonitoringSupport(this);
1979
1980  G1StringDedup::initialize();
1981
1982  _preserved_marks_set.init(ParallelGCThreads);
1983
1984  _collection_set.initialize(max_regions());
1985
1986  return JNI_OK;
1987}
1988
1989void G1CollectedHeap::stop() {
1990  // Stop all concurrent threads. We do this to make sure these threads
1991  // do not continue to execute and access resources (e.g. logging)
1992  // that are destroyed during shutdown.
1993  _cg1r->stop();
1994  _cmThread->stop();
1995  if (G1StringDedup::is_enabled()) {
1996    G1StringDedup::stop();
1997  }
1998}
1999
2000size_t G1CollectedHeap::conservative_max_heap_alignment() {
2001  return HeapRegion::max_region_size();
2002}
2003
2004void G1CollectedHeap::post_initialize() {
2005  ref_processing_init();
2006}
2007
2008void G1CollectedHeap::ref_processing_init() {
2009  // Reference processing in G1 currently works as follows:
2010  //
2011  // * There are two reference processor instances. One is
2012  //   used to record and process discovered references
2013  //   during concurrent marking; the other is used to
2014  //   record and process references during STW pauses
2015  //   (both full and incremental).
2016  // * Both ref processors need to 'span' the entire heap as
2017  //   the regions in the collection set may be dotted around.
2018  //
2019  // * For the concurrent marking ref processor:
2020  //   * Reference discovery is enabled at initial marking.
2021  //   * Reference discovery is disabled and the discovered
2022  //     references processed etc during remarking.
2023  //   * Reference discovery is MT (see below).
2024  //   * Reference discovery requires a barrier (see below).
2025  //   * Reference processing may or may not be MT
2026  //     (depending on the value of ParallelRefProcEnabled
2027  //     and ParallelGCThreads).
2028  //   * A full GC disables reference discovery by the CM
2029  //     ref processor and abandons any entries on its
2030  //     discovered lists.
2031  //
2032  // * For the STW processor:
2033  //   * Non MT discovery is enabled at the start of a full GC.
2034  //   * Processing and enqueueing during a full GC is non-MT.
2035  //   * During a full GC, references are processed after marking.
2036  //
2037  //   * Discovery (may or may not be MT) is enabled at the start
2038  //     of an incremental evacuation pause.
2039  //   * References are processed near the end of a STW evacuation pause.
2040  //   * For both types of GC:
2041  //     * Discovery is atomic - i.e. not concurrent.
2042  //     * Reference discovery will not need a barrier.
2043
2044  MemRegion mr = reserved_region();
2045
2046  // Concurrent Mark ref processor
2047  _ref_processor_cm =
2048    new ReferenceProcessor(mr,    // span
2049                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
2050                                // mt processing
2051                           ParallelGCThreads,
2052                                // degree of mt processing
2053                           (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2054                                // mt discovery
2055                           MAX2(ParallelGCThreads, ConcGCThreads),
2056                                // degree of mt discovery
2057                           false,
2058                                // Reference discovery is not atomic
2059                           &_is_alive_closure_cm);
2060                                // is alive closure
2061                                // (for efficiency/performance)
2062
2063  // STW ref processor
2064  _ref_processor_stw =
2065    new ReferenceProcessor(mr,    // span
2066                           ParallelRefProcEnabled && (ParallelGCThreads > 1),
2067                                // mt processing
2068                           ParallelGCThreads,
2069                                // degree of mt processing
2070                           (ParallelGCThreads > 1),
2071                                // mt discovery
2072                           ParallelGCThreads,
2073                                // degree of mt discovery
2074                           true,
2075                                // Reference discovery is atomic
2076                           &_is_alive_closure_stw);
2077                                // is alive closure
2078                                // (for efficiency/performance)
2079}
2080
2081CollectorPolicy* G1CollectedHeap::collector_policy() const {
2082  return _collector_policy;
2083}
2084
2085size_t G1CollectedHeap::capacity() const {
2086  return _hrm.length() * HeapRegion::GrainBytes;
2087}
2088
2089void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
2090  hr->reset_gc_time_stamp();
2091}
2092
2093#ifndef PRODUCT
2094
2095class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
2096private:
2097  unsigned _gc_time_stamp;
2098  bool _failures;
2099
2100public:
2101  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
2102    _gc_time_stamp(gc_time_stamp), _failures(false) { }
2103
2104  virtual bool doHeapRegion(HeapRegion* hr) {
2105    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
2106    if (_gc_time_stamp != region_gc_time_stamp) {
2107      log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
2108                            region_gc_time_stamp, _gc_time_stamp);
2109      _failures = true;
2110    }
2111    return false;
2112  }
2113
2114  bool failures() { return _failures; }
2115};
2116
2117void G1CollectedHeap::check_gc_time_stamps() {
2118  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
2119  heap_region_iterate(&cl);
2120  guarantee(!cl.failures(), "all GC time stamps should have been reset");
2121}
2122#endif // PRODUCT
2123
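    // Applies the given closure to every card currently held in the hot card cache.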
2124void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
2125  _hot_card_cache->drain(cl, worker_i);
2126}
2127
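    // Applies the given closure to all completed dirty card buffers, recording the
    // number of buffers processed as this worker's UpdateRS work item.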
2128void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
2129  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2130  size_t n_completed_buffers = 0;
2131  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2132    n_completed_buffers++;
2133  }
2134  g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2135  dcqs.clear_n_completed_buffers();
2136  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2137}
2138
2139// Computes the sum of the storage used by the various regions.
2140size_t G1CollectedHeap::used() const {
2141  size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
2142  if (_archive_allocator != NULL) {
2143    result += _archive_allocator->used();
2144  }
2145  return result;
2146}
2147
2148size_t G1CollectedHeap::used_unlocked() const {
2149  return _summary_bytes_used;
2150}
2151
2152class SumUsedClosure: public HeapRegionClosure {
2153  size_t _used;
2154public:
2155  SumUsedClosure() : _used(0) {}
2156  bool doHeapRegion(HeapRegion* r) {
2157    _used += r->used();
2158    return false;
2159  }
2160  size_t result() { return _used; }
2161};
2162
2163size_t G1CollectedHeap::recalculate_used() const {
2164  double recalculate_used_start = os::elapsedTime();
2165
2166  SumUsedClosure blk;
2167  heap_region_iterate(&blk);
2168
2169  g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
2170  return blk.result();
2171}
2172
2173bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
2174  switch (cause) {
2175    case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent;
2176    case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent;
2177    case GCCause::_update_allocation_context_stats_inc: return true;
2178    case GCCause::_wb_conc_mark:                        return true;
2179    default :                                           return false;
2180  }
2181}
2182
2183bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
2184  switch (cause) {
2185    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
2186    case GCCause::_g1_humongous_allocation: return true;
2187    default:                                return is_user_requested_concurrent_full_gc(cause);
2188  }
2189}
2190
2191#ifndef PRODUCT
2192void G1CollectedHeap::allocate_dummy_regions() {
2193  // Let's fill up most of the region
2194  size_t word_size = HeapRegion::GrainWords - 1024;
2195  // And as a result the region we'll allocate will be humongous.
2196  guarantee(is_humongous(word_size), "sanity");
2197
2198  // _filler_array_max_size is set to humongous object threshold
2199  // but temporarily change it to use CollectedHeap::fill_with_object().
2200  SizeTFlagSetting fs(_filler_array_max_size, word_size);
2201
2202  for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
2203    // Let's use the existing mechanism for the allocation
2204    HeapWord* dummy_obj = humongous_obj_allocate(word_size,
2205                                                 AllocationContext::system());
2206    if (dummy_obj != NULL) {
2207      MemRegion mr(dummy_obj, word_size);
2208      CollectedHeap::fill_with_object(mr);
2209    } else {
2210      // If we can't allocate once, we probably cannot allocate
2211      // again. Let's get out of the loop.
2212      break;
2213    }
2214  }
2215}
2216#endif // !PRODUCT
2217
2218void G1CollectedHeap::increment_old_marking_cycles_started() {
2219  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2220         _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2221         "Wrong marking cycle count (started: %d, completed: %d)",
2222         _old_marking_cycles_started, _old_marking_cycles_completed);
2223
2224  _old_marking_cycles_started++;
2225}
2226
2227void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2228  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2229
2230  // We assume that if concurrent == true, then the caller is a
2231  // concurrent thread that has joined the Suspendible Thread
2232  // Set. If there's ever a cheap way to check this, we should add an
2233  // assert here.
2234
2235  // Given that this method is called at the end of a Full GC or of a
2236  // concurrent cycle, and those can be nested (i.e., a Full GC can
2237  // interrupt a concurrent cycle), the number of full collections
2238  // completed should be either one (in the case where there was no
2239  // nesting) or two (when a Full GC interrupted a concurrent cycle)
2240  // behind the number of full collections started.
2241
2242  // This is the case for the inner caller, i.e. a Full GC.
2243  assert(concurrent ||
2244         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
2245         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
2246         "for inner caller (Full GC): _old_marking_cycles_started = %u "
2247         "is inconsistent with _old_marking_cycles_completed = %u",
2248         _old_marking_cycles_started, _old_marking_cycles_completed);
2249
2250  // This is the case for the outer caller, i.e. the concurrent cycle.
2251  assert(!concurrent ||
2252         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2253         "for outer caller (concurrent cycle): "
2254         "_old_marking_cycles_started = %u "
2255         "is inconsistent with _old_marking_cycles_completed = %u",
2256         _old_marking_cycles_started, _old_marking_cycles_completed);
2257
2258  _old_marking_cycles_completed += 1;
2259
2260  // We need to clear the "in_progress" flag in the CM thread before
2261  // we wake up any waiters (especially when ExplicitInvokesConcurrent
2262  // is set) so that if a waiter requests another System.gc() it doesn't
2263  // incorrectly see that a marking cycle is still in progress.
2264  if (concurrent) {
2265    _cmThread->set_idle();
2266  }
2267
2268  // This notify_all() will ensure that a thread that called
2269  // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2270  // and is waiting for a full GC to finish will be woken up. It is
2271  // waiting in VM_G1IncCollectionPause::doit_epilogue().
2272  FullGCCount_lock->notify_all();
2273}
2274
2275void G1CollectedHeap::collect(GCCause::Cause cause) {
2276  assert_heap_not_locked();
2277
2278  uint gc_count_before;
2279  uint old_marking_count_before;
2280  uint full_gc_count_before;
2281  bool retry_gc;
2282
2283  do {
2284    retry_gc = false;
2285
2286    {
2287      MutexLocker ml(Heap_lock);
2288
2289      // Read the GC count while holding the Heap_lock
2290      gc_count_before = total_collections();
2291      full_gc_count_before = total_full_collections();
2292      old_marking_count_before = _old_marking_cycles_started;
2293    }
2294
2295    if (should_do_concurrent_full_gc(cause)) {
2296      // Schedule an initial-mark evacuation pause that will start a
2297      // concurrent cycle. We're setting word_size to 0 which means that
2298      // we are not requesting a post-GC allocation.
2299      VM_G1IncCollectionPause op(gc_count_before,
2300                                 0,     /* word_size */
2301                                 true,  /* should_initiate_conc_mark */
2302                                 g1_policy()->max_pause_time_ms(),
2303                                 cause);
2304      op.set_allocation_context(AllocationContext::current());
2305
2306      VMThread::execute(&op);
2307      if (!op.pause_succeeded()) {
2308        if (old_marking_count_before == _old_marking_cycles_started) {
2309          retry_gc = op.should_retry_gc();
2310        } else {
2311          // A Full GC happened while we were trying to schedule the
2312          // initial-mark GC. No point in starting a new cycle given
2313          // that the whole heap was collected anyway.
2314        }
2315
2316        if (retry_gc) {
2317          if (GCLocker::is_active_and_needs_gc()) {
2318            GCLocker::stall_until_clear();
2319          }
2320        }
2321      }
2322    } else {
2323      if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2324          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2325
2326        // Schedule a standard evacuation pause. We're setting word_size
2327        // to 0 which means that we are not requesting a post-GC allocation.
2328        VM_G1IncCollectionPause op(gc_count_before,
2329                                   0,     /* word_size */
2330                                   false, /* should_initiate_conc_mark */
2331                                   g1_policy()->max_pause_time_ms(),
2332                                   cause);
2333        VMThread::execute(&op);
2334      } else {
2335        // Schedule a Full GC.
2336        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2337        VMThread::execute(&op);
2338      }
2339    }
2340  } while (retry_gc);
2341}
2342
2343bool G1CollectedHeap::is_in(const void* p) const {
2344  if (_hrm.reserved().contains(p)) {
2345    // Given that we know that p is in the reserved space,
2346    // heap_region_containing() should successfully
2347    // return the containing region.
2348    HeapRegion* hr = heap_region_containing(p);
2349    return hr->is_in(p);
2350  } else {
2351    return false;
2352  }
2353}
2354
2355#ifdef ASSERT
2356bool G1CollectedHeap::is_in_exact(const void* p) const {
2357  bool contains = reserved_region().contains(p);
2358  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
2359  if (contains && available) {
2360    return true;
2361  } else {
2362    return false;
2363  }
2364}
2365#endif
2366
2367bool G1CollectedHeap::obj_in_cs(oop obj) {
2368  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
2369  return r != NULL && r->in_collection_set();
2370}
2371
2372// Iteration functions.
2373
2374// Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2375
2376class IterateOopClosureRegionClosure: public HeapRegionClosure {
2377  ExtendedOopClosure* _cl;
2378public:
2379  IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2380  bool doHeapRegion(HeapRegion* r) {
2381    if (!r->is_continues_humongous()) {
2382      r->oop_iterate(_cl);
2383    }
2384    return false;
2385  }
2386};
2387
2388// Iterates an ObjectClosure over all objects within a HeapRegion.
2389
2390class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2391  ObjectClosure* _cl;
2392public:
2393  IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2394  bool doHeapRegion(HeapRegion* r) {
2395    if (!r->is_continues_humongous()) {
2396      r->object_iterate(_cl);
2397    }
2398    return false;
2399  }
2400};
2401
2402void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2403  IterateObjectClosureRegionClosure blk(cl);
2404  heap_region_iterate(&blk);
2405}
2406
2407void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2408  _hrm.iterate(cl);
2409}
2410
2411void
2412G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
2413                                         uint worker_id,
2414                                         HeapRegionClaimer *hrclaimer,
2415                                         bool concurrent) const {
2416  _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
2417}
2418
2419void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2420  _collection_set.iterate(cl);
2421}
2422
2423void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
2424  _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
2425}
2426
2427HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2428  HeapRegion* result = _hrm.next_region_in_heap(from);
2429  while (result != NULL && result->is_pinned()) {
2430    result = _hrm.next_region_in_heap(result);
2431  }
2432  return result;
2433}
2434
2435HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2436  HeapRegion* hr = heap_region_containing(addr);
2437  return hr->block_start(addr);
2438}
2439
2440size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2441  HeapRegion* hr = heap_region_containing(addr);
2442  return hr->block_size(addr);
2443}
2444
2445bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2446  HeapRegion* hr = heap_region_containing(addr);
2447  return hr->block_is_obj(addr);
2448}
2449
2450bool G1CollectedHeap::supports_tlab_allocation() const {
2451  return true;
2452}
2453
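    // The TLAB capacity is the eden portion of the young target: the target young
    // length minus the regions already taken by survivors, in bytes.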
2454size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2455  return (_g1_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2456}
2457
2458size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2459  return _eden.length() * HeapRegion::GrainBytes;
2460}
2461
2462// For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2463// must be equal to the humongous object limit.
2464size_t G1CollectedHeap::max_tlab_size() const {
2465  return align_size_down(_humongous_object_threshold_in_words, MinObjAlignment);
2466}
2467
2468size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2469  AllocationContext_t context = AllocationContext::current();
2470  return _allocator->unsafe_max_tlab_alloc(context);
2471}
2472
2473size_t G1CollectedHeap::max_capacity() const {
2474  return _hrm.reserved().byte_size();
2475}
2476
2477jlong G1CollectedHeap::millis_since_last_gc() {
2478  // See the notes in GenCollectedHeap::millis_since_last_gc()
2479  // for more information about the implementation.
2480  jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) -
2481    _g1_policy->collection_pause_end_millis();
2482  if (ret_val < 0) {
2483    log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
2484      ". returning zero instead.", ret_val);
2485    return 0;
2486  }
2487  return ret_val;
2488}
2489
2490void G1CollectedHeap::prepare_for_verify() {
2491  _verifier->prepare_for_verify();
2492}
2493
2494void G1CollectedHeap::verify(VerifyOption vo) {
2495  _verifier->verify(vo);
2496}
2497
2498class PrintRegionClosure: public HeapRegionClosure {
2499  outputStream* _st;
2500public:
2501  PrintRegionClosure(outputStream* st) : _st(st) {}
2502  bool doHeapRegion(HeapRegion* r) {
2503    r->print_on(_st);
2504    return false;
2505  }
2506};
2507
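    // Decides whether obj should be considered dead for verification purposes,
    // using either the previous marking information, the next marking information,
    // or (after a full GC) the mark word. Objects in archive regions are never
    // reported dead when using the mark word.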
2508bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2509                                       const HeapRegion* hr,
2510                                       const VerifyOption vo) const {
2511  switch (vo) {
2512  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
2513  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
2514  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
2515  default:                            ShouldNotReachHere();
2516  }
2517  return false; // keep some compilers happy
2518}
2519
2520bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
2521                                       const VerifyOption vo) const {
2522  switch (vo) {
2523  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
2524  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
2525  case VerifyOption_G1UseMarkWord: {
2526    HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
2527    return !obj->is_gc_marked() && !hr->is_archive();
2528  }
2529  default:                            ShouldNotReachHere();
2530  }
2531  return false; // keep some compilers happy
2532}
2533
2534void G1CollectedHeap::print_heap_regions() const {
2535  Log(gc, heap, region) log;
2536  if (log.is_trace()) {
2537    ResourceMark rm;
2538    print_regions_on(log.trace_stream());
2539  }
2540}
2541
2542void G1CollectedHeap::print_on(outputStream* st) const {
2543  st->print(" %-20s", "garbage-first heap");
2544  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
2545            capacity()/K, used_unlocked()/K);
2546  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
2547            p2i(_hrm.reserved().start()),
2548            p2i(_hrm.reserved().start() + _hrm.length() * HeapRegion::GrainWords),
2549            p2i(_hrm.reserved().end()));
2550  st->cr();
2551  st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
2552  uint young_regions = young_regions_count();
2553  st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
2554            (size_t) young_regions * HeapRegion::GrainBytes / K);
2555  uint survivor_regions = survivor_regions_count();
2556  st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
2557            (size_t) survivor_regions * HeapRegion::GrainBytes / K);
2558  st->cr();
2559  MetaspaceAux::print_on(st);
2560}
2561
2562void G1CollectedHeap::print_regions_on(outputStream* st) const {
2563  st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
2564               "HS=humongous(starts), HC=humongous(continues), "
2565               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
2566               "AC=allocation context, "
2567               "TAMS=top-at-mark-start (previous, next)");
2568  PrintRegionClosure blk(st);
2569  heap_region_iterate(&blk);
2570}
2571
2572void G1CollectedHeap::print_extended_on(outputStream* st) const {
2573  print_on(st);
2574
2575  // Print the per-region information.
2576  print_regions_on(st);
2577}
2578
2579void G1CollectedHeap::print_on_error(outputStream* st) const {
2580  this->CollectedHeap::print_on_error(st);
2581
2582  if (_cm != NULL) {
2583    st->cr();
2584    _cm->print_on_error(st);
2585  }
2586}
2587
2588void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
2589  workers()->print_worker_threads_on(st);
2590  _cmThread->print_on(st);
2591  st->cr();
2592  _cm->print_worker_threads_on(st);
2593  _cg1r->print_worker_threads_on(st); // also prints the sample thread
2594  if (G1StringDedup::is_enabled()) {
2595    G1StringDedup::print_worker_threads_on(st);
2596  }
2597}
2598
2599void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
2600  workers()->threads_do(tc);
2601  tc->do_thread(_cmThread);
2602  _cm->threads_do(tc);
2603  _cg1r->threads_do(tc); // also iterates over the sample thread
2604  if (G1StringDedup::is_enabled()) {
2605    G1StringDedup::threads_do(tc);
2606  }
2607}
2608
2609void G1CollectedHeap::print_tracing_info() const {
2610  g1_rem_set()->print_summary_info();
2611  concurrent_mark()->print_summary_info();
2612}
2613
2614#ifndef PRODUCT
2615// Helpful for debugging RSet issues.
2616
2617class PrintRSetsClosure : public HeapRegionClosure {
2618private:
2619  const char* _msg;
2620  size_t _occupied_sum;
2621
2622public:
2623  bool doHeapRegion(HeapRegion* r) {
2624    HeapRegionRemSet* hrrs = r->rem_set();
2625    size_t occupied = hrrs->occupied();
2626    _occupied_sum += occupied;
2627
2628    tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
2629    if (occupied == 0) {
2630      tty->print_cr("  RSet is empty");
2631    } else {
2632      hrrs->print();
2633    }
2634    tty->print_cr("----------");
2635    return false;
2636  }
2637
2638  PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
2639    tty->cr();
2640    tty->print_cr("========================================");
2641    tty->print_cr("%s", msg);
2642    tty->cr();
2643  }
2644
2645  ~PrintRSetsClosure() {
2646    tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
2647    tty->print_cr("========================================");
2648    tty->cr();
2649  }
2650};
2651
2652void G1CollectedHeap::print_cset_rsets() {
2653  PrintRSetsClosure cl("Printing CSet RSets");
2654  collection_set_iterate(&cl);
2655}
2656
2657void G1CollectedHeap::print_all_rsets() {
2658  PrintRSetsClosure cl("Printing All RSets");
2659  heap_region_iterate(&cl);
2660}
2661#endif // PRODUCT
2662
2663G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2664
2665  size_t eden_used_bytes = heap()->eden_regions_count() * HeapRegion::GrainBytes;
2666  size_t survivor_used_bytes = heap()->survivor_regions_count() * HeapRegion::GrainBytes;
2667  size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2668
2669  size_t eden_capacity_bytes =
2670    (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2671
2672  VirtualSpaceSummary heap_summary = create_heap_space_summary();
2673  return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2674                       eden_capacity_bytes, survivor_used_bytes, num_regions());
2675}
2676
2677G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2678  return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2679                       stats->unused(), stats->used(), stats->region_end_waste(),
2680                       stats->regions_filled(), stats->direct_allocated(),
2681                       stats->failure_used(), stats->failure_waste());
2682}
2683
2684void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2685  const G1HeapSummary& heap_summary = create_g1_heap_summary();
2686  gc_tracer->report_gc_heap_summary(when, heap_summary);
2687
2688  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
2689  gc_tracer->report_metaspace_summary(when, metaspace_summary);
2690}
2691
2692G1CollectedHeap* G1CollectedHeap::heap() {
2693  CollectedHeap* heap = Universe::heap();
2694  assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
2695  assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
2696  return (G1CollectedHeap*)heap;
2697}
2698
2699void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
2700  // always_do_update_barrier = false;
2701  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
2702  // Fill TLAB's and such
2703  accumulate_statistics_all_tlabs();
2704  ensure_parsability(true);
2705
2706  g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
2707}
2708
2709void G1CollectedHeap::gc_epilogue(bool full) {
  // We are at the end of the GC. The total collections counter has already been incremented.
2711  g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
2712
2713  // FIXME: what is this about?
2714  // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
2715  // is set.
2716#if defined(COMPILER2) || INCLUDE_JVMCI
2717  assert(DerivedPointerTable::is_empty(), "derived pointer present");
2718#endif
2719  // always_do_update_barrier = true;
2720
2721  resize_all_tlabs();
2722  allocation_context_stats().update(full);
2723
2724  // We have just completed a GC. Update the soft reference
2725  // policy with the new heap occupancy
2726  Universe::update_heap_info_at_gc();
2727}
2728
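// Schedule a VM_G1IncCollectionPause operation on the VM thread to do an
// incremental collection pause, attempting to satisfy an allocation of
// word_size. Returns the allocation result (which may be NULL); *succeeded
// indicates whether both the operation's prologue and the pause succeeded.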
2729HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2730                                               uint gc_count_before,
2731                                               bool* succeeded,
2732                                               GCCause::Cause gc_cause) {
2733  assert_heap_not_locked_and_not_at_safepoint();
2734  VM_G1IncCollectionPause op(gc_count_before,
2735                             word_size,
2736                             false, /* should_initiate_conc_mark */
2737                             g1_policy()->max_pause_time_ms(),
2738                             gc_cause);
2739
2740  op.set_allocation_context(AllocationContext::current());
2741  VMThread::execute(&op);
2742
2743  HeapWord* result = op.result();
2744  bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2745  assert(result == NULL || ret_succeeded,
2746         "the result should be NULL if the VM did not succeed");
2747  *succeeded = ret_succeeded;
2748
2749  assert_heap_not_locked();
2750  return result;
2751}
2752
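// Kick off a concurrent marking cycle: if the concurrent mark thread is not
// already working, mark it as started and notify it via the CGC_lock.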
2753void
2754G1CollectedHeap::doConcurrentMark() {
2755  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2756  if (!_cmThread->in_progress()) {
2757    _cmThread->set_started();
2758    CGC_lock->notify();
2759  }
2760}
2761
2762size_t G1CollectedHeap::pending_card_num() {
2763  size_t extra_cards = 0;
2764  JavaThread *curr = Threads::first();
2765  while (curr != NULL) {
2766    DirtyCardQueue& dcq = curr->dirty_card_queue();
2767    extra_cards += dcq.size();
2768    curr = curr->next();
2769  }
2770  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2771  size_t buffer_size = dcqs.buffer_size();
2772  size_t buffer_num = dcqs.completed_buffers_num();
2773
  // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
2775  // in bytes - not the number of 'entries'. We need to convert
2776  // into a number of cards.
2777  return (buffer_size * buffer_num + extra_cards) / oopSize;
2778}
2779
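// Walks all regions and, for every humongous start region, decides whether the
// humongous object is an eager-reclaim candidate (currently only type arrays
// with sufficiently small remembered sets qualify). Candidates are registered
// with the in-collection-set fast test; any remaining remembered set entries of
// a candidate are redirtied into the DCQS so that they are re-examined during
// the following evacuation phase.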
2780class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2781 private:
2782  size_t _total_humongous;
2783  size_t _candidate_humongous;
2784
2785  DirtyCardQueue _dcq;
2786
2787  // We don't nominate objects with many remembered set entries, on
2788  // the assumption that such objects are likely still live.
2789  bool is_remset_small(HeapRegion* region) const {
2790    HeapRegionRemSet* const rset = region->rem_set();
2791    return G1EagerReclaimHumongousObjectsWithStaleRefs
2792      ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
2793      : rset->is_empty();
2794  }
2795
2796  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
2797    assert(region->is_starts_humongous(), "Must start a humongous object");
2798
2799    oop obj = oop(region->bottom());
2800
2801    // Dead objects cannot be eager reclaim candidates. Due to class
2802    // unloading it is unsafe to query their classes so we return early.
2803    if (heap->is_obj_dead(obj, region)) {
2804      return false;
2805    }
2806
2807    // Candidate selection must satisfy the following constraints
2808    // while concurrent marking is in progress:
2809    //
2810    // * In order to maintain SATB invariants, an object must not be
2811    // reclaimed if it was allocated before the start of marking and
2812    // has not had its references scanned.  Such an object must have
2813    // its references (including type metadata) scanned to ensure no
2814    // live objects are missed by the marking process.  Objects
2815    // allocated after the start of concurrent marking don't need to
2816    // be scanned.
2817    //
2818    // * An object must not be reclaimed if it is on the concurrent
2819    // mark stack.  Objects allocated after the start of concurrent
2820    // marking are never pushed on the mark stack.
2821    //
2822    // Nominating only objects allocated after the start of concurrent
2823    // marking is sufficient to meet both constraints.  This may miss
2824    // some objects that satisfy the constraints, but the marking data
2825    // structures don't support efficiently performing the needed
2826    // additional tests or scrubbing of the mark stack.
2827    //
2828    // However, we presently only nominate is_typeArray() objects.
2829    // A humongous object containing references induces remembered
2830    // set entries on other regions.  In order to reclaim such an
2831    // object, those remembered sets would need to be cleaned up.
2832    //
2833    // We also treat is_typeArray() objects specially, allowing them
2834    // to be reclaimed even if allocated before the start of
2835    // concurrent mark.  For this we rely on mark stack insertion to
2836    // exclude is_typeArray() objects, preventing reclaiming an object
2837    // that is in the mark stack.  We also rely on the metadata for
2838    // such objects to be built-in and so ensured to be kept live.
2839    // Frequent allocation and drop of large binary blobs is an
2840    // important use case for eager reclaim, and this special handling
2841    // may reduce needed headroom.
2842
2843    return obj->is_typeArray() && is_remset_small(region);
2844  }
2845
2846 public:
2847  RegisterHumongousWithInCSetFastTestClosure()
2848  : _total_humongous(0),
2849    _candidate_humongous(0),
2850    _dcq(&JavaThread::dirty_card_queue_set()) {
2851  }
2852
2853  virtual bool doHeapRegion(HeapRegion* r) {
2854    if (!r->is_starts_humongous()) {
2855      return false;
2856    }
2857    G1CollectedHeap* g1h = G1CollectedHeap::heap();
2858
2859    bool is_candidate = humongous_region_is_candidate(g1h, r);
2860    uint rindex = r->hrm_index();
2861    g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2862    if (is_candidate) {
2863      _candidate_humongous++;
2864      g1h->register_humongous_region_with_cset(rindex);
      // The candidate check above already filters out humongous objects with
      // large remembered sets. If we have a humongous object with a few
      // remembered set entries, we simply flush those entries into the DCQS.
      // That will result in automatic re-evaluation of the entries during the
      // following evacuation phase.
2870      if (!r->rem_set()->is_empty()) {
2871        guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
2872                  "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2873        G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
2874        HeapRegionRemSetIterator hrrs(r->rem_set());
2875        size_t card_index;
2876        while (hrrs.has_next(card_index)) {
2877          jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
2878          // The remembered set might contain references to already freed
2879          // regions. Filter out such entries to avoid failing card table
2880          // verification.
2881          if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
2882            if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
2883              *card_ptr = CardTableModRefBS::dirty_card_val();
2884              _dcq.enqueue(card_ptr);
2885            }
2886          }
2887        }
2888        assert(hrrs.n_yielded() == r->rem_set()->occupied(),
2889               "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
2890               hrrs.n_yielded(), r->rem_set()->occupied());
2891        r->rem_set()->clear_locked();
2892      }
2893      assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
2894    }
2895    _total_humongous++;
2896
2897    return false;
2898  }
2899
2900  size_t total_humongous() const { return _total_humongous; }
2901  size_t candidate_humongous() const { return _candidate_humongous; }
2902
2903  void flush_rem_set_entries() { _dcq.flush(); }
2904};
2905
2906void G1CollectedHeap::register_humongous_regions_with_cset() {
2907  if (!G1EagerReclaimHumongousObjects) {
2908    g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
2909    return;
2910  }
2911  double time = os::elapsed_counter();
2912
2913  // Collect reclaim candidate information and register candidates with cset.
2914  RegisterHumongousWithInCSetFastTestClosure cl;
2915  heap_region_iterate(&cl);
2916
2917  time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
2918  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
2919                                                                  cl.total_humongous(),
2920                                                                  cl.candidate_humongous());
2921  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
2922
2923  // Finally flush all remembered set entries to re-check into the global DCQS.
2924  cl.flush_rem_set_entries();
2925}
2926
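// Verifies the remembered set of every region, skipping archive and
// continues-humongous regions. Used when VerifyRememberedSets is enabled.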
2927class VerifyRegionRemSetClosure : public HeapRegionClosure {
2928  public:
2929    bool doHeapRegion(HeapRegion* hr) {
2930      if (!hr->is_archive() && !hr->is_continues_humongous()) {
2931        hr->verify_rem_set();
2932      }
2933      return false;
2934    }
2935};
2936
2937uint G1CollectedHeap::num_task_queues() const {
2938  return _task_queues->size();
2939}
2940
2941#if TASKQUEUE_STATS
2942void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
2943  st->print_raw_cr("GC Task Stats");
2944  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
2945  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
2946}
2947
2948void G1CollectedHeap::print_taskqueue_stats() const {
2949  if (!log_is_enabled(Trace, gc, task, stats)) {
2950    return;
2951  }
2952  Log(gc, task, stats) log;
2953  ResourceMark rm;
2954  outputStream* st = log.trace_stream();
2955
2956  print_taskqueue_stats_hdr(st);
2957
2958  TaskQueueStats totals;
2959  const uint n = num_task_queues();
2960  for (uint i = 0; i < n; ++i) {
2961    st->print("%3u ", i); task_queue(i)->stats.print(st); st->cr();
2962    totals += task_queue(i)->stats;
2963  }
2964  st->print_raw("tot "); totals.print(st); st->cr();
2965
2966  DEBUG_ONLY(totals.verify());
2967}
2968
2969void G1CollectedHeap::reset_taskqueue_stats() {
2970  const uint n = num_task_queues();
2971  for (uint i = 0; i < n; ++i) {
2972    task_queue(i)->stats.reset();
2973  }
2974}
2975#endif // TASKQUEUE_STATS
2976
2977void G1CollectedHeap::wait_for_root_region_scanning() {
2978  double scan_wait_start = os::elapsedTime();
2979  // We have to wait until the CM threads finish scanning the
2980  // root regions as it's the only way to ensure that all the
2981  // objects on them have been correctly scanned before we start
2982  // moving them during the GC.
2983  bool waited = _cm->root_regions()->wait_until_scan_finished();
2984  double wait_time_ms = 0.0;
2985  if (waited) {
2986    double scan_wait_end = os::elapsedTime();
2987    wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2988  }
2989  g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2990}
2991
2992class G1PrintCollectionSetClosure : public HeapRegionClosure {
2993private:
2994  G1HRPrinter* _hr_printer;
2995public:
2996  G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
2997
2998  virtual bool doHeapRegion(HeapRegion* r) {
2999    _hr_printer->cset(r);
3000    return false;
3001  }
3002};
3003
3004bool
3005G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3006  assert_at_safepoint(true /* should_be_vm_thread */);
3007  guarantee(!is_gc_active(), "collection is not reentrant");
3008
3009  if (GCLocker::check_active_before_gc()) {
3010    return false;
3011  }
3012
3013  _gc_timer_stw->register_gc_start();
3014
3015  GCIdMark gc_id_mark;
3016  _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3017
3018  SvcGCMarker sgcm(SvcGCMarker::MINOR);
3019  ResourceMark rm;
3020
3021  g1_policy()->note_gc_start();
3022
3023  wait_for_root_region_scanning();
3024
3025  print_heap_before_gc();
3026  print_heap_regions();
3027  trace_heap_before_gc(_gc_tracer_stw);
3028
3029  _verifier->verify_region_sets_optional();
3030  _verifier->verify_dirty_young_regions();
3031
3032  // We should not be doing initial mark unless the conc mark thread is running
3033  if (!_cmThread->should_terminate()) {
3034    // This call will decide whether this pause is an initial-mark
3035    // pause. If it is, during_initial_mark_pause() will return true
3036    // for the duration of this pause.
3037    g1_policy()->decide_on_conc_mark_initiation();
3038  }
3039
3040  // We do not allow initial-mark to be piggy-backed on a mixed GC.
3041  assert(!collector_state()->during_initial_mark_pause() ||
3042          collector_state()->gcs_are_young(), "sanity");
3043
3044  // We also do not allow mixed GCs during marking.
3045  assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3046
  // Record whether this pause is an initial mark. We need to capture the
  // value now, because by the time the current thread has completed its
  // logging output and it is safe to signal the CM thread, the flag's
  // value in the policy will have been reset.
3050  bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3051
3052  // Inner scope for scope based logging, timers, and stats collection
3053  {
3054    EvacuationInfo evacuation_info;
3055
3056    if (collector_state()->during_initial_mark_pause()) {
3057      // We are about to start a marking cycle, so we increment the
3058      // full collection counter.
3059      increment_old_marking_cycles_started();
3060      _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
3061    }
3062
3063    _gc_tracer_stw->report_yc_type(collector_state()->yc_type());
3064
3065    GCTraceCPUTime tcpu;
3066
3067    FormatBuffer<> gc_string("Pause ");
3068    if (collector_state()->during_initial_mark_pause()) {
3069      gc_string.append("Initial Mark");
3070    } else if (collector_state()->gcs_are_young()) {
3071      gc_string.append("Young");
3072    } else {
3073      gc_string.append("Mixed");
3074    }
3075    GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
3076
3077    uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3078                                                                  workers()->active_workers(),
3079                                                                  Threads::number_of_non_daemon_threads());
3080    workers()->update_active_workers(active_workers);
3081    log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
3082
3083    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3084    TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3085
3086    // If the secondary_free_list is not empty, append it to the
3087    // free_list. No need to wait for the cleanup operation to finish;
3088    // the region allocation code will check the secondary_free_list
3089    // and wait if necessary. If the G1StressConcRegionFreeing flag is
3090    // set, skip this step so that the region allocation code has to
3091    // get entries from the secondary_free_list.
3092    if (!G1StressConcRegionFreeing) {
3093      append_secondary_free_list_if_not_empty_with_lock();
3094    }
3095
3096    G1HeapTransition heap_transition(this);
3097    size_t heap_used_bytes_before_gc = used();
3098
3099    // Don't dynamically change the number of GC threads this early.  A value of
3100    // 0 is used to indicate serial work.  When parallel work is done,
3101    // it will be set.
3102
3103    { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3104      IsGCActiveMark x;
3105
3106      gc_prologue(false);
3107      increment_total_collections(false /* full gc */);
3108      increment_gc_time_stamp();
3109
3110      if (VerifyRememberedSets) {
3111        log_info(gc, verify)("[Verifying RemSets before GC]");
3112        VerifyRegionRemSetClosure v_cl;
3113        heap_region_iterate(&v_cl);
3114      }
3115
3116      _verifier->verify_before_gc();
3117
3118      _verifier->check_bitmaps("GC Start");
3119
3120#if defined(COMPILER2) || INCLUDE_JVMCI
3121      DerivedPointerTable::clear();
3122#endif
3123
3124      // Please see comment in g1CollectedHeap.hpp and
3125      // G1CollectedHeap::ref_processing_init() to see how
3126      // reference processing currently works in G1.
3127
3128      // Enable discovery in the STW reference processor
3129      if (g1_policy()->should_process_references()) {
3130        ref_processor_stw()->enable_discovery();
3131      } else {
3132        ref_processor_stw()->disable_discovery();
3133      }
3134
3135      {
        // We want to temporarily turn off discovery by the
        // CM ref processor, if necessary, and turn it back on
        // again later if we do. Using a scoped
        // NoRefDiscovery object will do this.
3140        NoRefDiscovery no_cm_discovery(ref_processor_cm());
3141
3142        // Forget the current alloc region (we might even choose it to be part
3143        // of the collection set!).
3144        _allocator->release_mutator_alloc_region();
3145
3146        // This timing is only used by the ergonomics to handle our pause target.
3147        // It is unclear why this should not include the full pause. We will
3148        // investigate this in CR 7178365.
3149        //
3150        // Preserving the old comment here if that helps the investigation:
3151        //
3152        // The elapsed time induced by the start time below deliberately elides
3153        // the possible verification above.
3154        double sample_start_time_sec = os::elapsedTime();
3155
3156        g1_policy()->record_collection_pause_start(sample_start_time_sec);
3157
3158        if (collector_state()->during_initial_mark_pause()) {
3159          concurrent_mark()->checkpointRootsInitialPre();
3160        }
3161
3162        g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
3163
3164        evacuation_info.set_collectionset_regions(collection_set()->region_length());
3165
3166        // Make sure the remembered sets are up to date. This needs to be
3167        // done before register_humongous_regions_with_cset(), because the
3168        // remembered sets are used there to choose eager reclaim candidates.
3169        // If the remembered sets are not up to date we might miss some
3170        // entries that need to be handled.
3171        g1_rem_set()->cleanupHRRS();
3172
3173        register_humongous_regions_with_cset();
3174
3175        assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3176
        // We call this after finalize_collection_set() to
        // ensure that the collection set has been finalized.
3179        _cm->verify_no_cset_oops();
3180
3181        if (_hr_printer.is_active()) {
3182          G1PrintCollectionSetClosure cl(&_hr_printer);
3183          _collection_set.iterate(&cl);
3184        }
3185
3186        // Initialize the GC alloc regions.
3187        _allocator->init_gc_alloc_regions(evacuation_info);
3188
3189        G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3190        pre_evacuate_collection_set();
3191
3192        // Actually do the work...
3193        evacuate_collection_set(evacuation_info, &per_thread_states);
3194
3195        post_evacuate_collection_set(evacuation_info, &per_thread_states);
3196
3197        const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3198        free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3199
3200        eagerly_reclaim_humongous_regions();
3201
3202        record_obj_copy_mem_stats();
3203        _survivor_evac_stats.adjust_desired_plab_sz();
3204        _old_evac_stats.adjust_desired_plab_sz();
3205
3206        // Start a new incremental collection set for the next pause.
3207        collection_set()->start_incremental_building();
3208
3209        clear_cset_fast_test();
3210
3211        guarantee(_eden.length() == 0, "eden should have been cleared");
3212        g1_policy()->transfer_survivors_to_cset(survivor());
3213
3214        if (evacuation_failed()) {
3215          set_used(recalculate_used());
3216          if (_archive_allocator != NULL) {
3217            _archive_allocator->clear_used();
3218          }
3219          for (uint i = 0; i < ParallelGCThreads; i++) {
3220            if (_evacuation_failed_info_array[i].has_failed()) {
3221              _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3222            }
3223          }
3224        } else {
          // The bytes used by the regions in the collection set have already been
          // subtracted when those regions were freed. Add in the bytes evacuated.
3227          increase_used(g1_policy()->bytes_copied_during_gc());
3228        }
3229
3230        if (collector_state()->during_initial_mark_pause()) {
3231          // We have to do this before we notify the CM threads that
3232          // they can start working to make sure that all the
3233          // appropriate initialization is done on the CM object.
3234          concurrent_mark()->checkpointRootsInitialPost();
3235          collector_state()->set_mark_in_progress(true);
3236          // Note that we don't actually trigger the CM thread at
3237          // this point. We do that later when we're sure that
3238          // the current thread has completed its logging output.
3239        }
3240
3241        allocate_dummy_regions();
3242
3243        _allocator->init_mutator_alloc_region();
3244
3245        {
3246          size_t expand_bytes = _heap_sizing_policy->expansion_amount();
3247          if (expand_bytes > 0) {
3248            size_t bytes_before = capacity();
3249            // No need for an ergo logging here,
3250            // expansion_amount() does this when it returns a value > 0.
3251            double expand_ms;
3252            if (!expand(expand_bytes, _workers, &expand_ms)) {
3253              // We failed to expand the heap. Cannot do anything about it.
3254            }
3255            g1_policy()->phase_times()->record_expand_heap_time(expand_ms);
3256          }
3257        }
3258
3259        // We redo the verification but now wrt to the new CSet which
3260        // has just got initialized after the previous CSet was freed.
3261        _cm->verify_no_cset_oops();
3262
3263        // This timing is only used by the ergonomics to handle our pause target.
3264        // It is unclear why this should not include the full pause. We will
3265        // investigate this in CR 7178365.
3266        double sample_end_time_sec = os::elapsedTime();
3267        double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
3268        size_t total_cards_scanned = per_thread_states.total_cards_scanned();
3269        g1_policy()->record_collection_pause_end(pause_time_ms, total_cards_scanned, heap_used_bytes_before_gc);
3270
3271        evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
3272        evacuation_info.set_bytes_copied(g1_policy()->bytes_copied_during_gc());
3273
3274        MemoryService::track_memory_usage();
3275
3276        // In prepare_for_verify() below we'll need to scan the deferred
3277        // update buffers to bring the RSets up-to-date if
3278        // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
3279        // the update buffers we'll probably need to scan cards on the
3280        // regions we just allocated to (i.e., the GC alloc
3281        // regions). However, during the last GC we called
3282        // set_saved_mark() on all the GC alloc regions, so card
3283        // scanning might skip the [saved_mark_word()...top()] area of
3284        // those regions (i.e., the area we allocated objects into
3285        // during the last GC). But it shouldn't. Given that
3286        // saved_mark_word() is conditional on whether the GC time stamp
3287        // on the region is current or not, by incrementing the GC time
3288        // stamp here we invalidate all the GC time stamps on all the
3289        // regions and saved_mark_word() will simply return top() for
3290        // all the regions. This is a nicer way of ensuring this rather
3291        // than iterating over the regions and fixing them. In fact, the
3292        // GC time stamp increment here also ensures that
3293        // saved_mark_word() will return top() between pauses, i.e.,
3294        // during concurrent refinement. So we don't need the
        // is_gc_active() check to decide which top to use when
3296        // scanning cards (see CR 7039627).
3297        increment_gc_time_stamp();
3298
3299        if (VerifyRememberedSets) {
3300          log_info(gc, verify)("[Verifying RemSets after GC]");
3301          VerifyRegionRemSetClosure v_cl;
3302          heap_region_iterate(&v_cl);
3303        }
3304
3305        _verifier->verify_after_gc();
3306        _verifier->check_bitmaps("GC End");
3307
3308        assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3309        ref_processor_stw()->verify_no_references_recorded();
3310
3311        // CM reference discovery will be re-enabled if necessary.
3312      }
3313
3314#ifdef TRACESPINNING
3315      ParallelTaskTerminator::print_termination_counts();
3316#endif
3317
3318      gc_epilogue(false);
3319    }
3320
3321    // Print the remainder of the GC log output.
3322    if (evacuation_failed()) {
3323      log_info(gc)("To-space exhausted");
3324    }
3325
3326    g1_policy()->print_phases();
3327    heap_transition.print();
3328
    // It is not yet safe to tell the concurrent mark thread to
    // start, as we still have some optional output below. We don't want the
    // output from the concurrent mark thread interfering with this
    // logging output either.
3333
3334    _hrm.verify_optional();
3335    _verifier->verify_region_sets_optional();
3336
3337    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
3338    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
3339
3340    print_heap_after_gc();
3341    print_heap_regions();
3342    trace_heap_after_gc(_gc_tracer_stw);
3343
3344    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
3345    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
3346    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
3347    // before any GC notifications are raised.
3348    g1mm()->update_sizes();
3349
3350    _gc_tracer_stw->report_evacuation_info(&evacuation_info);
3351    _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
3352    _gc_timer_stw->register_gc_end();
3353    _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
3354  }
3355  // It should now be safe to tell the concurrent mark thread to start
3356  // without its logging output interfering with the logging output
3357  // that came from the pause.
3358
3359  if (should_start_conc_mark) {
3360    // CAUTION: after the doConcurrentMark() call below,
3361    // the concurrent marking thread(s) could be running
3362    // concurrently with us. Make sure that anything after
3363    // this point does not assume that we are the only GC thread
3364    // running. Note: of course, the actual marking work will
3365    // not start until the safepoint itself is released in
3366    // SuspendibleThreadSet::desynchronize().
3367    doConcurrentMark();
3368  }
3369
3370  return true;
3371}
3372
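// Evacuation failure handling: remove the self-forwarding pointers that were
// installed for objects that could not be copied, and restore the object
// marks that were preserved for them.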
3373void G1CollectedHeap::remove_self_forwarding_pointers() {
3374  G1ParRemoveSelfForwardPtrsTask rsfp_task;
3375  workers()->run_task(&rsfp_task);
3376}
3377
3378void G1CollectedHeap::restore_after_evac_failure() {
3379  double remove_self_forwards_start = os::elapsedTime();
3380
3381  remove_self_forwarding_pointers();
3382  SharedRestorePreservedMarksTaskExecutor task_executor(workers());
3383  _preserved_marks_set.restore(&task_executor);
3384
3385  g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
3386}
3387
3388void G1CollectedHeap::preserve_mark_during_evac_failure(uint worker_id, oop obj, markOop m) {
3389  if (!_evacuation_failed) {
3390    _evacuation_failed = true;
3391  }
3392
3393  _evacuation_failed_info_array[worker_id].register_copy_failure(obj->size());
3394  _preserved_marks_set.get(worker_id)->push_if_necessary(obj, m);
3395}
3396
3397bool G1ParEvacuateFollowersClosure::offer_termination() {
3398  G1ParScanThreadState* const pss = par_scan_state();
3399  start_term_time();
3400  const bool res = terminator()->offer_termination();
3401  end_term_time();
3402  return res;
3403}
3404
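// Drain the worker's own task queue, then repeatedly steal and trim work from
// the other queues until all workers agree to terminate.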
3405void G1ParEvacuateFollowersClosure::do_void() {
3406  G1ParScanThreadState* const pss = par_scan_state();
3407  pss->trim_queue();
3408  do {
3409    pss->steal_and_trim_queue(queues());
3410  } while (!offer_termination());
3411}
3412
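// The main evacuation gang task: each worker evacuates the strong roots, scans
// the remembered sets that point into the collection set, and then copies live
// objects by draining and stealing from the per-worker task queues until
// termination.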
3413class G1ParTask : public AbstractGangTask {
3414protected:
3415  G1CollectedHeap*         _g1h;
3416  G1ParScanThreadStateSet* _pss;
3417  RefToScanQueueSet*       _queues;
3418  G1RootProcessor*         _root_processor;
3419  ParallelTaskTerminator   _terminator;
3420  uint                     _n_workers;
3421
3422public:
3423  G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor, uint n_workers)
3424    : AbstractGangTask("G1 collection"),
3425      _g1h(g1h),
3426      _pss(per_thread_states),
3427      _queues(task_queues),
3428      _root_processor(root_processor),
3429      _terminator(n_workers, _queues),
3430      _n_workers(n_workers)
3431  {}
3432
3433  void work(uint worker_id) {
3434    if (worker_id >= _n_workers) return;  // no work needed this round
3435
3436    double start_sec = os::elapsedTime();
3437    _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, start_sec);
3438
3439    {
3440      ResourceMark rm;
3441      HandleMark   hm;
3442
3443      ReferenceProcessor*             rp = _g1h->ref_processor_stw();
3444
3445      G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
3446      pss->set_ref_processor(rp);
3447
3448      double start_strong_roots_sec = os::elapsedTime();
3449
3450      _root_processor->evacuate_roots(pss->closures(), worker_id);
3451
3452      G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
3453
      // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
      // treating the nmethods we visit as roots for concurrent marking.
      // We only want to make sure that the oops in the nmethods are adjusted with regard to the
      // objects copied by the current evacuation.
3458      size_t cards_scanned = _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
3459                                                                             pss->closures()->weak_codeblobs(),
3460                                                                             worker_id);
3461
3462      _pss->add_cards_scanned(worker_id, cards_scanned);
3463
3464      double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
3465
3466      double term_sec = 0.0;
3467      size_t evac_term_attempts = 0;
3468      {
3469        double start = os::elapsedTime();
3470        G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
3471        evac.do_void();
3472
3473        evac_term_attempts = evac.term_attempts();
3474        term_sec = evac.term_time();
3475        double elapsed_sec = os::elapsedTime() - start;
3476        _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
3477        _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
3478        _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
3479      }
3480
3481      assert(pss->queue_is_empty(), "should be empty");
3482
3483      if (log_is_enabled(Debug, gc, task, stats)) {
3484        MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
3485        size_t lab_waste;
3486        size_t lab_undo_waste;
3487        pss->waste(lab_waste, lab_undo_waste);
3488        _g1h->print_termination_stats(worker_id,
3489                                      (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
3490                                      strong_roots_sec * 1000.0,                  /* strong roots time */
3491                                      term_sec * 1000.0,                          /* evac term time */
3492                                      evac_term_attempts,                         /* evac term attempts */
3493                                      lab_waste,                                  /* alloc buffer waste */
3494                                      lab_undo_waste                              /* undo waste */
3495                                      );
3496      }
3497
3498      // Close the inner scope so that the ResourceMark and HandleMark
3499      // destructors are executed here and are included as part of the
3500      // "GC Worker Time".
3501    }
3502    _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
3503  }
3504};
3505
3506void G1CollectedHeap::print_termination_stats_hdr() {
3507  log_debug(gc, task, stats)("GC Termination Stats");
3508  log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
3509  log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
3510  log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
3511}
3512
3513void G1CollectedHeap::print_termination_stats(uint worker_id,
3514                                              double elapsed_ms,
3515                                              double strong_roots_ms,
3516                                              double term_ms,
3517                                              size_t term_attempts,
3518                                              size_t alloc_buffer_waste,
3519                                              size_t undo_waste) const {
3520  log_debug(gc, task, stats)
              ("%3u %9.2f %9.2f %6.2f "
3522               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
3523               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
3524               worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
3525               term_ms, term_ms * 100 / elapsed_ms, term_attempts,
3526               (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
3527               alloc_buffer_waste * HeapWordSize / K,
3528               undo_waste * HeapWordSize / K);
3529}
3530
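// Unlinks dead entries from the StringTable and SymbolTable in parallel.
// Workers claim chunks of the tables, the per-task counts are accumulated
// atomically, and a summary is logged when the task is destroyed.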
3531class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
3532private:
3533  BoolObjectClosure* _is_alive;
3534  int _initial_string_table_size;
3535  int _initial_symbol_table_size;
3536
3537  bool  _process_strings;
3538  int _strings_processed;
3539  int _strings_removed;
3540
3541  bool  _process_symbols;
3542  int _symbols_processed;
3543  int _symbols_removed;
3544
3545public:
3546  G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
3547    AbstractGangTask("String/Symbol Unlinking"),
3548    _is_alive(is_alive),
3549    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
3550    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
3551
3552    _initial_string_table_size = StringTable::the_table()->table_size();
3553    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
3554    if (process_strings) {
3555      StringTable::clear_parallel_claimed_index();
3556    }
3557    if (process_symbols) {
3558      SymbolTable::clear_parallel_claimed_index();
3559    }
3560  }
3561
3562  ~G1StringSymbolTableUnlinkTask() {
3563    guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size,
3564              "claim value %d after unlink less than initial string table size %d",
3565              StringTable::parallel_claimed_index(), _initial_string_table_size);
3566    guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
3567              "claim value %d after unlink less than initial symbol table size %d",
3568              SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
3569
3570    log_info(gc, stringtable)(
3571        "Cleaned string and symbol table, "
3572        "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
3573        "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
3574        strings_processed(), strings_removed(),
3575        symbols_processed(), symbols_removed());
3576  }
3577
3578  void work(uint worker_id) {
3579    int strings_processed = 0;
3580    int strings_removed = 0;
3581    int symbols_processed = 0;
3582    int symbols_removed = 0;
3583    if (_process_strings) {
3584      StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
3585      Atomic::add(strings_processed, &_strings_processed);
3586      Atomic::add(strings_removed, &_strings_removed);
3587    }
3588    if (_process_symbols) {
3589      SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
3590      Atomic::add(symbols_processed, &_symbols_processed);
3591      Atomic::add(symbols_removed, &_symbols_removed);
3592    }
3593  }
3594
3595  size_t strings_processed() const { return (size_t)_strings_processed; }
3596  size_t strings_removed()   const { return (size_t)_strings_removed; }
3597
3598  size_t symbols_processed() const { return (size_t)_symbols_processed; }
3599  size_t symbols_removed()   const { return (size_t)_symbols_removed; }
3600};
3601
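// Parallel cleaning/unloading of nmethods, done in two passes: in the first
// pass workers claim batches of alive nmethods and clean them, postponing any
// nmethod that refers to one that has not been cleaned yet; once all workers
// have passed the barrier, the second pass processes the postponed list.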
3602class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
3603private:
3604  static Monitor* _lock;
3605
3606  BoolObjectClosure* const _is_alive;
3607  const bool               _unloading_occurred;
3608  const uint               _num_workers;
3609
3610  // Variables used to claim nmethods.
3611  CompiledMethod* _first_nmethod;
3612  volatile CompiledMethod* _claimed_nmethod;
3613
3614  // The list of nmethods that need to be processed by the second pass.
3615  volatile CompiledMethod* _postponed_list;
3616  volatile uint            _num_entered_barrier;
3617
3618 public:
3619  G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
3620      _is_alive(is_alive),
3621      _unloading_occurred(unloading_occurred),
3622      _num_workers(num_workers),
3623      _first_nmethod(NULL),
3624      _claimed_nmethod(NULL),
3625      _postponed_list(NULL),
3626      _num_entered_barrier(0)
3627  {
3628    CompiledMethod::increase_unloading_clock();
3629    // Get first alive nmethod
3630    CompiledMethodIterator iter = CompiledMethodIterator();
    if (iter.next_alive()) {
3632      _first_nmethod = iter.method();
3633    }
3634    _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
3635  }
3636
3637  ~G1CodeCacheUnloadingTask() {
3638    CodeCache::verify_clean_inline_caches();
3639
3640    CodeCache::set_needs_cache_clean(false);
3641    guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
3642
3643    CodeCache::verify_icholder_relocations();
3644  }
3645
3646 private:
3647  void add_to_postponed_list(CompiledMethod* nm) {
3648      CompiledMethod* old;
3649      do {
3650        old = (CompiledMethod*)_postponed_list;
3651        nm->set_unloading_next(old);
3652      } while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
3653  }
3654
3655  void clean_nmethod(CompiledMethod* nm) {
3656    bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
3657
3658    if (postponed) {
3659      // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
3660      add_to_postponed_list(nm);
3661    }
3662
    // Mark that this nmethod has been cleaned/unloaded.
3664    // After this call, it will be safe to ask if this nmethod was unloaded or not.
3665    nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
3666  }
3667
3668  void clean_nmethod_postponed(CompiledMethod* nm) {
3669    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
3670  }
3671
3672  static const int MaxClaimNmethods = 16;
3673
3674  void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
3675    CompiledMethod* first;
3676    CompiledMethodIterator last;
3677
3678    do {
3679      *num_claimed_nmethods = 0;
3680
3681      first = (CompiledMethod*)_claimed_nmethod;
3682      last = CompiledMethodIterator(first);
3683
3684      if (first != NULL) {
3685
3686        for (int i = 0; i < MaxClaimNmethods; i++) {
3687          if (!last.next_alive()) {
3688            break;
3689          }
3690          claimed_nmethods[i] = last.method();
3691          (*num_claimed_nmethods)++;
3692        }
3693      }
3694
3695    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
3696  }
3697
3698  CompiledMethod* claim_postponed_nmethod() {
3699    CompiledMethod* claim;
3700    CompiledMethod* next;
3701
3702    do {
3703      claim = (CompiledMethod*)_postponed_list;
3704      if (claim == NULL) {
3705        return NULL;
3706      }
3707
3708      next = claim->unloading_next();
3709
3710    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
3711
3712    return claim;
3713  }
3714
3715 public:
3716  // Mark that we're done with the first pass of nmethod cleaning.
3717  void barrier_mark(uint worker_id) {
3718    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
3719    _num_entered_barrier++;
3720    if (_num_entered_barrier == _num_workers) {
3721      ml.notify_all();
3722    }
3723  }
3724
3725  // See if we have to wait for the other workers to
3726  // finish their first-pass nmethod cleaning work.
3727  void barrier_wait(uint worker_id) {
3728    if (_num_entered_barrier < _num_workers) {
3729      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
3730      while (_num_entered_barrier < _num_workers) {
3731          ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
3732      }
3733    }
3734  }
3735
3736  // Cleaning and unloading of nmethods. Some work has to be postponed
3737  // to the second pass, when we know which nmethods survive.
3738  void work_first_pass(uint worker_id) {
    // The first nmethod is claimed by the first worker.
3740    if (worker_id == 0 && _first_nmethod != NULL) {
3741      clean_nmethod(_first_nmethod);
3742      _first_nmethod = NULL;
3743    }
3744
3745    int num_claimed_nmethods;
3746    CompiledMethod* claimed_nmethods[MaxClaimNmethods];
3747
3748    while (true) {
3749      claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
3750
3751      if (num_claimed_nmethods == 0) {
3752        break;
3753      }
3754
3755      for (int i = 0; i < num_claimed_nmethods; i++) {
3756        clean_nmethod(claimed_nmethods[i]);
3757      }
3758    }
3759  }
3760
3761  void work_second_pass(uint worker_id) {
3762    CompiledMethod* nm;
3763    // Take care of postponed nmethods.
3764    while ((nm = claim_postponed_nmethod()) != NULL) {
3765      clean_nmethod_postponed(nm);
3766    }
3767  }
3768};
3769
3770Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
3771
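// Cleans weak metadata links in loaded classes. One worker claims the cleaning
// of the subklass/sibling tree; all workers then claim InstanceKlasses from the
// class loader data graph and clean their weak links.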
3772class G1KlassCleaningTask : public StackObj {
3773  BoolObjectClosure*                      _is_alive;
3774  volatile jint                           _clean_klass_tree_claimed;
3775  ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
3776
3777 public:
3778  G1KlassCleaningTask(BoolObjectClosure* is_alive) :
3779      _is_alive(is_alive),
3780      _clean_klass_tree_claimed(0),
3781      _klass_iterator() {
3782  }
3783
3784 private:
3785  bool claim_clean_klass_tree_task() {
3786    if (_clean_klass_tree_claimed) {
3787      return false;
3788    }
3789
3790    return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0;
3791  }
3792
3793  InstanceKlass* claim_next_klass() {
3794    Klass* klass;
3795    do {
      klass = _klass_iterator.next_klass();
3797    } while (klass != NULL && !klass->is_instance_klass());
3798
3799    // this can be null so don't call InstanceKlass::cast
3800    return static_cast<InstanceKlass*>(klass);
3801  }
3802
3803public:
3804
3805  void clean_klass(InstanceKlass* ik) {
3806    ik->clean_weak_instanceklass_links(_is_alive);
3807  }
3808
3809  void work() {
3810    ResourceMark rm;
3811
3812    // One worker will clean the subklass/sibling klass tree.
3813    if (claim_clean_klass_tree_task()) {
3814      Klass::clean_subklass_tree(_is_alive);
3815    }
3816
    // All workers will help clean the classes.
3818    InstanceKlass* klass;
3819    while ((klass = claim_next_klass()) != NULL) {
3820      clean_klass(klass);
3821    }
3822  }
3823};
3824
3825// To minimize the remark pause times, the tasks below are done in parallel.
3826class G1ParallelCleaningTask : public AbstractGangTask {
3827private:
3828  G1StringSymbolTableUnlinkTask _string_symbol_task;
3829  G1CodeCacheUnloadingTask      _code_cache_task;
3830  G1KlassCleaningTask           _klass_cleaning_task;
3831
3832public:
3833  // The constructor is run in the VMThread.
3834  G1ParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) :
3835      AbstractGangTask("Parallel Cleaning"),
3836      _string_symbol_task(is_alive, process_strings, process_symbols),
3837      _code_cache_task(num_workers, is_alive, unloading_occurred),
3838      _klass_cleaning_task(is_alive) {
3839  }
3840
3841  // The parallel work done by all worker threads.
3842  void work(uint worker_id) {
3843    // Do first pass of code cache cleaning.
3844    _code_cache_task.work_first_pass(worker_id);
3845
3846    // Let the threads mark that the first pass is done.
3847    _code_cache_task.barrier_mark(worker_id);
3848
3849    // Clean the Strings and Symbols.
3850    _string_symbol_task.work(worker_id);
3851
3852    // Wait for all workers to finish the first code cache cleaning pass.
3853    _code_cache_task.barrier_wait(worker_id);
3854
    // Do the second pass of code cache cleaning, which relies on
    // the liveness information gathered during the first pass.
3857    _code_cache_task.work_second_pass(worker_id);
3858
3859    // Clean all klasses that were not unloaded.
3860    _klass_cleaning_task.work();
3861  }
3862};
3863
3864
3865void G1CollectedHeap::parallel_cleaning(BoolObjectClosure* is_alive,
3866                                        bool process_strings,
3867                                        bool process_symbols,
3868                                        bool class_unloading_occurred) {
3869  uint n_workers = workers()->active_workers();
3870
3871  G1ParallelCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols,
3872                                        n_workers, class_unloading_occurred);
3873  workers()->run_task(&g1_unlink_task);
3874}
3875
3876void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
3877                                                     bool process_strings, bool process_symbols) {
3878  { // Timing scope
3879    G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
3880    workers()->run_task(&g1_unlink_task);
3881  }
3882}
3883
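// Re-dirties the cards recorded in the given dirty card queue set in parallel:
// each worker applies RedirtyLoggedCardTableEntryClosure to completed buffers
// and records the number of cards it dirtied.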
3884class G1RedirtyLoggedCardsTask : public AbstractGangTask {
3885 private:
3886  DirtyCardQueueSet* _queue;
3887  G1CollectedHeap* _g1h;
3888 public:
3889  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
3890    _queue(queue), _g1h(g1h) { }
3891
3892  virtual void work(uint worker_id) {
3893    G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3894    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3895
3896    RedirtyLoggedCardTableEntryClosure cl(_g1h);
3897    _queue->par_apply_closure_to_all_completed_buffers(&cl);
3898
3899    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3900  }
3901};
3902
3903void G1CollectedHeap::redirty_logged_cards() {
3904  double redirty_logged_cards_start = os::elapsedTime();
3905
3906  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3907  dirty_card_queue_set().reset_for_par_iteration();
3908  workers()->run_task(&redirty_task);
3909
3910  DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
3911  dcq.merge_bufferlists(&dirty_card_queue_set());
3912  assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3913
3914  g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3915}
3916
3917// Weak Reference Processing support
3918
3919// An always "is_alive" closure that is used to preserve referents.
3920// If the object is non-null then it's alive.  Used in the preservation
3921// of referent objects that are pointed to by reference objects
3922// discovered by the CM ref processor.
3923class G1AlwaysAliveClosure: public BoolObjectClosure {
3924  G1CollectedHeap* _g1;
3925public:
3926  G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3927  bool do_object_b(oop p) {
3928    if (p != NULL) {
3929      return true;
3930    }
3931    return false;
3932  }
3933};
3934
3935bool G1STWIsAliveClosure::do_object_b(oop p) {
3936  // An object is reachable if it is outside the collection set,
3937  // or is inside and copied.
3938  return !_g1->is_in_cset(p) || p->is_forwarded();
3939}
3940
3941// Non Copying Keep Alive closure
3942class G1KeepAliveClosure: public OopClosure {
3943  G1CollectedHeap* _g1;
3944public:
3945  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3946  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3947  void do_oop(oop* p) {
3948    oop obj = *p;
3949    assert(obj != NULL, "the caller should have filtered out NULL values");
3950
3951    const InCSetState cset_state = _g1->in_cset_state(obj);
3952    if (!cset_state.is_in_cset_or_humongous()) {
3953      return;
3954    }
3955    if (cset_state.is_in_cset()) {
3956      assert( obj->is_forwarded(), "invariant" );
3957      *p = obj->forwardee();
3958    } else {
3959      assert(!obj->is_forwarded(), "invariant" );
3960      assert(cset_state.is_humongous(),
3961             "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
3962      _g1->set_humongous_is_live(obj);
3963    }
3964  }
3965};
3966
3967// Copying Keep Alive closure - can be called from both
3968// serial and parallel code as long as different worker
3969// threads utilize different G1ParScanThreadState instances
3970// and different queues.
3971
3972class G1CopyingKeepAliveClosure: public OopClosure {
3973  G1CollectedHeap*         _g1h;
3974  OopClosure*              _copy_non_heap_obj_cl;
3975  G1ParScanThreadState*    _par_scan_state;
3976
3977public:
3978  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
3979                            OopClosure* non_heap_obj_cl,
3980                            G1ParScanThreadState* pss):
3981    _g1h(g1h),
3982    _copy_non_heap_obj_cl(non_heap_obj_cl),
3983    _par_scan_state(pss)
3984  {}
3985
3986  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3987  virtual void do_oop(      oop* p) { do_oop_work(p); }
3988
3989  template <class T> void do_oop_work(T* p) {
3990    oop obj = oopDesc::load_decode_heap_oop(p);
3991
3992    if (_g1h->is_in_cset_or_humongous(obj)) {
3993      // If the referent object has been forwarded (either copied
3994      // to a new location or to itself in the event of an
3995      // evacuation failure) then we need to update the reference
3996      // field and, if both reference and referent are in the G1
3997      // heap, update the RSet for the referent.
3998      //
3999      // If the referent has not been forwarded then we have to keep
      // it alive by policy. Therefore we have to copy the referent.
4001      //
4002      // If the reference field is in the G1 heap then we can push
4003      // on the PSS queue. When the queue is drained (after each
      // phase of reference processing) the object and its followers
4005      // will be copied, the reference field set to point to the
4006      // new location, and the RSet updated. Otherwise we need to
      // use the non-heap or metadata closures directly to copy
4008      // the referent object and update the pointer, while avoiding
4009      // updating the RSet.
4010
4011      if (_g1h->is_in_g1_reserved(p)) {
4012        _par_scan_state->push_on_queue(p);
4013      } else {
4014        assert(!Metaspace::contains((const void*)p),
4015               "Unexpectedly found a pointer from metadata: " PTR_FORMAT, p2i(p));
4016        _copy_non_heap_obj_cl->do_oop(p);
4017      }
4018    }
4019  }
4020};
4021
4022// Serial drain queue closure. Called as the 'complete_gc'
4023// closure for each discovered list in some of the
4024// reference processing phases.
4025
4026class G1STWDrainQueueClosure: public VoidClosure {
4027protected:
4028  G1CollectedHeap* _g1h;
4029  G1ParScanThreadState* _par_scan_state;
4030
4031  G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
4032
4033public:
4034  G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
4035    _g1h(g1h),
4036    _par_scan_state(pss)
4037  { }
4038
4039  void do_void() {
4040    G1ParScanThreadState* const pss = par_scan_state();
4041    pss->trim_queue();
4042  }
4043};
4044
4045// Parallel Reference Processing closures
4046
4047// Implementation of AbstractRefProcTaskExecutor for parallel reference
4048// processing during G1 evacuation pauses.
4049
4050class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
4051private:
4052  G1CollectedHeap*          _g1h;
4053  G1ParScanThreadStateSet*  _pss;
4054  RefToScanQueueSet*        _queues;
4055  WorkGang*                 _workers;
4056  uint                      _active_workers;
4057
4058public:
4059  G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
4060                           G1ParScanThreadStateSet* per_thread_states,
4061                           WorkGang* workers,
4062                           RefToScanQueueSet *task_queues,
4063                           uint n_workers) :
4064    _g1h(g1h),
4065    _pss(per_thread_states),
4066    _queues(task_queues),
4067    _workers(workers),
4068    _active_workers(n_workers)
4069  {
4070    g1h->ref_processor_stw()->set_active_mt_degree(n_workers);
4071  }
4072
4073  // Executes the given task using the STW worker threads.
4074  virtual void execute(ProcessTask& task);
4075  virtual void execute(EnqueueTask& task);
4076};
4077
4078// Gang task for possibly parallel reference processing
4079
4080class G1STWRefProcTaskProxy: public AbstractGangTask {
4081  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
4082  ProcessTask&     _proc_task;
4083  G1CollectedHeap* _g1h;
4084  G1ParScanThreadStateSet* _pss;
4085  RefToScanQueueSet* _task_queues;
4086  ParallelTaskTerminator* _terminator;
4087
4088public:
4089  G1STWRefProcTaskProxy(ProcessTask& proc_task,
4090                        G1CollectedHeap* g1h,
4091                        G1ParScanThreadStateSet* per_thread_states,
4092                        RefToScanQueueSet *task_queues,
4093                        ParallelTaskTerminator* terminator) :
4094    AbstractGangTask("Process reference objects in parallel"),
4095    _proc_task(proc_task),
4096    _g1h(g1h),
4097    _pss(per_thread_states),
4098    _task_queues(task_queues),
4099    _terminator(terminator)
4100  {}
4101
4102  virtual void work(uint worker_id) {
4103    // The reference processing task executed by a single worker.
4104    ResourceMark rm;
4105    HandleMark   hm;
4106
4107    G1STWIsAliveClosure is_alive(_g1h);
4108
4109    G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
4110    pss->set_ref_processor(NULL);
4111
4112    // Keep alive closure.
4113    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
4114
4115    // Complete GC closure
4116    G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
4117
4118    // Call the reference processing task's work routine.
4119    _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
4120
4121    // Note we cannot assert that the refs array is empty here as not all
4122    // of the processing tasks (specifically phase2 - pp2_work) execute
4123    // the complete_gc closure (which ordinarily would drain the queue) so
4124    // the queue may not be empty.
4125  }
4126};
4127
4128// Driver routine for parallel reference processing.
4129// Creates an instance of the ref processing gang
4130// task and has the worker threads execute it.
4131void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
4132  assert(_workers != NULL, "Need parallel worker threads.");
4133
4134  ParallelTaskTerminator terminator(_active_workers, _queues);
4135  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
4136
4137  _workers->run_task(&proc_task_proxy);
4138}
4139
4140// Gang task for parallel reference enqueueing.
4141
4142class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
4143  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
4144  EnqueueTask& _enq_task;
4145
4146public:
4147  G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
4148    AbstractGangTask("Enqueue reference objects in parallel"),
4149    _enq_task(enq_task)
4150  { }
4151
4152  virtual void work(uint worker_id) {
4153    _enq_task.work(worker_id);
4154  }
4155};
4156
4157// Driver routine for parallel reference enqueueing.
4158// Creates an instance of the ref enqueueing gang
4159// task and has the worker threads execute it.
4160
4161void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
4162  assert(_workers != NULL, "Need parallel worker threads.");
4163
4164  G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
4165
4166  _workers->run_task(&enq_task_proxy);
4167}
4168
4169// End of weak reference support closures
4170
4171// Abstract task used to preserve (i.e. copy) any referent objects
4172// that are in the collection set and are pointed to by reference
4173// objects discovered by the CM ref processor.
4174
4175class G1ParPreserveCMReferentsTask: public AbstractGangTask {
4176protected:
4177  G1CollectedHeap*         _g1h;
4178  G1ParScanThreadStateSet* _pss;
4179  RefToScanQueueSet*       _queues;
4180  ParallelTaskTerminator   _terminator;
4181  uint                     _n_workers;
4182
4183public:
4184  G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, int workers, RefToScanQueueSet *task_queues) :
4185    AbstractGangTask("ParPreserveCMReferents"),
4186    _g1h(g1h),
4187    _pss(per_thread_states),
4188    _queues(task_queues),
4189    _terminator(workers, _queues),
4190    _n_workers(workers)
4191  {
4192    g1h->ref_processor_cm()->set_active_mt_degree(workers);
4193  }
4194
4195  void work(uint worker_id) {
4196    G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
4197
4198    ResourceMark rm;
4199    HandleMark   hm;
4200
4201    G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
4202    pss->set_ref_processor(NULL);
4203    assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4204
4205    // Is alive closure
4206    G1AlwaysAliveClosure always_alive(_g1h);
4207
4208    // Copying keep alive closure. Applied to referent objects that need
4209    // to be copied.
4210    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
4211
4212    ReferenceProcessor* rp = _g1h->ref_processor_cm();
4213
4214    uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
4215    uint stride = MIN2(MAX2(_n_workers, 1U), limit);
4216
4217    // limit is set using max_num_q() - which was set using ParallelGCThreads.
4218    // So this must be true - but assert just in case someone decides to
4219    // change the worker ids.
4220    assert(worker_id < limit, "sanity");
4221    assert(!rp->discovery_is_atomic(), "check this code");
4222
4223    // Select discovered lists [i, i+stride, i+2*stride,...,limit)
4224    for (uint idx = worker_id; idx < limit; idx += stride) {
4225      DiscoveredList& ref_list = rp->discovered_refs()[idx];
4226
4227      DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
4228      while (iter.has_next()) {
4229        // Since discovery is not atomic for the CM ref processor, we
4230        // can see some null referent objects.
4231        iter.load_ptrs(DEBUG_ONLY(true));
4232        oop ref = iter.obj();
4233
4234        // This will filter nulls.
4235        if (iter.is_referent_alive()) {
4236          iter.make_referent_alive();
4237        }
4238        iter.move_to_next();
4239      }
4240    }
4241
4242    // Drain the queue - which may cause stealing
4243    G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4244    drain_queue.do_void();
4245    // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4246    assert(pss->queue_is_empty(), "should be");
4247  }
4248};
4249
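// Processes JNI weak handles directly, without going through the STW reference
// processor: dead referents are cleared and live ones have their handles updated
// via the non-copying keep-alive closure. Used when full reference processing is
// skipped for this pause; the time is recorded as reference processing time.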
4250void G1CollectedHeap::process_weak_jni_handles() {
4251  double ref_proc_start = os::elapsedTime();
4252
4253  G1STWIsAliveClosure is_alive(this);
4254  G1KeepAliveClosure keep_alive(this);
4255  JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4256
4257  double ref_proc_time = os::elapsedTime() - ref_proc_start;
4258  g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4259}
4260
4261void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4262  // Any reference objects, in the collection set, that were 'discovered'
4263  // by the CM ref processor should have already been copied (either by
4264  // applying the external root copy closure to the discovered lists, or
4265  // by following an RSet entry).
4266  //
4267  // But some of the referents that these reference objects point to, and
4268  // that are in the collection set, may not have been copied: the STW ref
4269  // processor would have seen that the reference object had already
4270  // been 'discovered' and would have skipped discovering the reference,
4271  // but would not have treated the reference object as a regular oop.
4272  // As a result the copy closure would not have been applied to the
4273  // referent object.
4274  //
4275  // We need to explicitly copy these referent objects - the references
4276  // will be processed at the end of remarking.
4277  //
4278  // We also need to do this copying before we process the reference
4279  // objects discovered by the STW ref processor in case one of these
4280  // referents points to another object which is also referenced by an
4281  // object discovered by the STW ref processor.
4282  double preserve_cm_referents_time = 0.0;
4283
4284  // To avoid spawning a task when there is no work to do, check that
4285  // a concurrent cycle is active and that some references have been
4286  // discovered.
4287  if (concurrent_mark()->cmThread()->during_cycle() &&
4288      ref_processor_cm()->has_discovered_references()) {
4289    double preserve_cm_referents_start = os::elapsedTime();
4290    uint no_of_gc_workers = workers()->active_workers();
4291    G1ParPreserveCMReferentsTask keep_cm_referents(this,
4292                                                   per_thread_states,
4293                                                   no_of_gc_workers,
4294                                                   _task_queues);
4295    workers()->run_task(&keep_cm_referents);
4296    preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
4297  }
4298
4299  g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
4300}
4301
4302// Weak Reference processing during an evacuation pause (part 1).
4303void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4304  double ref_proc_start = os::elapsedTime();
4305
4306  ReferenceProcessor* rp = _ref_processor_stw;
4307  assert(rp->discovery_enabled(), "should have been enabled");
4308
4309  // Closure to test whether a referent is alive.
4310  G1STWIsAliveClosure is_alive(this);
4311
4312  // Even when parallel reference processing is enabled, the processing
4313  // of JNI refs is serial and performed by the current thread
4314  // rather than by a worker. The following PSS will be used for processing
4315  // JNI refs.
4316
4317  // Use only a single queue for this PSS.
4318  G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4319  pss->set_ref_processor(NULL);
4320  assert(pss->queue_is_empty(), "pre-condition");
4321
4322  // Keep alive closure.
4323  G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
4324
4325  // Serial Complete GC closure
4326  G1STWDrainQueueClosure drain_queue(this, pss);
4327
4328  // Set up the soft refs policy...
4329  rp->setup_policy(false);
4330
4331  ReferenceProcessorStats stats;
4332  if (!rp->processing_is_mt()) {
4333    // Serial reference processing...
4334    stats = rp->process_discovered_references(&is_alive,
4335                                              &keep_alive,
4336                                              &drain_queue,
4337                                              NULL,
4338                                              _gc_timer_stw);
4339  } else {
4340    uint no_of_gc_workers = workers()->active_workers();
4341
4342    // Parallel reference processing
4343    assert(no_of_gc_workers <= rp->max_num_q(),
4344           "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4345           no_of_gc_workers,  rp->max_num_q());
4346
4347    G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
4348    stats = rp->process_discovered_references(&is_alive,
4349                                              &keep_alive,
4350                                              &drain_queue,
4351                                              &par_task_executor,
4352                                              _gc_timer_stw);
4353  }
4354
4355  _gc_tracer_stw->report_gc_reference_stats(stats);
4356
4357  // We have completed copying any necessary live referent objects.
4358  assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4359
4360  double ref_proc_time = os::elapsedTime() - ref_proc_start;
4361  g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4362}
4363
4364// Weak Reference processing during an evacuation pause (part 2).
4365void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4366  double ref_enq_start = os::elapsedTime();
4367
4368  ReferenceProcessor* rp = _ref_processor_stw;
4369  assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
4370
4371  // Now enqueue any references remaining on the discovered lists onto
4372  // the pending list.
4373  if (!rp->processing_is_mt()) {
4374    // Serial reference processing...
4375    rp->enqueue_discovered_references();
4376  } else {
4377    // Parallel reference enqueueing
4378
4379    uint n_workers = workers()->active_workers();
4380
4381    assert(n_workers <= rp->max_num_q(),
4382           "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4383           n_workers,  rp->max_num_q());
4384
4385    G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4386    rp->enqueue_discovered_references(&par_task_executor);
4387  }
4388
4389  rp->verify_no_references_recorded();
4390  assert(!rp->discovery_enabled(), "should have been disabled");
4391
4392  // If during an initial mark pause we install a pending list head which is not otherwise reachable,
4393  // ensure that it is marked in the bitmap for concurrent marking to discover.
4394  if (collector_state()->during_initial_mark_pause()) {
4395    oop pll_head = Universe::reference_pending_list();
4396    if (pll_head != NULL) {
4397      _cm->grayRoot(pll_head);
4398    }
4399  }
4400
4401  // FIXME
4402  // CM's reference processing also cleans up the string and symbol tables.
4403  // Should we do that here also? We could, but it is a serial operation
4404  // and could significantly increase the pause time.
4405
4406  double ref_enq_time = os::elapsedTime() - ref_enq_start;
4407  g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4408}
4409
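// Flushes the per-worker G1ParScanThreadStates into global state and records
// the time taken.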
4410void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
4411  double merge_pss_time_start = os::elapsedTime();
4412  per_thread_states->flush();
4413  g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
4414}
4415
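// Per-pause setup performed before evacuation: reset the evacuation-failure
// state, disable the hot card cache, and prepare the remembered set for the
// oops-into-collection-set work.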
4416void G1CollectedHeap::pre_evacuate_collection_set() {
4417  _expand_heap_after_alloc_failure = true;
4418  _evacuation_failed = false;
4419
4420  // Disable the hot card cache.
4421  _hot_card_cache->reset_hot_cache_claimed_index();
4422  _hot_card_cache->set_use_cache(false);
4423
4424  g1_rem_set()->prepare_for_oops_into_collection_set_do();
4425  _preserved_marks_set.assert_empty();
4426}
4427
4428void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4429  // Should G1EvacuationFailureALot be in effect for this GC?
4430  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
4431
4432  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4433
4434  G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
4435
4436  // InitialMark needs claim bits to keep track of the marked-through CLDs.
4437  if (collector_state()->during_initial_mark_pause()) {
4438    double start_clear_claimed_marks = os::elapsedTime();
4439
4440    ClassLoaderDataGraph::clear_claimed_marks();
4441
4442    double recorded_clear_claimed_marks_time_ms = (os::elapsedTime() - start_clear_claimed_marks) * 1000.0;
4443    phase_times->record_clear_claimed_marks_time_ms(recorded_clear_claimed_marks_time_ms);
4444  }
4445
4446  double start_par_time_sec = os::elapsedTime();
4447  double end_par_time_sec;
4448
4449  {
4450    const uint n_workers = workers()->active_workers();
4451    G1RootProcessor root_processor(this, n_workers);
4452    G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
4453
4454    print_termination_stats_hdr();
4455
4456    workers()->run_task(&g1_par_task);
4457    end_par_time_sec = os::elapsedTime();
4458
4459    // Closing the inner scope will execute the destructor
4460    // for the G1RootProcessor object. We record the current
4461    // elapsed time before closing the scope so that time
4462    // taken for the destructor is NOT included in the
4463    // reported parallel time.
4464  }
4465
4466  double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4467  phase_times->record_par_time(par_time_ms);
4468
4469  double code_root_fixup_time_ms =
4470        (os::elapsedTime() - end_par_time_sec) * 1000.0;
4471  phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4472}
4473
4474void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4475  // Process any discovered reference objects - we have
4476  // to do this _before_ we retire the GC alloc regions
4477  // as we may have to copy some 'reachable' referent
4478  // objects (and their reachable sub-graphs) that were
4479  // not copied during the pause.
4480  if (g1_policy()->should_process_references()) {
4481    preserve_cm_referents(per_thread_states);
4482    process_discovered_references(per_thread_states);
4483  } else {
4484    ref_processor_stw()->verify_no_references_recorded();
4485    process_weak_jni_handles();
4486  }
4487
4488  if (G1StringDedup::is_enabled()) {
4489    double fixup_start = os::elapsedTime();
4490
4491    G1STWIsAliveClosure is_alive(this);
4492    G1KeepAliveClosure keep_alive(this);
4493    G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4494
4495    double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4496    g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4497  }
4498
4499  g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4500
4501  if (evacuation_failed()) {
4502    restore_after_evac_failure();
4503
4504    // Reset the G1EvacuationFailureALot counters and flags
4505    // Note: the values are reset only when an actual
4506    // evacuation failure occurs.
4507    NOT_PRODUCT(reset_evacuation_should_fail();)
4508  }
4509
4510  _preserved_marks_set.assert_empty();
4511
4512  // Enqueue any references remaining on the STW
4513  // reference processor's discovered lists. We need to do
4514  // this after the card table is cleaned (and verified) as
4515  // the act of enqueueing entries on to the pending list
4516  // will log these updates (and dirty their associated
4517  // cards). We need these updates logged to update any
4518  // RSets.
4519  if (g1_policy()->should_process_references()) {
4520    enqueue_discovered_references(per_thread_states);
4521  } else {
4522    g1_policy()->phase_times()->record_ref_enq_time(0);
4523  }
4524
4525  _allocator->release_gc_alloc_regions(evacuation_info);
4526
4527  merge_per_thread_state_info(per_thread_states);
4528
4529  // Reset and re-enable the hot card cache.
4530  // Note the counts for the cards in the regions in the
4531  // collection set are reset when the collection set is freed.
4532  _hot_card_cache->reset_hot_cache();
4533  _hot_card_cache->set_use_cache(true);
4534
4535  purge_code_root_memory();
4536
4537  redirty_logged_cards();
4538#if defined(COMPILER2) || INCLUDE_JVMCI
4539  DerivedPointerTable::update_pointers();
4540#endif
4541  g1_policy()->print_age_table();
4542}
4543
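// Reports how much memory was copied during the pause: bytes newly allocated in
// old regions are fed back to the policy, and per-generation evacuation
// statistics are reported to the GC tracer.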
4544void G1CollectedHeap::record_obj_copy_mem_stats() {
4545  g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4546
4547  _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4548                                               create_g1_evac_summary(&_old_evac_stats));
4549}
4550
4551void G1CollectedHeap::free_region(HeapRegion* hr,
4552                                  FreeRegionList* free_list,
4553                                  bool skip_remset,
4554                                  bool skip_hot_card_cache,
4555                                  bool locked) {
4556  assert(!hr->is_free(), "the region should not be free");
4557  assert(!hr->is_empty(), "the region should not be empty");
4558  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
4559  assert(free_list != NULL, "pre-condition");
4560
4561  if (G1VerifyBitmaps) {
4562    MemRegion mr(hr->bottom(), hr->end());
4563    concurrent_mark()->clearRangePrevBitmap(mr);
4564  }
4565
4566  // Clear the card counts for this region.
4567  // Note: we only need to do this if the region is not young
4568  // (since we don't refine cards in young regions).
4569  if (!skip_hot_card_cache && !hr->is_young()) {
4570    _hot_card_cache->reset_card_counts(hr);
4571  }
4572  hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
4573  free_list->add_ordered(hr);
4574}
4575
4576void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
4577                                            FreeRegionList* free_list,
4578                                            bool skip_remset) {
4579  assert(hr->is_humongous(), "this is only for humongous regions");
4580  assert(free_list != NULL, "pre-condition");
4581  hr->clear_humongous();
4582  free_region(hr, free_list, skip_remset);
4583}
4584
4585void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
4586                                           const uint humongous_regions_removed) {
4587  if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4588    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4589    _old_set.bulk_remove(old_regions_removed);
4590    _humongous_set.bulk_remove(humongous_regions_removed);
4591  }
4593}
4594
4595void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4596  assert(list != NULL, "list can't be null");
4597  if (!list->is_empty()) {
4598    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4599    _hrm.insert_list_into_free_list(list);
4600  }
4601}
4602
4603void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
4604  decrease_used(bytes);
4605}
4606
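// Parallel task that scrubs the remembered sets: each worker claims heap
// regions via the HeapRegionClaimer and scrubs their remembered sets through
// G1RemSet.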
4607class G1ParScrubRemSetTask: public AbstractGangTask {
4608protected:
4609  G1RemSet* _g1rs;
4610  HeapRegionClaimer _hrclaimer;
4611
4612public:
4613  G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4614    AbstractGangTask("G1 ScrubRS"),
4615    _g1rs(g1_rs),
4616    _hrclaimer(num_workers) {
4617  }
4618
4619  void work(uint worker_id) {
4620    _g1rs->scrub(worker_id, &_hrclaimer);
4621  }
4622};
4623
4624void G1CollectedHeap::scrub_rem_set() {
4625  uint num_workers = workers()->active_workers();
4626  G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4627  workers()->run_task(&g1_par_scrub_rs_task);
4628}
4629
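// Task that frees the regions in the collection set. Work that must be done
// serially (freeing the regions, updating the free list and the accounting) is
// claimed by exactly one worker; the remaining per-region work (remembered set
// length accounting, hot card cache and remembered set cleanup) is distributed
// among the workers in chunks.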
4630class G1FreeCollectionSetTask : public AbstractGangTask {
4631private:
4632
4633  // Closure applied to all regions in the collection set to do work that needs to
4634  // be done serially in a single thread.
4635  class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
4636  private:
4637    EvacuationInfo* _evacuation_info;
4638    const size_t* _surviving_young_words;
4639
4640    // Bytes used in successfully evacuated regions before the evacuation.
4641    size_t _before_used_bytes;
4642    // Bytes used in unsuccessfully evacuated regions before the evacuation.
4643    size_t _after_used_bytes;
4644
4645    size_t _bytes_allocated_in_old_since_last_gc;
4646
4647    size_t _failure_used_words;
4648    size_t _failure_waste_words;
4649
4650    FreeRegionList _local_free_list;
4651  public:
4652    G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4653      HeapRegionClosure(),
4654      _evacuation_info(evacuation_info),
4655      _surviving_young_words(surviving_young_words),
4656      _before_used_bytes(0),
4657      _after_used_bytes(0),
4658      _bytes_allocated_in_old_since_last_gc(0),
4659      _failure_used_words(0),
4660      _failure_waste_words(0),
4661      _local_free_list("Local Region List for CSet Freeing") {
4662    }
4663
4664    virtual bool doHeapRegion(HeapRegion* r) {
4665      G1CollectedHeap* g1h = G1CollectedHeap::heap();
4666
4667      assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
4668      g1h->clear_in_cset(r);
4669
4670      if (r->is_young()) {
4671        assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
4672               "Young index %d is wrong for region %u of type %s with %u young regions",
4673               r->young_index_in_cset(),
4674               r->hrm_index(),
4675               r->get_type_str(),
4676               g1h->collection_set()->young_region_length());
4677        size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
4678        r->record_surv_words_in_group(words_survived);
4679      }
4680
4681      if (!r->evacuation_failed()) {
4682        assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
4683        _before_used_bytes += r->used();
4684        g1h->free_region(r,
4685                         &_local_free_list,
4686                         true, /* skip_remset */
4687                         true, /* skip_hot_card_cache */
4688                         true  /* locked */);
4689      } else {
4690        r->uninstall_surv_rate_group();
4691        r->set_young_index_in_cset(-1);
4692        r->set_evacuation_failed(false);
4693        // When moving a young gen region to old gen, we "allocate" that whole region
4694        // there. This is in addition to any already evacuated objects. Notify the
4695        // policy about that.
4696        // Old gen regions do not cause an additional allocation: both the objects
4697        // still in the region and the ones already moved are accounted for elsewhere.
4698        if (r->is_young()) {
4699          _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
4700        }
4701        // The region is now considered to be old.
4702        r->set_old();
4703        // Do some allocation statistics accounting. Regions that failed evacuation
4704        // are always made old, so there is no need to update anything in the young
4705        // gen statistics, but we need to update old gen statistics.
4706        size_t used_words = r->marked_bytes() / HeapWordSize;
4707
4708        _failure_used_words += used_words;
4709        _failure_waste_words += HeapRegion::GrainWords - used_words;
4710
4711        g1h->old_set_add(r);
4712        _after_used_bytes += r->used();
4713      }
4714      return false;
4715    }
4716
4717    void complete_work() {
4718      G1CollectedHeap* g1h = G1CollectedHeap::heap();
4719
4720      _evacuation_info->set_regions_freed(_local_free_list.length());
4721      _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4722
4723      g1h->prepend_to_freelist(&_local_free_list);
4724      g1h->decrement_summary_bytes(_before_used_bytes);
4725
4726      G1Policy* policy = g1h->g1_policy();
4727      policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4728
4729      g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4730    }
4731  };
4732
4733  G1CollectionSet* _collection_set;
4734  G1SerialFreeCollectionSetClosure _cl;
4735  const size_t* _surviving_young_words;
4736
4737  size_t _rs_lengths;
4738
4739  volatile jint _serial_work_claim;
4740
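  // Per-region information needed by the parallel phase, captured in
  // prepare_work() before any region is freed.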
4741  struct WorkItem {
4742    uint region_idx;
4743    bool is_young;
4744    bool evacuation_failed;
4745
4746    WorkItem(HeapRegion* r) {
4747      region_idx = r->hrm_index();
4748      is_young = r->is_young();
4749      evacuation_failed = r->evacuation_failed();
4750    }
4751  };
4752
4753  volatile size_t _parallel_work_claim;
4754  size_t _num_work_items;
4755  WorkItem* _work_items;
4756
4757  void do_serial_work() {
4758    // Need to grab the lock to be allowed to modify the old region list.
4759    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4760    _collection_set->iterate(&_cl);
4761  }
4762
4763  void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
4764    G1CollectedHeap* g1h = G1CollectedHeap::heap();
4765
4766    HeapRegion* r = g1h->region_at(region_idx);
4767    assert(!g1h->is_on_master_free_list(r), "sanity");
4768
4769    Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);
4770
4771    if (!is_young) {
4772      g1h->_hot_card_cache->reset_card_counts(r);
4773    }
4774
4775    if (!evacuation_failed) {
4776      r->rem_set()->clear_locked();
4777    }
4778  }
4779
4780  class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
4781  private:
4782    size_t _cur_idx;
4783    WorkItem* _work_items;
4784  public:
4785    G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
4786
4787    virtual bool doHeapRegion(HeapRegion* r) {
4788      _work_items[_cur_idx++] = WorkItem(r);
4789      return false;
4790    }
4791  };
4792
4793  void prepare_work() {
4794    G1PrepareFreeCollectionSetClosure cl(_work_items);
4795    _collection_set->iterate(&cl);
4796  }
4797
4798  void complete_work() {
4799    _cl.complete_work();
4800
4801    G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
4802    policy->record_max_rs_lengths(_rs_lengths);
4803    policy->cset_regions_freed();
4804  }
4805public:
4806  G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4807    AbstractGangTask("G1 Free Collection Set"),
4808    _collection_set(collection_set),
4809    _cl(evacuation_info, surviving_young_words),
4810    _surviving_young_words(surviving_young_words),
4811    _rs_lengths(0),
4812    _serial_work_claim(0),
4813    _parallel_work_claim(0),
4814    _num_work_items(collection_set->region_length()),
4815    _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4816    prepare_work();
4817  }
4818
4819  ~G1FreeCollectionSetTask() {
4820    complete_work();
4821    FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4822  }
4823
4824  // Chunk size for work distribution. The chosen value has been determined experimentally
4825  // to be a good tradeoff between overhead and achievable parallelism.
4826  static uint chunk_size() { return 32; }
4827
4828  virtual void work(uint worker_id) {
4829    G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
4830
4831    // Claim serial work.
4832    if (_serial_work_claim == 0) {
4833      jint value = Atomic::add(1, &_serial_work_claim) - 1;
4834      if (value == 0) {
4835        double serial_time = os::elapsedTime();
4836        do_serial_work();
4837        timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4838      }
4839    }
4840
4841    // Start parallel work.
4842    double young_time = 0.0;
4843    bool has_young_time = false;
4844    double non_young_time = 0.0;
4845    bool has_non_young_time = false;
4846
4847    while (true) {
4848      size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
4849      size_t cur = end - chunk_size();
4850
4851      if (cur >= _num_work_items) {
4852        break;
4853      }
4854
4855      double start_time = os::elapsedTime();
4856
4857      end = MIN2(end, _num_work_items);
4858
4859      for (; cur < end; cur++) {
4860        bool is_young = _work_items[cur].is_young;
4861
4862        do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
4863
4864        double end_time = os::elapsedTime();
4865        double time_taken = end_time - start_time;
4866        if (is_young) {
4867          young_time += time_taken;
4868          has_young_time = true;
4869        } else {
4870          non_young_time += time_taken;
4871          has_non_young_time = true;
4872        }
4873        start_time = end_time;
4874      }
4875    }
4876
4877    if (has_young_time) {
4878      timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
4879    }
4880    if (has_non_young_time) {
4881      timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
4882    }
4883  }
4884};
4885
4886void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4887  _eden.clear();
4888
4889  double free_cset_start_time = os::elapsedTime();
4890
4891  {
4892    uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4893    uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
4894
4895    G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4896
4897    log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4898                        cl.name(),
4899                        num_workers,
4900                        _collection_set.region_length());
4901    workers()->run_task(&cl, num_workers);
4902  }
4903  g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4904
4905  collection_set->clear();
4906}
4907
4908class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4909 private:
4910  FreeRegionList* _free_region_list;
4912  uint _humongous_regions_removed;
4913  size_t _freed_bytes;
4914 public:
4915
4916  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4917    _free_region_list(free_region_list), _humongous_regions_removed(0), _freed_bytes(0) {
4918  }
4919
4920  virtual bool doHeapRegion(HeapRegion* r) {
4921    if (!r->is_starts_humongous()) {
4922      return false;
4923    }
4924
4925    G1CollectedHeap* g1h = G1CollectedHeap::heap();
4926
4927    oop obj = (oop)r->bottom();
4928    G1CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
4929
4930    // The following liveness checks for the humongous object are sufficient.
4931    // The main additional check (in addition to having a reference from the roots
4932    // or the young gen) is whether the humongous object has a remembered set entry.
4933    //
4934    // A humongous object cannot be live if there is no remembered set for it
4935    // because:
4936    // - there can be no references from within humongous starts regions referencing
4937    // the object because we never allocate other objects into them.
4938    // (I.e. there are no intra-region references that may be missed by the
4939    // remembered set)
4940    // - as soon as there is a remembered set entry to the humongous starts region
4941    // (i.e. it has "escaped" to an old object) this remembered set entry will stay
4942    // until the end of a concurrent mark.
4943    //
4944    // It is not required to check whether the object has been found dead by marking
4945    // or not; in fact it would prevent reclamation within a concurrent cycle, as
4946    // all objects allocated during that time are considered live.
4947    // SATB marking is even more conservative than the remembered set.
4948    // So if at this point in the collection there is no remembered set entry,
4949    // nobody has a reference to it.
4950    // At the start of collection we flush all refinement logs, and remembered sets
4951    // are completely up-to-date with respect to references to the humongous object.
4952    //
4953    // Other implementation considerations:
4954    // - never consider object arrays at this time because they would pose
4955    // considerable effort for cleaning up the remembered sets. This is
4956    // required because stale remembered sets might reference locations that
4957    // are currently allocated into.
4958    uint region_idx = r->hrm_index();
4959    if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
4960        !r->rem_set()->is_empty()) {
4961      log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT "  with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4962                               region_idx,
4963                               (size_t)obj->size() * HeapWordSize,
4964                               p2i(r->bottom()),
4965                               r->rem_set()->occupied(),
4966                               r->rem_set()->strong_code_roots_list_length(),
4967                               next_bitmap->isMarked(r->bottom()),
4968                               g1h->is_humongous_reclaim_candidate(region_idx),
4969                               obj->is_typeArray()
4970                              );
4971      return false;
4972    }
4973
4974    guarantee(obj->is_typeArray(),
4975              "Only eagerly reclaiming type arrays is supported, but the object "
4976              PTR_FORMAT " is not.", p2i(r->bottom()));
4977
4978    log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4979                             region_idx,
4980                             (size_t)obj->size() * HeapWordSize,
4981                             p2i(r->bottom()),
4982                             r->rem_set()->occupied(),
4983                             r->rem_set()->strong_code_roots_list_length(),
4984                             next_bitmap->isMarked(r->bottom()),
4985                             g1h->is_humongous_reclaim_candidate(region_idx),
4986                             obj->is_typeArray()
4987                            );
4988
4989    // Need to clear mark bit of the humongous object if already set.
4990    if (next_bitmap->isMarked(r->bottom())) {
4991      next_bitmap->clear(r->bottom());
4992    }
4993    do {
4994      HeapRegion* next = g1h->next_region_in_humongous(r);
4995      _freed_bytes += r->used();
4996      r->set_containing_set(NULL);
4997      _humongous_regions_removed++;
4998      g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
4999      r = next;
5000    } while (r != NULL);
5001
5002    return false;
5003  }
5004
5005  uint humongous_free_count() {
5006    return _humongous_regions_removed;
5007  }
5008
5009  size_t bytes_freed() const {
5010    return _freed_bytes;
5011  }
5012};
5013
5014void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5015  assert_at_safepoint(true);
5016
5017  if (!G1EagerReclaimHumongousObjects ||
5018      (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
5019    g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5020    return;
5021  }
5022
5023  double start_time = os::elapsedTime();
5024
5025  FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5026
5027  G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5028  heap_region_iterate(&cl);
5029
5030  remove_from_old_sets(0, cl.humongous_free_count());
5031
5032  G1HRPrinter* hrp = hr_printer();
5033  if (hrp->is_active()) {
5034    FreeRegionListIterator iter(&local_cleanup_list);
5035    while (iter.more_available()) {
5036      HeapRegion* hr = iter.get_next();
5037      hrp->cleanup(hr);
5038    }
5039  }
5040
5041  prepend_to_freelist(&local_cleanup_list);
5042  decrement_summary_bytes(cl.bytes_freed());
5043
5044  g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
5045                                                                    cl.humongous_free_count());
5046}
5047
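// Clears the in-collection-set state of each region without freeing it; used
// when the collection set is abandoned.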
5048class G1AbandonCollectionSetClosure : public HeapRegionClosure {
5049public:
5050  virtual bool doHeapRegion(HeapRegion* r) {
5051    assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
5052    G1CollectedHeap::heap()->clear_in_cset(r);
5053    r->set_young_index_in_cset(-1);
5054    return false;
5055  }
5056};
5057
5058void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
5059  G1AbandonCollectionSetClosure cl;
5060  collection_set->iterate(&cl);
5061
5062  collection_set->clear();
5063  collection_set->stop_incremental_building();
5064}
5065
5066void G1CollectedHeap::set_free_regions_coming() {
5067  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
5068
5069  assert(!free_regions_coming(), "pre-condition");
5070  _free_regions_coming = true;
5071}
5072
5073void G1CollectedHeap::reset_free_regions_coming() {
5074  assert(free_regions_coming(), "pre-condition");
5075
5076  {
5077    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5078    _free_regions_coming = false;
5079    SecondaryFreeList_lock->notify_all();
5080  }
5081
5082  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
5083}
5084
5085void G1CollectedHeap::wait_while_free_regions_coming() {
5086  // Most of the time we won't have to wait, so let's do a quick test
5087  // first before we take the lock.
5088  if (!free_regions_coming()) {
5089    return;
5090  }
5091
5092  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");
5093
5094  {
5095    MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
5096    while (free_regions_coming()) {
5097      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
5098    }
5099  }
5100
5101  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
5102}
5103
5104bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
5105  return _allocator->is_retained_old_region(hr);
5106}
5107
5108void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
5109  _eden.add(hr);
5110  _g1_policy->set_region_eden(hr);
5111}
5112
5113#ifdef ASSERT
5114
5115class NoYoungRegionsClosure: public HeapRegionClosure {
5116private:
5117  bool _success;
5118public:
5119  NoYoungRegionsClosure() : _success(true) { }
5120  bool doHeapRegion(HeapRegion* r) {
5121    if (r->is_young()) {
5122      log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
5123                            p2i(r->bottom()), p2i(r->end()));
5124      _success = false;
5125    }
5126    return false;
5127  }
5128  bool success() { return _success; }
5129};
5130
5131bool G1CollectedHeap::check_young_list_empty() {
5132  bool ret = (young_regions_count() == 0);
5133
5134  NoYoungRegionsClosure closure;
5135  heap_region_iterate(&closure);
5136  ret = ret && closure.success();
5137
5138  return ret;
5139}
5140
5141#endif // ASSERT
5142
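// Used when tearing down the region sets: old regions are removed from the old
// set and young regions have their survivor rate group uninstalled; free and
// humongous regions are left untouched.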
5143class TearDownRegionSetsClosure : public HeapRegionClosure {
5144private:
5145  HeapRegionSet *_old_set;
5146
5147public:
5148  TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
5149
5150  bool doHeapRegion(HeapRegion* r) {
5151    if (r->is_old()) {
5152      _old_set->remove(r);
5153    } else if(r->is_young()) {
5154      r->uninstall_surv_rate_group();
5155    } else {
5156      // We ignore free regions; we'll empty the free list afterwards.
5157      // We ignore humongous regions; we're not tearing down the
5158      // humongous regions set.
5159      assert(r->is_free() || r->is_humongous(),
5160             "it cannot be another type");
5161    }
5162    return false;
5163  }
5164
5165  ~TearDownRegionSetsClosure() {
5166    assert(_old_set->is_empty(), "post-condition");
5167  }
5168};
5169
5170void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
5171  assert_at_safepoint(true /* should_be_vm_thread */);
5172
5173  if (!free_list_only) {
5174    TearDownRegionSetsClosure cl(&_old_set);
5175    heap_region_iterate(&cl);
5176
5177    // Note that emptying the _young_list is postponed and instead done as
5178    // the first step when rebuilding the region sets again. The reason for
5179    // this is that during a full GC string deduplication needs to know if
5180    // a collected region was young or old when the full GC was initiated.
5181  }
5182  _hrm.remove_all_free_regions();
5183}
5184
5185void G1CollectedHeap::increase_used(size_t bytes) {
5186  _summary_bytes_used += bytes;
5187}
5188
5189void G1CollectedHeap::decrease_used(size_t bytes) {
5190  assert(_summary_bytes_used >= bytes,
5191         "invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
5192         _summary_bytes_used, bytes);
5193  _summary_bytes_used -= bytes;
5194}
5195
5196void G1CollectedHeap::set_used(size_t bytes) {
5197  _summary_bytes_used = bytes;
5198}
5199
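// Rebuilds the region sets after a full GC: empty regions are returned to the
// free list; when not restricted to the free list, all other non-humongous
// regions are made old (archive regions keep their type), added to the old set,
// and their used bytes accumulated.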
5200class RebuildRegionSetsClosure : public HeapRegionClosure {
5201private:
5202  bool            _free_list_only;
5203  HeapRegionSet*   _old_set;
5204  HeapRegionManager*   _hrm;
5205  size_t          _total_used;
5206
5207public:
5208  RebuildRegionSetsClosure(bool free_list_only,
5209                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
5210    _free_list_only(free_list_only),
5211    _old_set(old_set), _hrm(hrm), _total_used(0) {
5212    assert(_hrm->num_free_regions() == 0, "pre-condition");
5213    if (!free_list_only) {
5214      assert(_old_set->is_empty(), "pre-condition");
5215    }
5216  }
5217
5218  bool doHeapRegion(HeapRegion* r) {
5219    if (r->is_empty()) {
5220      // Add free regions to the free list
5221      r->set_free();
5222      r->set_allocation_context(AllocationContext::system());
5223      _hrm->insert_into_free_list(r);
5224    } else if (!_free_list_only) {
5225
5226      if (r->is_humongous()) {
5227        // We ignore humongous regions. We leave the humongous set unchanged.
5228      } else {
5229        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
5230        // We now consider all regions old, so register as such. Leave
5231        // archive regions set that way, however, while still adding
5232        // them to the old set.
5233        if (!r->is_archive()) {
5234          r->set_old();
5235        }
5236        _old_set->add(r);
5237      }
5238      _total_used += r->used();
5239    }
5240
5241    return false;
5242  }
5243
5244  size_t total_used() {
5245    return _total_used;
5246  }
5247};
5248
5249void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
5250  assert_at_safepoint(true /* should_be_vm_thread */);
5251
5252  if (!free_list_only) {
5253    _eden.clear();
5254    _survivor.clear();
5255  }
5256
5257  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
5258  heap_region_iterate(&cl);
5259
5260  if (!free_list_only) {
5261    set_used(cl.total_used());
5262    if (_archive_allocator != NULL) {
5263      _archive_allocator->clear_used();
5264    }
5265  }
5266  assert(used_unlocked() == recalculate_used(),
5267         "inconsistent used_unlocked(), "
5268         "value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
5269         used_unlocked(), recalculate_used());
5270}
5271
5272void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
5273  _refine_cte_cl->set_concurrent(concurrent);
5274}
5275
5276bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
5277  HeapRegion* hr = heap_region_containing(p);
5278  return hr->is_in(p);
5279}
5280
5281// Methods for the mutator alloc region
5282
5283HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
5284                                                      bool force) {
5285  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5286  assert(!force || g1_policy()->can_expand_young_list(),
5287         "if force is true we should be able to expand the young list");
5288  bool should_allocate = g1_policy()->should_allocate_mutator_region();
5289  if (force || should_allocate) {
5290    HeapRegion* new_alloc_region = new_region(word_size,
5291                                              false /* is_old */,
5292                                              false /* do_expand */);
5293    if (new_alloc_region != NULL) {
5294      set_region_short_lived_locked(new_alloc_region);
5295      _hr_printer.alloc(new_alloc_region, !should_allocate);
5296      _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
5297      return new_alloc_region;
5298    }
5299  }
5300  return NULL;
5301}
5302
5303void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
5304                                                  size_t allocated_bytes) {
5305  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
5306  assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
5307
5308  collection_set()->add_eden_region(alloc_region);
5309  increase_used(allocated_bytes);
5310  _hr_printer.retire(alloc_region);
5311  // We update the eden sizes here, when the region is retired,
5312  // instead of when it's allocated, since this is the point at which its
5313  // used space has been recorded in _summary_bytes_used.
5314  g1mm()->update_eden_size();
5315}
5316
5317// Methods for the GC alloc regions
5318
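// Returns whether another GC alloc region may be allocated for the given
// destination: old destinations are not limited here, while survivor
// destinations are capped by the policy's maximum number of survivor regions.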
5319bool G1CollectedHeap::has_more_regions(InCSetState dest) {
5320  if (dest.is_old()) {
5321    return true;
5322  } else {
5323    return survivor_regions_count() < g1_policy()->max_survivor_regions();
5324  }
5325}
5326
5327HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, InCSetState dest) {
5328  assert(FreeList_lock->owned_by_self(), "pre-condition");
5329
5330  if (!has_more_regions(dest)) {
5331    return NULL;
5332  }
5333
5334  const bool is_survivor = dest.is_young();
5335
5336  HeapRegion* new_alloc_region = new_region(word_size,
5337                                            !is_survivor,
5338                                            true /* do_expand */);
5339  if (new_alloc_region != NULL) {
5340    // We really only need to do this for old regions given that we
5341    // should never scan survivors. But it doesn't hurt to do it
5342    // for survivors too.
5343    new_alloc_region->record_timestamp();
5344    if (is_survivor) {
5345      new_alloc_region->set_survivor();
5346      _survivor.add(new_alloc_region);
5347      _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
5348    } else {
5349      new_alloc_region->set_old();
5350      _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
5351    }
5352    _hr_printer.alloc(new_alloc_region);
5353    bool during_im = collector_state()->during_initial_mark_pause();
5354    new_alloc_region->note_start_of_copying(during_im);
5355    return new_alloc_region;
5356  }
5357  return NULL;
5358}
5359
5360void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
5361                                             size_t allocated_bytes,
5362                                             InCSetState dest) {
5363  bool during_im = collector_state()->during_initial_mark_pause();
5364  alloc_region->note_end_of_copying(during_im);
5365  g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
5366  if (dest.is_old()) {
5367    _old_set.add(alloc_region);
5368  }
5369  _hr_printer.retire(alloc_region);
5370}
5371
5372HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
5373  bool expanded = false;
5374  uint index = _hrm.find_highest_free(&expanded);
5375
5376  if (index != G1_NO_HRM_INDEX) {
5377    if (expanded) {
5378      log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
5379                                HeapRegion::GrainWords * HeapWordSize);
5380    }
5381    _hrm.allocate_free_regions_starting_at(index, 1);
5382    return region_at(index);
5383  }
5384  return NULL;
5385}
5386
5387// Optimized nmethod scanning
5388
5389class RegisterNMethodOopClosure: public OopClosure {
5390  G1CollectedHeap* _g1h;
5391  nmethod* _nm;
5392
5393  template <class T> void do_oop_work(T* p) {
5394    T heap_oop = oopDesc::load_heap_oop(p);
5395    if (!oopDesc::is_null(heap_oop)) {
5396      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5397      HeapRegion* hr = _g1h->heap_region_containing(obj);
5398      assert(!hr->is_continues_humongous(),
5399             "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5400             " starting at " HR_FORMAT,
5401             p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5402
5403      // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
5404      hr->add_strong_code_root_locked(_nm);
5405    }
5406  }
5407
5408public:
5409  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5410    _g1h(g1h), _nm(nm) {}
5411
5412  void do_oop(oop* p)       { do_oop_work(p); }
5413  void do_oop(narrowOop* p) { do_oop_work(p); }
5414};
5415
5416class UnregisterNMethodOopClosure: public OopClosure {
5417  G1CollectedHeap* _g1h;
5418  nmethod* _nm;
5419
5420  template <class T> void do_oop_work(T* p) {
5421    T heap_oop = oopDesc::load_heap_oop(p);
5422    if (!oopDesc::is_null(heap_oop)) {
5423      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5424      HeapRegion* hr = _g1h->heap_region_containing(obj);
5425      assert(!hr->is_continues_humongous(),
5426             "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5427             " starting at " HR_FORMAT,
5428             p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5429
5430      hr->remove_strong_code_root(_nm);
5431    }
5432  }
5433
5434public:
5435  UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5436    _g1h(g1h), _nm(nm) {}
5437
5438  void do_oop(oop* p)       { do_oop_work(p); }
5439  void do_oop(narrowOop* p) { do_oop_work(p); }
5440};
5441
5442void G1CollectedHeap::register_nmethod(nmethod* nm) {
5443  CollectedHeap::register_nmethod(nm);
5444
5445  guarantee(nm != NULL, "sanity");
5446  RegisterNMethodOopClosure reg_cl(this, nm);
5447  nm->oops_do(&reg_cl);
5448}
5449
5450void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
5451  CollectedHeap::unregister_nmethod(nm);
5452
5453  guarantee(nm != NULL, "sanity");
5454  UnregisterNMethodOopClosure reg_cl(this, nm);
5455  nm->oops_do(&reg_cl, true);
5456}
5457
5458void G1CollectedHeap::purge_code_root_memory() {
5459  double purge_start = os::elapsedTime();
5460  G1CodeRootSet::purge();
5461  double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
5462  g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
5463}
5464
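// Code blob closure that re-registers each nmethod in the code cache (when
// ScavengeRootsInCode is enabled) with the regions its oops point into,
// rebuilding the per-region strong code root lists.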
5465class RebuildStrongCodeRootClosure: public CodeBlobClosure {
5466  G1CollectedHeap* _g1h;
5467
5468public:
5469  RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5470    _g1h(g1h) {}
5471
5472  void do_code_blob(CodeBlob* cb) {
5473    nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
5474    if (nm == NULL) {
5475      return;
5476    }
5477
5478    if (ScavengeRootsInCode) {
5479      _g1h->register_nmethod(nm);
5480    }
5481  }
5482};
5483
5484void G1CollectedHeap::rebuild_strong_code_roots() {
5485  RebuildStrongCodeRootClosure blob_cl(this);
5486  CodeCache::blobs_do(&blob_cl);
5487}
5488