/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
#include "gc/g1/g1SerialFullCollector.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "runtime/thread.hpp"

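// G1SerialFullCollector drives a single-threaded (serial) full collection
// of the G1 heap. The call sequence sketched below is an assumption based
// on this file's interface; the actual call site lives in G1CollectedHeap's
// full-collection path:
//
//   G1SerialFullCollector collector(scope, reference_processor);
//   collector.prepare_collection();   // enable reference discovery
//   collector.collect();              // serial mark-sweep at a safepoint
//   collector.complete_collection();  // flush references, rebuild remembered sets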
G1SerialFullCollector::G1SerialFullCollector(G1FullGCScope* scope,
                                             ReferenceProcessor* reference_processor) :
    _scope(scope),
    _reference_processor(reference_processor),
    _is_alive_mutator(_reference_processor, NULL),
    _mt_discovery_mutator(_reference_processor, false) {
  // Temporarily make discovery by the STW ref processor single threaded (non-MT)
  // and clear the STW ref processor's _is_alive_non_header field.
}

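// Set up reference processing for the upcoming collection: turn discovery
// on and choose the soft-reference clearing policy dictated by the scope.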
void G1SerialFullCollector::prepare_collection() {
  _reference_processor->enable_discovery();
  _reference_processor->setup_policy(_scope->should_clear_soft_refs());
}

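// Finish the collection: flush the reference processor's discovered lists
// onto the pending list, then rebuild the remembered sets that the moving
// collection invalidated.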
void G1SerialFullCollector::complete_collection() {
  // Enqueue any discovered reference objects that have
  // not been removed from the discovered lists.
  ReferenceProcessorPhaseTimes pt(NULL, _reference_processor->num_q());
  _reference_processor->enqueue_discovered_references(NULL, &pt);
  pt.print_enqueue_phase();

  // Iterate the heap and rebuild the remembered sets.
  rebuild_remembered_sets();
}

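// Run the serial full collection itself: G1MarkSweep performs the classic
// mark-compact phases (mark liveness, compute new addresses, adjust
// pointers, compact) on a single thread at a safepoint.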
void G1SerialFullCollector::collect() {
  // Do the actual collection work.
  G1MarkSweep::invoke_at_safepoint(_reference_processor, _scope->should_clear_soft_refs());
}

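// Visits every region after the compacting collection and throws away the
// now-stale remembered set and card table contents; both are rebuilt from
// scratch afterwards (see rebuild_remembered_sets()).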
class PostMCRemSetClearClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
    _g1h(g1h), _mr_bs(mr_bs) {}

  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();

    _g1h->reset_gc_time_stamps(r);

    if (r->is_continues_humongous()) {
      // We'll assert that the strong code root list and RSet are empty.
      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
      assert(hrrs->occupied() == 0, "RSet should be empty");
    } else {
      hrrs->clear();
    }
    // You might think here that we could clear just the cards
    // corresponding to the used region.  But no: if we leave a dirty card
    // in a region we might allocate into, then it would prevent that card
    // from being enqueued, and cause it to be missed.
    // Re: the performance cost: we shouldn't be doing full GC anyway!
    _mr_bs->clear(MemRegion(r->bottom(), r->end()));

    return false;
  }
};

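// Rebuilds remembered set entries by re-scanning the live objects of a
// region: every cross-region reference found by RebuildRSOopClosure is
// recorded in the remembered set of the region it points into.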
class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap*   _g1h;
  RebuildRSOopClosure _cl;
public:
  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
    _g1h(g1),
    _cl(g1->g1_rem_set(), worker_i)
  { }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      _cl.set_from(r);
      r->oop_iterate(&_cl);
    }
    return false;
  }
};

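// Parallel version of the rebuild: each gang worker runs the closure above
// over a disjoint set of regions, handed out by the HeapRegionClaimer.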
class ParRebuildRSTask: public AbstractGangTask {
  G1CollectedHeap* _g1;
  HeapRegionClaimer _hrclaimer;

public:
  ParRebuildRSTask(G1CollectedHeap* g1) :
      AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}

  void work(uint worker_id) {
    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
    _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
  }
};

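// Remembered sets are rebuilt in two passes: a serial pass over all regions
// that clears the stale entries, followed by a parallel pass that re-scans
// live objects and records their cross-region references.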
void G1SerialFullCollector::rebuild_remembered_sets() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // First clear the stale remembered sets.
  PostMCRemSetClearClosure rs_clear(g1h, g1h->g1_barrier_set());
  g1h->heap_region_iterate(&rs_clear);

  // Rebuild remembered sets of all regions.
  uint n_workers = AdaptiveSizePolicy::calc_active_workers(g1h->workers()->total_workers(),
                                                           g1h->workers()->active_workers(),
                                                           Threads::number_of_non_daemon_threads());
  g1h->workers()->update_active_workers(n_workers);
  log_info(gc, task)("Using %u workers of %u to rebuild remembered set", n_workers, g1h->workers()->total_workers());

  ParRebuildRSTask rebuild_rs_task(g1h);
  g1h->workers()->run_task(&rebuild_rs_task);
}