g1RootProcessor.cpp revision 12408:777aaa19c4b1
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/mutex.hpp"
#include "services/management.hpp"

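// The two functions below form a simple counting barrier: each worker
// atomically bumps _n_workers_discovered_strong_classes once it has found
// all of its strong CLDs/nmethods, and the last worker to arrive notifies
// any workers blocked in wait_until_all_strong_classes_discovered().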
void G1RootProcessor::worker_has_discovered_all_strong_classes() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  uint new_value = (uint)Atomic::add(1, &_n_workers_discovered_strong_classes);
  if (new_value == n_workers()) {
    // This thread is last. Notify the others.
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    _lock.notify_all();
  }
}

void G1RootProcessor::wait_until_all_strong_classes_discovered() {
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  if ((uint)_n_workers_discovered_strong_classes != n_workers()) {
    MonitorLockerEx ml(&_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_discovered_strong_classes != n_workers()) {
      _lock.wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}

G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h, uint n_workers) :
    _g1h(g1h),
    _process_strong_tasks(G1RP_PS_NumElements),
    _srs(n_workers),
    _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
    _n_workers_discovered_strong_classes(0) {}

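// Per-worker entry point for root evacuation. The ordering matters when
// metadata is traced: the strong Java roots are processed first, the
// barrier above is signalled, and only after every worker has passed the
// barrier are the remaining (weak) CLDs processed.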
void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
  double ext_roots_start = os::elapsedTime();
  G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();

  process_java_roots(closures, phase_times, worker_i);

  // Beyond this point this worker thread will not find any more strong
  // CLDs/nmethods. Report this so G1 can synchronize the processing of
  // strong and weak CLDs/nmethods.
  if (closures->trace_metadata()) {
    worker_has_discovered_all_strong_classes();
  }

  process_vm_roots(closures, phase_times, worker_i);
  process_string_table_roots(closures, phase_times, worker_i);

  {
    // Now the CM ref_processor roots.
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
      // We need to treat the discovered reference lists of the
      // concurrent mark ref processor as roots and keep the entries
      // on them (which are added by the marking threads) live until
      // they can be processed at the end of marking.
      _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
    }
  }

  if (closures->trace_metadata()) {
    {
      G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
      // Barrier to make sure all workers have passed
      // the strong CLD and strong nmethods phases.
      wait_until_all_strong_classes_discovered();
    }

    // Now take the complement of the strong CLDs.
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
    assert(closures->second_pass_weak_clds() != NULL, "Should be non-null if we are tracing metadata.");
    ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
  } else {
    phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
    phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
    assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
  }

  // Finish up any enqueued closure applications (attributed as object copy time).
  closures->flush();

  double obj_copy_time_sec = closures->closure_app_seconds();

  phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);

  double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;

  phase_times->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);

  // During concurrent marking we have to filter the per-thread SATB buffers
  // to make sure we remove any oops into the CSet (which will show up
  // as implicitly live).
  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_in_progress()) {
      JavaThread::satb_mark_queue_set().filter_thread_buffers();
    }
  }

  _process_strong_tasks.all_tasks_completed(n_workers());
}

// Adaptor to pass the closures to the strong roots in the VM.
class StrongRootsClosures : public G1RootClosures {
  OopClosure* _roots;
  CLDClosure* _clds;
  CodeBlobClosure* _blobs;
public:
  StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
      _roots(roots), _clds(clds), _blobs(blobs) {}

  OopClosure* weak_oops()   { return NULL; }
  OopClosure* strong_oops() { return _roots; }

  CLDClosure* weak_clds()        { return NULL; }
  CLDClosure* strong_clds()      { return _clds; }

  CodeBlobClosure* strong_codeblobs() { return _blobs; }
};

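// Applies the given closures to the strong roots only. The weak accessors
// of the adaptor above return NULL, so weak oops and weak CLDs are skipped
// entirely.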
void G1RootProcessor::process_strong_roots(OopClosure* oops,
                                           CLDClosure* clds,
                                           CodeBlobClosure* blobs) {
  StrongRootsClosures closures(oops, clds, blobs);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  _process_strong_tasks.all_tasks_completed(n_workers());
}

// Adaptor to pass the closures to all the roots in the VM.
class AllRootsClosures : public G1RootClosures {
  OopClosure* _roots;
  CLDClosure* _clds;
public:
  AllRootsClosures(OopClosure* roots, CLDClosure* clds) :
      _roots(roots), _clds(clds) {}

  OopClosure* weak_oops() { return _roots; }
  OopClosure* strong_oops() { return _roots; }

  // By returning the same CLDClosure for both weak and strong CLDs we ensure
  // that a single walk of the CLDG will invoke the closure on all CLDs in the
  // system.
  CLDClosure* weak_clds() { return _clds; }
  CLDClosure* strong_clds() { return _clds; }

  // We don't want to visit code blobs more than once, so we return NULL for the
  // strong case and walk the entire code cache as a separate step.
  CodeBlobClosure* strong_codeblobs() { return NULL; }
};

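// Applies the given closures to all roots, strong and weak. Code blobs are
// deliberately not visited through the thread roots (see the adaptor
// above); instead the whole code cache is walked as a separate step at the
// end.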
void G1RootProcessor::process_all_roots(OopClosure* oops,
                                        CLDClosure* clds,
                                        CodeBlobClosure* blobs,
                                        bool process_string_table) {
  AllRootsClosures closures(oops, clds);

  process_java_roots(&closures, NULL, 0);
  process_vm_roots(&closures, NULL, 0);

  if (process_string_table) {
    process_string_table_roots(&closures, NULL, 0);
  }
  process_code_cache_roots(blobs, NULL, 0);

  _process_strong_tasks.all_tasks_completed(n_workers());
}

void G1RootProcessor::process_all_roots(OopClosure* oops,
                                        CLDClosure* clds,
                                        CodeBlobClosure* blobs) {
  process_all_roots(oops, clds, blobs, true);
}

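// Variant that skips the string table roots. The assert below ties it to
// !ClassUnloading; the caller is then presumably responsible for handling
// the string table separately.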
void G1RootProcessor::process_all_roots_no_string_table(OopClosure* oops,
                                                        CLDClosure* clds,
                                                        CodeBlobClosure* blobs) {
  assert(!ClassUnloading, "Should only be used when class unloading is disabled");
  process_all_roots(oops, clds, blobs, false);
}

void G1RootProcessor::process_java_roots(G1RootClosures* closures,
                                         G1GCPhaseTimes* phase_times,
                                         uint worker_i) {
  // Iterating over the CLDG and the Threads is done early to allow us to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the threads process the weak CLDs and nmethods.
  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
      ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
    bool is_par = n_workers() > 1;
    Threads::possibly_parallel_oops_do(is_par,
                                       closures->strong_oops(),
                                       closures->strong_codeblobs());
  }
}

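// Processes the VM-internal root sets: Universe, JNI handles, the object
// synchronizer, the flat profiler, the management interface, JVMTI, AOT
// code (when enabled) and the system dictionary. Each set is guarded by a
// claim so that only one worker processes it.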
void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
                                       G1GCPhaseTimes* phase_times,
                                       uint worker_i) {
  OopClosure* strong_roots = closures->strong_oops();
  OopClosure* weak_roots = closures->weak_oops();

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
      Universe::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
      JNIHandles::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
      ObjectSynchronizer::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::FlatProfilerRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_FlatProfiler_oops_do)) {
      FlatProfiler::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
      Management::oops_do(strong_roots);
    }
  }

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
      JvmtiExport::oops_do(strong_roots);
    }
  }

#if INCLUDE_AOT
  if (UseAOT) {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_aot_oops_do)) {
      AOTLoader::oops_do(strong_roots);
    }
  }
#endif

  {
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
      SystemDictionary::roots_oops_do(strong_roots, weak_roots);
    }
  }
}

void G1RootProcessor::process_string_table_roots(G1RootClosures* closures,
                                                 G1GCPhaseTimes* phase_times,
                                                 uint worker_i) {
  assert(closures->weak_oops() != NULL, "Should only be called when all roots are processed");
  G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i);
  // All threads execute the following, with specific chunks of buckets
  // from the StringTable serving as the individual tasks.
  StringTable::possibly_parallel_oops_do(closures->weak_oops());
}

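// Walks the entire code cache. The single claim below means only one worker
// performs the walk; see AllRootsClosures for why code blobs are not also
// visited through the thread roots.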
void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
                                               G1GCPhaseTimes* phase_times,
                                               uint worker_i) {
  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
    CodeCache::blobs_do(code_closure);
  }
}

uint G1RootProcessor::n_workers() const {
  return _srs.n_threads();
}