// vmCMSOperations.cpp revision 9727:f944761a3ce3
1/*
2 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
27#include "gc/cms/concurrentMarkSweepThread.hpp"
28#include "gc/cms/vmCMSOperations.hpp"
29#include "gc/shared/gcLocker.inline.hpp"
30#include "gc/shared/gcTimer.hpp"
31#include "gc/shared/gcTraceTime.inline.hpp"
32#include "gc/shared/isGCActiveMark.hpp"
33#include "runtime/interfaceSupport.hpp"
34#include "runtime/os.hpp"
35#include "utilities/dtrace.hpp"
36
37//////////////////////////////////////////////////////////
38// Methods in abstract class VM_CMS_Operation
39//////////////////////////////////////////////////////////
40void VM_CMS_Operation::acquire_pending_list_lock() {
41  // The caller may block while communicating
42  // with the SLT thread in order to acquire/release the PLL.
43  SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
44  if (slt != NULL) {
45    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
46  } else {
47    SurrogateLockerThread::report_missing_slt();
48  }
49}
50
51void VM_CMS_Operation::release_and_notify_pending_list_lock() {
52  // The caller may block while communicating
53  // with the SLT thread in order to acquire/release the PLL.
54  ConcurrentMarkSweepThread::slt()->
55    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
56}
57
58void VM_CMS_Operation::verify_before_gc() {
59  if (VerifyBeforeGC &&
60      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
61    GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm);
62    HandleMark hm;
63    FreelistLocker x(_collector);
64    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
65    GenCollectedHeap::heap()->prepare_for_verify();
66    Universe::verify();
67  }
68}
69
70void VM_CMS_Operation::verify_after_gc() {
71  if (VerifyAfterGC &&
72      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
73    GCTraceTime(Info, gc, verify) tm("Verify After", _collector->_gc_timer_cm);
74    HandleMark hm;
75    FreelistLocker x(_collector);
76    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
77    Universe::verify();
78  }
79}
80
81bool VM_CMS_Operation::lost_race() const {
82  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
83    // We lost a race to a foreground collection
84    // -- there's nothing to do
85    return true;
86  }
87  assert(CMSCollector::abstract_state() == legal_state(),
88         "Inconsistent collector state?");
89  return false;
90}
91
bool VM_CMS_Operation::doit_prologue() {
  // Runs in the requesting (concurrent GC) thread before the VM
  // operation is enqueued.  Acquires the pending-list lock (if this
  // operation needs it) and then the Heap_lock, in that order.
  // Returns false -- after dropping both locks again -- when a
  // foreground collection won the race and left nothing to do.
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pll()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    // Undo the locking done above, in reverse order of acquisition.
    Heap_lock->unlock();
    if (needs_pll()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}
114
115void VM_CMS_Operation::doit_epilogue() {
116  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
117  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
118  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
119         "Possible deadlock");
120
121  // Release the Heap_lock first.
122  Heap_lock->unlock();
123  if (needs_pll()) {
124    release_and_notify_pending_list_lock();
125  }
126}
127
128//////////////////////////////////////////////////////////
129// Methods in class VM_CMS_Initial_Mark
130//////////////////////////////////////////////////////////
131void VM_CMS_Initial_Mark::doit() {
132  if (lost_race()) {
133    // Nothing to do.
134    return;
135  }
136  HS_PRIVATE_CMS_INITMARK_BEGIN();
137  GCIdMark gc_id_mark(_gc_id);
138
139  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
140
141  GenCollectedHeap* gch = GenCollectedHeap::heap();
142  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
143
144  VM_CMS_Operation::verify_before_gc();
145
146  IsGCActiveMark x; // stop-world GC active
147  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
148
149  VM_CMS_Operation::verify_after_gc();
150
151  _collector->_gc_timer_cm->register_gc_pause_end();
152
153  HS_PRIVATE_CMS_INITMARK_END();
154}
155
156//////////////////////////////////////////////////////////
157// Methods in class VM_CMS_Final_Remark_Operation
158//////////////////////////////////////////////////////////
159void VM_CMS_Final_Remark::doit() {
160  if (lost_race()) {
161    // Nothing to do.
162    return;
163  }
164  HS_PRIVATE_CMS_REMARK_BEGIN();
165  GCIdMark gc_id_mark(_gc_id);
166
167  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
168
169  GenCollectedHeap* gch = GenCollectedHeap::heap();
170  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
171
172  VM_CMS_Operation::verify_before_gc();
173
174  IsGCActiveMark x; // stop-world GC active
175  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
176
177  VM_CMS_Operation::verify_after_gc();
178
179  _collector->save_heap_summary();
180  _collector->_gc_timer_cm->register_gc_pause_end();
181
182  HS_PRIVATE_CMS_REMARK_END();
183}
184
185// VM operation to invoke a concurrent collection of a
186// GenCollectedHeap heap.
void VM_GenCollectFullConcurrent::doit() {
  // Executed by the VM thread at a safepoint.  If no collection has
  // intervened since the request, do a foreground young collection
  // here; then either nudge the CMS thread to start a full concurrent
  // cycle or, if a full collection already happened, notify the
  // waiting Java thread that its work is done.
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The "full" of do_full_collection call below "forces"
    // a collection; the second arg, 0, below ensures that
    // only the young gen is collected. XXX In the future,
    // we'll probably need to have something in this interface
    // to say do this only if we are sure we will not bail
    // out to a full collection in this attempt, but that's
    // for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
      "We can only be executing this arm of if at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GC_locker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");

  // FullGCCount_lock guards the full-collection count and the
  // notification handshake with the requesting Java thread.
  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
  } else {
    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
  }
}
220
221bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
222  Thread* thr = Thread::current();
223  assert(thr != NULL, "Unexpected tid");
224  if (!thr->is_Java_thread()) {
225    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
226    GenCollectedHeap* gch = GenCollectedHeap::heap();
227    if (_gc_count_before != gch->total_collections()) {
228      // No need to do a young gc, we'll just nudge the CMS thread
229      // in the doit() method above, to be executed soon.
230      assert(_gc_count_before < gch->total_collections(),
231             "total_collections() should be monotonically increasing");
232      return false;  // no need for foreground young gc
233    }
234  }
235  return true;       // may still need foreground young gc
236}
237
238
void VM_GenCollectFullConcurrent::doit_epilogue() {
  // Runs in the requesting Java thread after the VM operation has
  // executed: drops the locks taken in the prologue and then, for
  // explicitly-requested GCs, blocks until a full collection that
  // witnesses the request has completed.
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether completed collections has
  // exceeded our request count without locking because
  // the completion count is monotonically increasing;
  // this will break for very long-running apps when the
  // count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_cause != GCCause::_gc_locker &&
      gch->total_full_collections_completed() <= _full_gc_count_before) {
    // maybe we should change the condition to test _gc_cause ==
    // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
    // instead of _gc_cause != GCCause::_gc_locker
    assert(GCCause::is_user_requested_gc(_gc_cause),
           "the only way to get here if this was a System.gc()-induced GC");
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for witnessing concurrent gc cycle to complete,
    // but do so in native mode, because we want to lock the
    // FullGCEvent_lock, which may be needed by the VM thread
    // or by the CMS thread, so we do not want to be suspended
    // while holding that lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      // Woken by notify_all() in VM_GenCollectFullConcurrent::doit()
      // (or by the collector when a cycle completes); re-check the
      // completed count because notifications can be spurious here.
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}
277