// vmCMSOperations.cpp revision 10643:767bc8e5cb19
/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/dtrace.hpp"

//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
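// The pending list lock is the Java-level lock guarding java.lang.ref.Reference's
// pending list. It is acquired in doit_prologue(), before the safepoint, and
// released (notifying any waiters) in doit_epilogue(), so that reference
// handling during the pause cannot race with Java threads.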
void VM_CMS_Operation::acquire_pending_list_lock() {
  _pending_list_locker.lock();
}

void VM_CMS_Operation::release_and_notify_pending_list_lock() {
  _pending_list_locker.unlock();
}

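// Heap verification before and after the pause, gated by -XX:+VerifyBeforeGC
// and -XX:+VerifyAfterGC (from VerifyGCStartAt collections onwards). It runs
// with the free list lock and the CMS bit map lock held so that the spaces
// stay stable while they are walked.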
void VM_CMS_Operation::verify_before_gc() {
  if (VerifyBeforeGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm);
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    GenCollectedHeap::heap()->prepare_for_verify();
    Universe::verify();
  }
}

void VM_CMS_Operation::verify_after_gc() {
  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime(Info, gc, verify) tm("Verify After", _collector->_gc_timer_cm);
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::verify();
  }
}

bool VM_CMS_Operation::lost_race() const {
  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
    // We lost a race to a foreground collection
    // -- there's nothing to do
    return true;
  }
  assert(CMSCollector::abstract_state() == legal_state(),
         "Inconsistent collector state?");
  return false;
}

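// The prologue and epilogue bracket the safepoint in which doit() runs.
// Lock ordering: the pending list lock (if needed) is taken before the
// Heap_lock; they are released in the reverse order in doit_epilogue().
// If we lost the race to a foreground collection, the prologue backs out,
// releases the locks and reports failure so the operation is not executed.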
bool VM_CMS_Operation::doit_prologue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pending_list_lock()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    Heap_lock->unlock();
    if (needs_pending_list_lock()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_CMS_Operation::doit_epilogue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  // Release the Heap_lock first.
  Heap_lock->unlock();
  if (needs_pending_list_lock()) {
    release_and_notify_pending_list_lock();
  }
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Initial_Mark
//////////////////////////////////////////////////////////
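// Stop-the-world pause that performs the CMS initial marking checkpoint
// (CMS_op_checkpointRootsInitial), bracketed by the optional before/after
// heap verification and the CMS pause timer.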
void VM_CMS_Initial_Mark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
  HS_PRIVATE_CMS_INITMARK_BEGIN();
  GCIdMark gc_id_mark(_gc_id);

  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->_gc_timer_cm->register_gc_pause_end();

  HS_PRIVATE_CMS_INITMARK_END();
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Final_Remark
//////////////////////////////////////////////////////////
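// Stop-the-world pause that performs the CMS final remark checkpoint
// (CMS_op_checkpointRootsFinal) and saves a heap summary, again bracketed
// by the optional heap verification and the CMS pause timer.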
void VM_CMS_Final_Remark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
  HS_PRIVATE_CMS_REMARK_BEGIN();
  GCIdMark gc_id_mark(_gc_id);

  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->save_heap_summary();
  _collector->_gc_timer_cm->register_gc_pause_end();

  HS_PRIVATE_CMS_REMARK_END();
}

// VM operation to invoke a concurrent collection of a
// GenCollectedHeap heap.
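// As the assert in doit() indicates, this operation is only used when a
// full collection should be handed off to the CMS background thread, i.e.
// under GCLockerInvokesConcurrent or ExplicitGCInvokesConcurrent.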
void VM_GenCollectFullConcurrent::doit() {
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The do_full_collection() call below "forces" a collection; the second
    // argument, GenCollectedHeap::YoungGen, ensures that only the young
    // generation is collected. XXX In the future, we'll probably need
    // something in this interface to say "do this only if we are sure we
    // will not bail out to a full collection in this attempt", but that's
    // for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
      "We can only be executing this arm of the 'if' at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GCLocker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");

  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
  } else {
    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
  }
}

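// Returns whether the operation still has to be evaluated at a safepoint.
// When called on the VM thread and a young collection has already happened
// since the request, no foreground young gc is needed and the safepoint can
// be skipped; doit() will then merely nudge the CMS thread.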
bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
  Thread* thr = Thread::current();
  assert(thr != NULL, "Unexpected tid");
  if (!thr->is_Java_thread()) {
    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (_gc_count_before != gch->total_collections()) {
      // No need to do a young gc, we'll just nudge the CMS thread
      // in the doit() method above, to be executed soon.
      assert(_gc_count_before < gch->total_collections(),
             "total_collections() should be monotonically increasing");
      return false;  // no need for foreground young gc
    }
  }
  return true;       // may still need foreground young gc
}

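// Runs in the requesting Java thread once the VM operation has completed.
// For an explicit (System.gc()-style) request with ExplicitGCInvokesConcurrent,
// the thread blocks here until a full collection cycle -- concurrent or
// stop-world -- has completed since the request was made.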
void VM_GenCollectFullConcurrent::doit_epilogue() {
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether the number of completed collections has
  // exceeded our request count without locking because the completion count
  // is monotonically increasing; this will break for very long-running apps
  // when the count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_cause != GCCause::_gc_locker &&
      gch->total_full_collections_completed() <= _full_gc_count_before) {
    // maybe we should change the condition to test _gc_cause ==
    // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
    // instead of _gc_cause != GCCause::_gc_locker
    assert(GCCause::is_user_requested_gc(_gc_cause),
           "the only way to get here is if this was a System.gc()-induced GC");
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for a witnessing concurrent gc cycle to complete, but do so
    // in native mode, because we want to lock the FullGCCount_lock, which may
    // be needed by the VM thread or by the CMS thread, so we do not want to
    // be suspended while holding that lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}