concurrentMarkSweepGeneration.cpp revision 13242:fcb4803050e8
/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX
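//
// As an illustration only (this is not the HotSpot implementation; the
// real token code lives in ConcurrentMarkSweepThread::[de]synchronize()),
// a "token on top of a monitor" protocol with VM-thread priority could
// look roughly like the sketch below, which ignores the baton-passing
// flags discussed above:
//
//   #include <mutex>
//   #include <condition_variable>
//
//   struct TokenSketch {
//     std::mutex              monitor;             // plays the role of CGC_lock
//     std::condition_variable cv;
//     bool                    vm_wants_token = false;
//     enum Holder { NOBODY, VM_THREAD, CMS_THREAD } holder = NOBODY;
//
//     void acquire(bool is_vm) {
//       std::unique_lock<std::mutex> l(monitor);
//       if (is_vm) vm_wants_token = true;          // announce priority request
//       cv.wait(l, [&] {                           // CMS defers to a waiting VM thread
//         return holder == NOBODY && (is_vm || !vm_wants_token);
//       });
//       if (is_vm) vm_wants_token = false;
//       holder = is_vm ? VM_THREAD : CMS_THREAD;
//     }
//
//     void release() {
//       { std::lock_guard<std::mutex> l(monitor); holder = NOBODY; }
//       cv.notify_all();                           // notify only after relinquishing the token
//     }
//   };
//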

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
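
// A hypothetical usage sketch (the function and lock names below are
// illustrative, not taken from this file): the CMS thread enters a phase
// that must exclude the VM thread and also needs an additional lock.
//
//   void example_cms_phase(Mutex* bit_map_lock) {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bit_map_lock);
//     // ... work protected by the CMS token and bit_map_lock ...
//   }   // locks released in reverse order, then the CMS token is given up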


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CompactibleFreeListSpaceLAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
  CardGeneration(rs, initial_byte_size, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_old_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  //  generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}
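
// Purely illustrative numbers (these are not the actual values of
// MinChunkSize or CollectedHeap::min_fill_size()): if the smallest CMS
// chunk were 4 HeapWords while the young gen's minimum object size were
// 3 HeapWords, then
//
//   _dilatation_factor = 4.0 / 3.0 ~= 1.33
//
// i.e. promoting objects from the young generation could require up to
// about a third more space here than they occupied there.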


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
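
// Worked example, assuming the usual default settings
// (CMSInitiatingOccupancyFraction = -1, MinHeapFreeRatio = 40,
// CMSTriggerRatio = 80): the "else" branch above gives
//
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
//                         = (60 + 32) / 100.0
//                         = 0.92
//
// so a concurrent cycle is considered once this generation is about
// 92% occupied.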

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                   // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next young collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
                  cms_free, expected_promotion);
    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
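
// Worked example with made-up numbers: cms_free = 300M, a padded average
// promotion (capped by the young gen capacity) of 60M, and a safety
// factor (CMSIncrementalSafetyFactor) of 10:
//
//   cms_free     = 300M - 60M              = 240M
//   cms_free_dbl = 240M * (100 - 10)/100.0 = 216M
//
// With a measured cms_consumption_rate() of 20M/s this predicts roughly
// 216 / (20 + 1) ~= 10.3 seconds until the generation is effectively full.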

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
                          cms_duration(), gc0_period(), time_until_cms_gen_full());
    return 0.0;
  }
  return work - deadline;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->cr();
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             //verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      log_warning(gc)("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        log_warning(gc)("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          log_warning(gc)("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// this is an optimized version of update_counters(). it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
  return res;
}
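
// Example with made-up numbers: available = 120M, padded average
// promotion = 30M, max_promotion_in_bytes = 200M.  The attempt is deemed
// safe because 120M >= 30M, even though a worst-case promotion of the
// full 200M would not fit; the padded average is trusted as a realistic
// upper bound on what a young collection will actually promote.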

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  Log(gc, promotion) log;
  if (log.is_trace()) {
    ResourceMark rm;
    cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    Log(gc) log;
    if (log.is_trace()) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      log.trace("From compute_new_size: ");
      log.trace("  Free fraction %f", free_percentage);
      log.trace("  Desired free fraction %f", desired_free_percentage);
      log.trace("  Maximum free fraction %f", maximum_free_percentage);
      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
      size_t young_size = gch->young_gen()->capacity();
      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}
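
// Worked expansion example with made-up numbers: used() = 600M,
// capacity() = 800M, MinHeapFreeRatio = 40:
//
//   free_percentage  = 200M / 800M = 0.25   (< desired 0.40)
//   desired_capacity = 600M / (1 - 0.40)    = 1000M
//   expand_bytes     = MAX2(1000M - 800M, MinHeapDeltaBytes) = 200M
//
// so the generation is grown by about 200M (if the expansion succeeds).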

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.)
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
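
// Bit-map example for the marking above: for a direct allocation of
// size = 8 HeapWords at address p, the bits set are
//
//   p        live bit           (object is live)
//   p + 1    "P-bit"            (object may still be uninitialized)
//   p + 7    i.e. p + size - 1  (marks the end of the block)
//
// which lets marking, precleaning and sweeping step over the block even
// before its klass word has been installed.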

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}
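
// Card-rounding example for the object-array case above, with the usual
// 512-byte cards and hypothetical addresses: an array of 224 HeapWords
// (1792 bytes on a 64-bit VM) promoted to address 0x10000200 ends at
// 0x10000200 + 1792 = 0x10000900; round_to() raises that to the next
// card boundary, 0x10000A00, so the entire range [0x10000200, 0x10000A00)
// is dirtied in the mod union table rather than just the header's card.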

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
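//
// A sketch (field access is simplified; these are not the real accessors)
// of how a reader could classify a block under the 32-bit / 64-bit
// without-COOPs encoding described above:
//
//   enum BlockState { FREE_BLOCK, OBJECT_BLOCK, TRANSIENT_BLOCK };
//
//   static BlockState classify(intptr_t klass_word) {
//     if (klass_word == 0)  return TRANSIENT_BLOCK;  // not yet initialized
//     if (klass_word & 1)   return FREE_BLOCK;       // low bit tags a free chunk
//     return OBJECT_BLOCK;                           // real klass pointer installed
//   }
//
// A reader that sees TRANSIENT must retry later (or consult the P-bits,
// when present) before it can trust the block's size.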

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);

  // Because card-scanning has been completed, subsequent phases
  // (e.g., reference processing) will not need to recognize which
  // objects have been promoted during this GC. So, we can now disable
  // promotion tracking.
  ps->promo.stopTrackingPromotions();
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  LogTarget(Trace, gc) log;

  if (_full_gc_requested) {
    log.print("CMSCollector: collect because of explicit  gc request (or GCLocker)");
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (log.is_enabled() && stats().valid()) {
    log.print("CMSCollector shouldConcurrentCollect: ");

    LogStream out(log);
    stats().print_on(&out);

    log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
    log.print("free=" SIZE_FORMAT, _cmsGen->free());
    log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
    log.print("promotion_rate=%g", stats().promotion_rate());
    log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
    log.print("occupancy=%3.7f", _cmsGen->occupancy());
    log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
                  _cmsGen->occupancy(), _bootstrap_occupancy);
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if
// the old gen wants a collection cycle started. Each may use
1169  // an appropriate criterion for making this decision.
1170  // XXX We need to make sure that the gen expansion
1171  // criterion dovetails well with this. XXX NEED TO FIX THIS
1172  if (_cmsGen->should_concurrent_collect()) {
1173    log.print("CMS old gen initiated");
1174    return true;
1175  }
1176
1177  // We start a collection if we believe an incremental collection may fail;
1178  // this is not likely to be productive in practice because it's probably too
1179  // late anyway.
1180  GenCollectedHeap* gch = GenCollectedHeap::heap();
1181  assert(gch->collector_policy()->is_generation_policy(),
1182         "You may want to check the correctness of the following");
1183  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1184    log.print("CMSCollector: collect because incremental collection will fail ");
1185    return true;
1186  }
1187
1188  if (MetaspaceGC::should_concurrent_collect()) {
1189    log.print("CMSCollector: collect for metadata allocation ");
1190    return true;
1191  }
1192
1193  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1194  if (CMSTriggerInterval >= 0) {
1195    if (CMSTriggerInterval == 0) {
1196      // Trigger always
1197      return true;
1198    }
1199
1200    // Check the CMS time since begin (we do not check the stats validity
1201    // as we want to be able to trigger the first CMS cycle as well)
1202    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1203      if (stats().valid()) {
1204        log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1205                  stats().cms_time_since_begin());
1206      } else {
1207        log.print("CMSCollector: collect because of trigger interval (first collection)");
1208      }
1209      return true;
1210    }
1211  }
1212
1213  return false;
1214}
1215
1216void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1217
1218// Clear _expansion_cause fields of constituent generations
1219void CMSCollector::clear_expansion_cause() {
1220  _cmsGen->clear_expansion_cause();
1221}
1222
1223// We should be conservative in starting a collection cycle.  To
1224// start too eagerly runs the risk of collecting too often in the
1225// extreme.  To collect too rarely falls back on full collections,
1226// which works, even if not optimum in terms of concurrent work.
1227// As a work around for too eagerly collecting, use the flag
1228// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1229// giving the user an easily understandable way of controlling the
1230// collections.
1231// We want to start a new collection cycle if any of the following
1232// conditions hold:
1233// . our current occupancy exceeds the configured initiating occupancy
1234//   for this generation, or
1235// . we recently needed to expand this space and have not, since that
1236//   expansion, done a collection of this generation, or
1237// . the underlying space believes that it may be a good idea to initiate
1238//   a concurrent collection (this may be based on criteria such as the
1239//   following: the space uses linear allocation and linear allocation is
1240//   going to fail, or there is believed to be excessive fragmentation in
1241//   the generation, etc... or ...
1242// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1243//   the case of the old generation; see CR 6543076):
1244//   we may be approaching a point at which allocation requests may fail because
1245//   we will be out of sufficient free space given allocation rate estimates.]
1246bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1247
1248  assert_lock_strong(freelistLock());
1249  if (occupancy() > initiating_occupancy()) {
1250    log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
1251                  short_name(), occupancy(), initiating_occupancy());
1252    return true;
1253  }
1254  if (UseCMSInitiatingOccupancyOnly) {
1255    return false;
1256  }
1257  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1258    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
1259    return true;
1260  }
1261  return false;
1262}
1263
1264void ConcurrentMarkSweepGeneration::collect(bool   full,
1265                                            bool   clear_all_soft_refs,
1266                                            size_t size,
1267                                            bool   tlab)
1268{
1269  collector()->collect(full, clear_all_soft_refs, size, tlab);
1270}
1271
1272void CMSCollector::collect(bool   full,
1273                           bool   clear_all_soft_refs,
1274                           size_t size,
1275                           bool   tlab)
1276{
1277  // The following "if" branch is present for defensive reasons.
1278  // In the current uses of this interface, it can be replaced with:
1279  // assert(!GCLocker.is_active(), "Can't be called otherwise");
1280  // But I am not placing that assert here to allow future
1281  // generality in invoking this interface.
1282  if (GCLocker::is_active()) {
1283    // A consistency test for GCLocker
1284    assert(GCLocker::needs_gc(), "Should have been set already");
1285    // Skip this foreground collection, instead
1286    // expanding the heap if necessary.
1287    // Need the free list locks for the call to free() in compute_new_size()
1288    compute_new_size();
1289    return;
1290  }
1291  acquire_control_and_collect(full, clear_all_soft_refs);
1292}
1293
1294void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1295  GenCollectedHeap* gch = GenCollectedHeap::heap();
1296  unsigned int gc_count = gch->total_full_collections();
1297  if (gc_count == full_gc_count) {
1298    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1299    _full_gc_requested = true;
1300    _full_gc_cause = cause;
1301    CGC_lock->notify();   // nudge CMS thread
1302  } else {
1303    assert(gc_count > full_gc_count, "Error: causal loop");
1304  }
1305}
1306
1307bool CMSCollector::is_external_interruption() {
1308  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1309  return GCCause::is_user_requested_gc(cause) ||
1310         GCCause::is_serviceability_requested_gc(cause);
1311}
1312
1313void CMSCollector::report_concurrent_mode_interruption() {
1314  if (is_external_interruption()) {
1315    log_debug(gc)("Concurrent mode interrupted");
1316  } else {
1317    log_debug(gc)("Concurrent mode failure");
1318    _gc_tracer_cm->report_concurrent_mode_failure();
1319  }
1320}
1321
1322
1323// The foreground and background collectors need to coordinate in order
1324// to make sure that they do not mutually interfere with CMS collections.
1325// When a background collection is active,
1326// the foreground collector may need to take over (preempt) and
1327// synchronously complete an ongoing collection. Depending on the
1328// frequency of the background collections and the heap usage
1329// of the application, this preemption can be seldom or frequent.
1330// There are only certain
1331// points in the background collection at which the "collection-baton"
1332// can be passed to the foreground collector.
1333//
1334// The foreground collector will wait for the baton before
1335// starting any part of the collection.  The foreground collector
1336// will only wait at one location.
1337//
1338// The background collector will yield the baton before starting a new
1339// phase of the collection (e.g., before initial marking, marking from roots,
1340// precleaning, final re-mark, sweep etc.)  This is normally done at the head
1341// of the loop which switches the phases. The background collector does some
1342// of the phases (initial mark, final re-mark) with the world stopped.
1343// Because of locking involved in stopping the world,
1344// the foreground collector should not block waiting for the background
1345// collector when it is doing a stop-the-world phase.  The background
1346// collector will yield the baton at an additional point just before
1347// it enters a stop-the-world phase.  Once the world is stopped, the
1348// background collector checks the phase of the collection.  If the
1349// phase has not changed, it proceeds with the collection.  If the
1350// phase has changed, it skips that phase of the collection.  See
1351// the comments on the use of the Heap_lock in collect_in_background().
1352//
1353// Variable used in baton passing.
1354//   _foregroundGCIsActive - Set to true by the foreground collector when
1355//      it wants the baton.  The foreground clears it when it has finished
1356//      the collection.
1357//   _foregroundGCShouldWait - Set to true by the background collector
1358//      when it is running.  The foreground collector waits while
1359//      _foregroundGCShouldWait is true.
1360//  CGC_lock - monitor used to protect access to the above variables
1361//      and to notify the foreground and background collectors.
1362//  _collectorState - current state of the CMS collection.
1363//
1364// The foreground collector
1365//   acquires the CGC_lock
1366//   sets _foregroundGCIsActive
1367//   waits on the CGC_lock for _foregroundGCShouldWait to be false
1368//     various locks acquired in preparation for the collection
1369//     are released so as not to block the background collector
1370//     that is in the midst of a collection
1371//   proceeds with the collection
1372//   clears _foregroundGCIsActive
1373//   returns
1374//
1375// The background collector in a loop iterating on the phases of the
1376//      collection
1377//   acquires the CGC_lock
1378//   sets _foregroundGCShouldWait
1379//   if _foregroundGCIsActive is set
1380//     clears _foregroundGCShouldWait, notifies CGC_lock
1381//     waits on CGC_lock for _foregroundGCIsActive to become false
1382//     and exits the loop.
1383//   otherwise
1384//     proceed with that phase of the collection
1385//     if the phase is a stop-the-world phase,
1386//       yield the baton once more just before enqueueing
1387//       the stop-world CMS operation (executed by the VM thread).
1388//   returns after all phases of the collection are done
1389//
1390
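// Condensed illustration of the handshake described above (added sketch only;
// CMS-token management, lock release and state bookkeeping are elided -- see
// acquire_control_and_collect() and waitForForegroundGC() for the real code):
//
//   // Foreground collector (VM thread, at a safepoint):
//   _foregroundGCIsActive = true;
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     CGC_lock->notify();                                  // wake the CMS thread
//     while (_foregroundGCShouldWait) {
//       CGC_lock->wait(Mutex::_no_safepoint_check_flag);   // wait for the baton
//     }
//   }
//   ... do the (foreground) collection ...
//   _foregroundGCIsActive = false;
//
//   // Background collector (CMS thread), at each phase boundary:
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     _foregroundGCShouldWait = true;
//     if (_foregroundGCIsActive) {
//       _foregroundGCShouldWait = false;                   // pass the baton
//       CGC_lock->notify();
//       while (_foregroundGCIsActive) {
//         CGC_lock->wait(Mutex::_no_safepoint_check_flag); // foreground collects
//       }
//       // abandon the remainder of this background cycle
//     }
//   }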
1391void CMSCollector::acquire_control_and_collect(bool full,
1392        bool clear_all_soft_refs) {
1393  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1394  assert(!Thread::current()->is_ConcurrentGC_thread(),
1395         "shouldn't try to acquire control from self!");
1396
1397  // Start the protocol for acquiring control of the
1398  // collection from the background collector (aka CMS thread).
1399  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1400         "VM thread should have CMS token");
1401  // Remember the possibly interrupted state of an ongoing
1402  // concurrent collection
1403  CollectorState first_state = _collectorState;
1404
1405  // Signal to a possibly ongoing concurrent collection that
1406  // we want to do a foreground collection.
1407  _foregroundGCIsActive = true;
1408
1409  // Release locks and wait for a notify from the background collector;
1410  // releasing the locks is only necessary for phases that yield,
1411  // to improve the granularity of the collection.
1412  assert_lock_strong(bitMapLock());
1413  // We need to lock the Free list lock for the space that we are
1414  // currently collecting.
1415  assert(haveFreelistLocks(), "Must be holding free list locks");
1416  bitMapLock()->unlock();
1417  releaseFreelistLocks();
1418  {
1419    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1420    if (_foregroundGCShouldWait) {
1421      // We are going to be waiting for action from the CMS thread;
1422      // it had better not be gone (for instance at shutdown)!
1423      assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
1424             "CMS thread must be running");
1425      // Wait here until the background collector gives us the go-ahead
1426      ConcurrentMarkSweepThread::clear_CMS_flag(
1427        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1428      // Get a possibly blocked CMS thread going:
1429      //   Note that we set _foregroundGCIsActive true above,
1430      //   without protection of the CGC_lock.
1431      CGC_lock->notify();
1432      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1433             "Possible deadlock");
1434      while (_foregroundGCShouldWait) {
1435        // wait for notification
1436        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1437        // Possibility of delay/starvation here, since the CMS token does
1438        // not know to give priority to the VM thread? Actually, I think
1439        // there wouldn't be any delay/starvation, but the proof of
1440        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1441      }
1442      ConcurrentMarkSweepThread::set_CMS_flag(
1443        ConcurrentMarkSweepThread::CMS_vm_has_token);
1444    }
1445  }
1446  // The CMS_token is already held.  Get back the other locks.
1447  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1448         "VM thread should have CMS token");
1449  getFreelistLocks();
1450  bitMapLock()->lock_without_safepoint_check();
1451  log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1452                       p2i(Thread::current()), first_state);
1453  log_debug(gc, state)("    gets control with state %d", _collectorState);
1454
1455  // Inform cms gen if this was due to partial collection failing.
1456  // The CMS gen may use this fact to determine its expansion policy.
1457  GenCollectedHeap* gch = GenCollectedHeap::heap();
1458  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1459    assert(!_cmsGen->incremental_collection_failed(),
1460           "Should have been noticed, reacted to and cleared");
1461    _cmsGen->set_incremental_collection_failed();
1462  }
1463
1464  if (first_state > Idling) {
1465    report_concurrent_mode_interruption();
1466  }
1467
1468  set_did_compact(true);
1469
1470  // If the collection is being acquired from the background
1471  // collector, there may be references on the discovered
1472  // references lists.  Abandon those references, since some
1473  // of them may have become unreachable after concurrent
1474  // discovery; the STW compacting collector will redo discovery
1475  // more precisely, without being subject to floating garbage.
1476  // Leaving otherwise unreachable references in the discovered
1477  // lists would require special handling.
1478  ref_processor()->disable_discovery();
1479  ref_processor()->abandon_partial_discovery();
1480  ref_processor()->verify_no_references_recorded();
1481
1482  if (first_state > Idling) {
1483    save_heap_summary();
1484  }
1485
1486  do_compaction_work(clear_all_soft_refs);
1487
1488  // Has the GC time limit been exceeded?
1489  size_t max_eden_size = _young_gen->max_eden_size();
1490  GCCause::Cause gc_cause = gch->gc_cause();
1491  size_policy()->check_gc_overhead_limit(_young_gen->used(),
1492                                         _young_gen->eden()->used(),
1493                                         _cmsGen->max_capacity(),
1494                                         max_eden_size,
1495                                         full,
1496                                         gc_cause,
1497                                         gch->collector_policy());
1498
1499  // Reset the expansion cause, now that we just completed
1500  // a collection cycle.
1501  clear_expansion_cause();
1502  _foregroundGCIsActive = false;
1503  return;
1504}
1505
1506// Resize the tenured generation
1507// after obtaining the free list locks for the
1508// two generations.
1509void CMSCollector::compute_new_size() {
1510  assert_locked_or_safepoint(Heap_lock);
1511  FreelistLocker z(this);
1512  MetaspaceGC::compute_new_size();
1513  _cmsGen->compute_new_size_free_list();
1514}
1515
1516// A work method used by the foreground collector to do
1517// a mark-sweep-compact.
1518void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1519  GenCollectedHeap* gch = GenCollectedHeap::heap();
1520
1521  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1522  gc_timer->register_gc_start();
1523
1524  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1525  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1526
1527  gch->pre_full_gc_dump(gc_timer);
1528
1529  GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1530
1531  // Temporarily widen the span of the weak reference processing to
1532  // the entire heap.
1533  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1534  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1535  // Temporarily, clear the "is_alive_non_header" field of the
1536  // reference processor.
1537  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1538  // Temporarily make reference _processing_ single threaded (non-MT).
1539  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1540  // Temporarily make refs discovery atomic
1541  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1542  // Temporarily make reference _discovery_ single threaded (non-MT)
1543  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
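  // Note (added commentary): each rp_mut_* object above is a stack-allocated
  // RAII mutator -- its constructor records the previous setting on the
  // reference processor and installs the temporary one, and its destructor
  // restores the saved value when this method returns, so the "temporary"
  // changes are bounded by the scope of do_compaction_work().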
1544
1545  ref_processor()->set_enqueuing_is_done(false);
1546  ref_processor()->enable_discovery();
1547  ref_processor()->setup_policy(clear_all_soft_refs);
1548  // If an asynchronous collection finishes, the _modUnionTable is
1549  // all clear.  If we are taking over the collection from an asynchronous
1550  // collection, clear the _modUnionTable.
1551  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1552    "_modUnionTable should be clear if the baton was not passed");
1553  _modUnionTable.clear_all();
1554  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1555    "mod union for klasses should be clear if the baton was passed");
1556  _ct->klass_rem_set()->clear_mod_union();
1557
1558  // We must adjust the allocation statistics being maintained
1559  // in the free list space. We do so by reading and clearing
1560  // the sweep timer and updating the block flux rate estimates below.
1561  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1562  if (_inter_sweep_timer.is_active()) {
1563    _inter_sweep_timer.stop();
1564    // Note that we do not use this sample to update the _inter_sweep_estimate.
1565    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1566                                            _inter_sweep_estimate.padded_average(),
1567                                            _intra_sweep_estimate.padded_average());
1568  }
1569
1570  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1571  #ifdef ASSERT
1572    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1573    size_t free_size = cms_space->free();
1574    assert(free_size ==
1575           pointer_delta(cms_space->end(), cms_space->compaction_top())
1576           * HeapWordSize,
1577      "All the free space should be compacted into one chunk at top");
1578    assert(cms_space->dictionary()->total_chunk_size(
1579                                      debug_only(cms_space->freelistLock())) == 0 ||
1580           cms_space->totalSizeInIndexedFreeLists() == 0,
1581      "All the free space should be in a single chunk");
1582    size_t num = cms_space->totalCount();
1583    assert((free_size == 0 && num == 0) ||
1584           (free_size > 0  && (num == 1 || num == 2)),
1585         "There should be at most 2 free chunks after compaction");
1586  #endif // ASSERT
1587  _collectorState = Resetting;
1588  assert(_restart_addr == NULL,
1589         "Should have been NULL'd before baton was passed");
1590  reset_stw();
1591  _cmsGen->reset_after_compaction();
1592  _concurrent_cycles_since_last_unload = 0;
1593
1594  // Clear any data recorded in the PLAB chunk arrays.
1595  if (_survivor_plab_array != NULL) {
1596    reset_survivor_plab_arrays();
1597  }
1598
1599  // Adjust the per-size allocation stats for the next epoch.
1600  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1601  // Restart the "inter sweep timer" for the next epoch.
1602  _inter_sweep_timer.reset();
1603  _inter_sweep_timer.start();
1604
1605  // No longer a need to do a concurrent collection for Metaspace.
1606  MetaspaceGC::set_should_concurrent_collect(false);
1607
1608  gch->post_full_gc_dump(gc_timer);
1609
1610  gc_timer->register_gc_end();
1611
1612  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1613
1614  // For a mark-sweep-compact, compute_new_size() will be called
1615  // in the heap's do_collection() method.
1616}
1617
1618void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1619  Log(gc, heap) log;
1620  if (!log.is_trace()) {
1621    return;
1622  }
1623
1624  ContiguousSpace* eden_space = _young_gen->eden();
1625  ContiguousSpace* from_space = _young_gen->from();
1626  ContiguousSpace* to_space   = _young_gen->to();
1627  // Eden
1628  if (_eden_chunk_array != NULL) {
1629    log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1630              p2i(eden_space->bottom()), p2i(eden_space->top()),
1631              p2i(eden_space->end()), eden_space->capacity());
1632    log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
1633              _eden_chunk_index, _eden_chunk_capacity);
1634    for (size_t i = 0; i < _eden_chunk_index; i++) {
1635      log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
1636    }
1637  }
1638  // Survivor
1639  if (_survivor_chunk_array != NULL) {
1640    log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1641              p2i(from_space->bottom()), p2i(from_space->top()),
1642              p2i(from_space->end()), from_space->capacity());
1643    log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
1644              _survivor_chunk_index, _survivor_chunk_capacity);
1645    for (size_t i = 0; i < _survivor_chunk_index; i++) {
1646      log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
1647    }
1648  }
1649}
1650
1651void CMSCollector::getFreelistLocks() const {
1652  // Get locks for all free lists in all generations that this
1653  // collector is responsible for
1654  _cmsGen->freelistLock()->lock_without_safepoint_check();
1655}
1656
1657void CMSCollector::releaseFreelistLocks() const {
1658  // Release locks for all free lists in all generations that this
1659  // collector is responsible for
1660  _cmsGen->freelistLock()->unlock();
1661}
1662
1663bool CMSCollector::haveFreelistLocks() const {
1664  // Check locks for all free lists in all generations that this
1665  // collector is responsible for
1666  assert_lock_strong(_cmsGen->freelistLock());
1667  PRODUCT_ONLY(ShouldNotReachHere());
1668  return true;
1669}
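// Added commentary: haveFreelistLocks() is intended purely for use inside
// assertions. PRODUCT_ONLY(ShouldNotReachHere()) expands to ShouldNotReachHere()
// only in product builds, so calling this method outside of debug/assert code
// would abort a product VM -- the lock check itself is debug-only.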
1670
1671// A utility class that is used by the CMS collector to
1672// temporarily "release" the foreground collector from its
1673// usual obligation to wait for the background collector to
1674// complete an ongoing phase before proceeding.
1675class ReleaseForegroundGC: public StackObj {
1676 private:
1677  CMSCollector* _c;
1678 public:
1679  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1680    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1681    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1682    // allow a potentially blocked foreground collector to proceed
1683    _c->_foregroundGCShouldWait = false;
1684    if (_c->_foregroundGCIsActive) {
1685      CGC_lock->notify();
1686    }
1687    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1688           "Possible deadlock");
1689  }
1690
1691  ~ReleaseForegroundGC() {
1692    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1693    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1694    _c->_foregroundGCShouldWait = true;
1695  }
1696};
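// Typical use of ReleaseForegroundGC (added illustration, mirroring the
// stop-the-world phases in collect_in_background() below):
//
//   {
//     ReleaseForegroundGC x(this);        // ctor: clear _foregroundGCShouldWait,
//                                         //       wake a waiting foreground GC
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                     // dtor: re-assert _foregroundGCShouldWait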
1697
1698void CMSCollector::collect_in_background(GCCause::Cause cause) {
1699  assert(Thread::current()->is_ConcurrentGC_thread(),
1700    "A CMS asynchronous collection is only allowed on a CMS thread.");
1701
1702  GenCollectedHeap* gch = GenCollectedHeap::heap();
1703  {
1704    bool safepoint_check = Mutex::_no_safepoint_check_flag;
1705    MutexLockerEx hl(Heap_lock, safepoint_check);
1706    FreelistLocker fll(this);
1707    MutexLockerEx x(CGC_lock, safepoint_check);
1708    if (_foregroundGCIsActive) {
1709      // The foreground collector is active. Skip this
1710      // background collection.
1711      assert(!_foregroundGCShouldWait, "Should be clear");
1712      return;
1713    } else {
1714      assert(_collectorState == Idling, "Should be idling before start.");
1715      _collectorState = InitialMarking;
1716      register_gc_start(cause);
1717      // Reset the expansion cause, now that we are about to begin
1718      // a new cycle.
1719      clear_expansion_cause();
1720
1721      // Clear the MetaspaceGC flag since a concurrent collection
1722      // is starting but also clear it after the collection.
1723      MetaspaceGC::set_should_concurrent_collect(false);
1724    }
1725    // Decide if we want to enable class unloading as part of the
1726    // ensuing concurrent GC cycle.
1727    update_should_unload_classes();
1728    _full_gc_requested = false;           // acks all outstanding full gc requests
1729    _full_gc_cause = GCCause::_no_gc;
1730    // Signal that we are about to start a collection
1731    gch->increment_total_full_collections();  // ... starting a collection cycle
1732    _collection_count_start = gch->total_full_collections();
1733  }
1734
1735  size_t prev_used = _cmsGen->used();
1736
1737  // The change of the collection state is normally done at this level;
1738  // the exceptions are phases that are executed while the world is
1739  // stopped.  For those phases the change of state is done while the
1740  // world is stopped.  For baton passing purposes this allows the
1741  // background collector to finish the phase and change state atomically.
1742  // The foreground collector cannot wait on a phase that is done
1743  // while the world is stopped because the foreground collector already
1744  // has the world stopped and would deadlock.
1745  while (_collectorState != Idling) {
1746    log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1747                         p2i(Thread::current()), _collectorState);
1748    // The foreground collector
1749    //   holds the Heap_lock throughout its collection.
1750    //   holds the CMS token (but not the lock)
1751    //     except while it is waiting for the background collector to yield.
1752    //
1753    // The foreground collector should be blocked (not for long)
1754    //   if the background collector is about to start a phase
1755    //   executed with world stopped.  If the background
1756    //   collector has already started such a phase, the
1757    //   foreground collector is blocked waiting for the
1758    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1759    //   are executed in the VM thread.
1760    //
1761    // The locking order is
1762    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1763    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1764    //   CMS token  (claimed in
1765    //                stop_world_and_do() -->
1766    //                  safepoint_synchronize() -->
1767    //                    CMSThread::synchronize())
1768
1769    {
1770      // Check if the FG collector wants us to yield.
1771      CMSTokenSync x(true); // is cms thread
1772      if (waitForForegroundGC()) {
1773        // We yielded to a foreground GC, nothing more to be
1774        // done this round.
1775        assert(_foregroundGCShouldWait == false, "We set it to false in "
1776               "waitForForegroundGC()");
1777        log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1778                             p2i(Thread::current()), _collectorState);
1779        return;
1780      } else {
1781        // The background collector can run but check to see if the
1782        // foreground collector has done a collection while the
1783        // background collector was waiting to get the CGC_lock
1784        // above.  If yes, break so that _foregroundGCShouldWait
1785        // is cleared before returning.
1786        if (_collectorState == Idling) {
1787          break;
1788        }
1789      }
1790    }
1791
1792    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1793      "should be waiting");
1794
1795    switch (_collectorState) {
1796      case InitialMarking:
1797        {
1798          ReleaseForegroundGC x(this);
1799          stats().record_cms_begin();
1800          VM_CMS_Initial_Mark initial_mark_op(this);
1801          VMThread::execute(&initial_mark_op);
1802        }
1803        // The collector state may be any legal state at this point
1804        // since the background collector may have yielded to the
1805        // foreground collector.
1806        break;
1807      case Marking:
1808        // initial marking in checkpointRootsInitialWork has been completed
1809        if (markFromRoots()) { // we were successful
1810          assert(_collectorState == Precleaning, "Collector state should "
1811            "have changed");
1812        } else {
1813          assert(_foregroundGCIsActive, "Internal state inconsistency");
1814        }
1815        break;
1816      case Precleaning:
1817        // marking from roots in markFromRoots has been completed
1818        preclean();
1819        assert(_collectorState == AbortablePreclean ||
1820               _collectorState == FinalMarking,
1821               "Collector state should have changed");
1822        break;
1823      case AbortablePreclean:
1824        abortable_preclean();
1825        assert(_collectorState == FinalMarking, "Collector state should "
1826          "have changed");
1827        break;
1828      case FinalMarking:
1829        {
1830          ReleaseForegroundGC x(this);
1831
1832          VM_CMS_Final_Remark final_remark_op(this);
1833          VMThread::execute(&final_remark_op);
1834        }
1835        assert(_foregroundGCShouldWait, "block post-condition");
1836        break;
1837      case Sweeping:
1838        // final marking in checkpointRootsFinal has been completed
1839        sweep();
1840        assert(_collectorState == Resizing, "Collector state change "
1841          "to Resizing must be done under the free_list_lock");
1842
1843      case Resizing: {
1844        // Sweeping has been completed...
1845        // At this point the background collection has completed.
1846        // Don't move the call to compute_new_size() down
1847        // into code that might be executed if the background
1848        // collection was preempted.
1849        {
1850          ReleaseForegroundGC x(this);   // unblock FG collection
1851          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1852          CMSTokenSync        z(true);   // not strictly needed.
1853          if (_collectorState == Resizing) {
1854            compute_new_size();
1855            save_heap_summary();
1856            _collectorState = Resetting;
1857          } else {
1858            assert(_collectorState == Idling, "The state should only change"
1859                   " because the foreground collector has finished the collection");
1860          }
1861        }
1862        break;
1863      }
1864      case Resetting:
1865        // CMS heap resizing has been completed
1866        reset_concurrent();
1867        assert(_collectorState == Idling, "Collector state should "
1868          "have changed");
1869
1870        MetaspaceGC::set_should_concurrent_collect(false);
1871
1872        stats().record_cms_end();
1873        // Don't move the concurrent_phases_end() and compute_new_size()
1874        // calls to here because a preempted background collection
1875        // has its state set to "Resetting".
1876        break;
1877      case Idling:
1878      default:
1879        ShouldNotReachHere();
1880        break;
1881    }
1882    log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1883                         p2i(Thread::current()), _collectorState);
1884    assert(_foregroundGCShouldWait, "block post-condition");
1885  }
1886
1887  // Should this be in gc_epilogue?
1888  collector_policy()->counters()->update_counters();
1889
1890  {
1891    // Clear _foregroundGCShouldWait and, in the event that the
1892    // foreground collector is waiting, notify it, before
1893    // returning.
1894    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1895    _foregroundGCShouldWait = false;
1896    if (_foregroundGCIsActive) {
1897      CGC_lock->notify();
1898    }
1899    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1900           "Possible deadlock");
1901  }
1902  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1903                       p2i(Thread::current()), _collectorState);
1904  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1905                     prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);
1906}
1907
1908void CMSCollector::register_gc_start(GCCause::Cause cause) {
1909  _cms_start_registered = true;
1910  _gc_timer_cm->register_gc_start();
1911  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1912}
1913
1914void CMSCollector::register_gc_end() {
1915  if (_cms_start_registered) {
1916    report_heap_summary(GCWhen::AfterGC);
1917
1918    _gc_timer_cm->register_gc_end();
1919    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1920    _cms_start_registered = false;
1921  }
1922}
1923
1924void CMSCollector::save_heap_summary() {
1925  GenCollectedHeap* gch = GenCollectedHeap::heap();
1926  _last_heap_summary = gch->create_heap_summary();
1927  _last_metaspace_summary = gch->create_metaspace_summary();
1928}
1929
1930void CMSCollector::report_heap_summary(GCWhen::Type when) {
1931  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1932  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1933}
1934
1935bool CMSCollector::waitForForegroundGC() {
1936  bool res = false;
1937  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1938         "CMS thread should have CMS token");
1939  // Block the foreground collector until the
1940  // background collector decides whether to
1941  // yield.
1942  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1943  _foregroundGCShouldWait = true;
1944  if (_foregroundGCIsActive) {
1945    // The background collector yields to the
1946    // foreground collector and returns a value
1947    // indicating that it has yielded.  The foreground
1948    // collector can proceed.
1949    res = true;
1950    _foregroundGCShouldWait = false;
1951    ConcurrentMarkSweepThread::clear_CMS_flag(
1952      ConcurrentMarkSweepThread::CMS_cms_has_token);
1953    ConcurrentMarkSweepThread::set_CMS_flag(
1954      ConcurrentMarkSweepThread::CMS_cms_wants_token);
1955    // Get a possibly blocked foreground thread going
1956    CGC_lock->notify();
1957    log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
1958                         p2i(Thread::current()), _collectorState);
1959    while (_foregroundGCIsActive) {
1960      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1961    }
1962    ConcurrentMarkSweepThread::set_CMS_flag(
1963      ConcurrentMarkSweepThread::CMS_cms_has_token);
1964    ConcurrentMarkSweepThread::clear_CMS_flag(
1965      ConcurrentMarkSweepThread::CMS_cms_wants_token);
1966  }
1967  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
1968                       p2i(Thread::current()), _collectorState);
1969  return res;
1970}
1971
1972// Because of the need to lock the free lists and other structures in
1973// the collector, common to all the generations that the collector is
1974// collecting, we need the gc_prologues of individual CMS generations
1975// to delegate to their collector. It may have been simpler had the
1976// current infrastructure allowed one to call a prologue on a
1977// collector. In the absence of that, we have the generation's
1978// prologue delegate to the collector, which delegates back
1979// some "local" work to a worker method in the individual generations
1980// that it's responsible for collecting, while itself doing any
1981// work common to all generations it's responsible for. A similar
1982// comment applies to the gc_epilogue()s.
1983// The role of the variable _between_prologue_and_epilogue is to
1984// enforce the invocation protocol.
1985void CMSCollector::gc_prologue(bool full) {
1986  // Call gc_prologue_work() for the CMSGen
1987  // we are responsible for.
1988
1989  // The following locking discipline assumes that we are only called
1990  // when the world is stopped.
1991  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
1992
1993  // The CMSCollector prologue must call the gc_prologues for the
1994  // "generations" that it's responsible
1995  // for.
1996
1997  assert(   Thread::current()->is_VM_thread()
1998         || (   CMSScavengeBeforeRemark
1999             && Thread::current()->is_ConcurrentGC_thread()),
2000         "Incorrect thread type for prologue execution");
2001
2002  if (_between_prologue_and_epilogue) {
2003    // We have already been invoked; this is a gc_prologue delegation
2004    // from yet another CMS generation that we are responsible for, just
2005    // ignore it since all relevant work has already been done.
2006    return;
2007  }
2008
2009  // set a bit saying prologue has been called; cleared in epilogue
2010  _between_prologue_and_epilogue = true;
2011  // Claim locks for common data structures, then call gc_prologue_work()
2012  // for each CMSGen.
2013
2014  getFreelistLocks();   // gets free list locks on constituent spaces
2015  bitMapLock()->lock_without_safepoint_check();
2016
2017  // Should call gc_prologue_work() for all cms gens we are responsible for
2018  bool duringMarking =    _collectorState >= Marking
2019                         && _collectorState < Sweeping;
2020
2021  // The young collections clear the modified oops state, which tells if
2022  // there are any modified oops in the class. The remark phase also needs
2023  // that information. Tell the young collection to save the union of all
2024  // modified klasses.
2025  if (duringMarking) {
2026    _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2027  }
2028
2029  bool registerClosure = duringMarking;
2030
2031  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2032
2033  if (!full) {
2034    stats().record_gc0_begin();
2035  }
2036}
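// Added summary (a sketch): the prologue/epilogue pair is bracketed by
// _between_prologue_and_epilogue so that the common locks are claimed and
// released exactly once per GC, no matter how many constituent generations
// delegate to this collector:
//
//   gc_prologue():  set flag, getFreelistLocks(), lock bitMapLock(), per-gen work
//   gc_epilogue():  per-gen work, unlock bitMapLock(), releaseFreelistLocks(), clear flag
//
// A second delegation while the flag is set returns immediately.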
2037
2038void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2039
2040  _capacity_at_prologue = capacity();
2041  _used_at_prologue = used();
2042
2043  // We enable promotion tracking so that card-scanning can recognize
2044  // which objects have been promoted during this GC and skip them.
2045  for (uint i = 0; i < ParallelGCThreads; i++) {
2046    _par_gc_thread_states[i]->promo.startTrackingPromotions();
2047  }
2048
2049  // Delegate to CMScollector which knows how to coordinate between
2050  // this and any other CMS generations that it is responsible for
2051  // collecting.
2052  collector()->gc_prologue(full);
2053}
2054
2055// This is a "private" interface for use by this generation's CMSCollector.
2056// Not to be called directly by any other entity (for instance,
2057// GenCollectedHeap, which calls the "public" gc_prologue method above).
2058void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2059  bool registerClosure, ModUnionClosure* modUnionClosure) {
2060  assert(!incremental_collection_failed(), "Shouldn't be set yet");
2061  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2062    "Should be NULL");
2063  if (registerClosure) {
2064    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2065  }
2066  cmsSpace()->gc_prologue();
2067  // Clear stat counters
2068  NOT_PRODUCT(
2069    assert(_numObjectsPromoted == 0, "check");
2070    assert(_numWordsPromoted   == 0, "check");
2071    log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
2072                                 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2073    _numObjectsAllocated = 0;
2074    _numWordsAllocated   = 0;
2075  )
2076}
2077
2078void CMSCollector::gc_epilogue(bool full) {
2079  // The following locking discipline assumes that we are only called
2080  // when the world is stopped.
2081  assert(SafepointSynchronize::is_at_safepoint(),
2082         "world is stopped assumption");
2083
2084  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2085  // if linear allocation blocks need to be appropriately marked to allow the
2086  // the blocks to be parsable. We also check here whether we need to nudge the
2087  // CMS collector thread to start a new cycle (if it's not already active).
2088  assert(   Thread::current()->is_VM_thread()
2089         || (   CMSScavengeBeforeRemark
2090             && Thread::current()->is_ConcurrentGC_thread()),
2091         "Incorrect thread type for epilogue execution");
2092
2093  if (!_between_prologue_and_epilogue) {
2094    // We have already been invoked; this is a gc_epilogue delegation
2095    // from yet another CMS generation that we are responsible for, just
2096    // ignore it since all relevant work has already been done.
2097    return;
2098  }
2099  assert(haveFreelistLocks(), "must have freelist locks");
2100  assert_lock_strong(bitMapLock());
2101
2102  _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2103
2104  _cmsGen->gc_epilogue_work(full);
2105
2106  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2107    // in case sampling was not already enabled, enable it
2108    _start_sampling = true;
2109  }
2110  // reset _eden_chunk_array so sampling starts afresh
2111  _eden_chunk_index = 0;
2112
2113  size_t cms_used   = _cmsGen->cmsSpace()->used();
2114
2115  // update performance counters - this uses a special version of
2116  // update_counters() that allows the utilization to be passed as a
2117  // parameter, avoiding multiple calls to used().
2118  //
2119  _cmsGen->update_counters(cms_used);
2120
2121  bitMapLock()->unlock();
2122  releaseFreelistLocks();
2123
2124  if (!CleanChunkPoolAsync) {
2125    Chunk::clean_chunk_pool();
2126  }
2127
2128  set_did_compact(false);
2129  _between_prologue_and_epilogue = false;  // ready for next cycle
2130}
2131
2132void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2133  collector()->gc_epilogue(full);
2134
2135  // When using ParNew, promotion tracking should have already been
2136  // disabled. However, the prologue (which enables promotion
2137  // tracking) and epilogue are called irrespective of the type of
2138  // GC. So they will also be called before and after Full GCs, during
2139  // which promotion tracking will not be explicitly disabled. So,
2140  // it's safer to also disable it here too (to be symmetric with
2141  // enabling it in the prologue).
2142  for (uint i = 0; i < ParallelGCThreads; i++) {
2143    _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2144  }
2145}
2146
2147void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2148  assert(!incremental_collection_failed(), "Should have been cleared");
2149  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2150  cmsSpace()->gc_epilogue();
2151  // Print stat counters
2152  NOT_PRODUCT(
2153    assert(_numObjectsAllocated == 0, "check");
2154    assert(_numWordsAllocated == 0, "check");
2155    log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
2156                                     _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2157    _numObjectsPromoted = 0;
2158    _numWordsPromoted   = 0;
2159  )
2160
2161  // The call down the chain in contiguous_available() needs the freelistLock,
2162  // so print this out before releasing the freelistLock.
2163  log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
2164}
2165
2166#ifndef PRODUCT
2167bool CMSCollector::have_cms_token() {
2168  Thread* thr = Thread::current();
2169  if (thr->is_VM_thread()) {
2170    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2171  } else if (thr->is_ConcurrentGC_thread()) {
2172    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2173  } else if (thr->is_GC_task_thread()) {
2174    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2175           ParGCRareEvent_lock->owned_by_self();
2176  }
2177  return false;
2178}
2179
2180// Check reachability of the given heap address in CMS generation,
2181// treating all other generations as roots.
2182bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2183  // We could "guarantee" below, rather than assert, but I'll
2184  // leave these as "asserts" so that an adventurous debugger
2185  // could try this in the product build provided some subset of
2186  // the conditions were met, provided they were interested in the
2187  // results and knew that the computation below wouldn't interfere
2188  // with other concurrent computations mutating the structures
2189  // being read or written.
2190  assert(SafepointSynchronize::is_at_safepoint(),
2191         "Else mutations in object graph will make answer suspect");
2192  assert(have_cms_token(), "Should hold cms token");
2193  assert(haveFreelistLocks(), "must hold free list locks");
2194  assert_lock_strong(bitMapLock());
2195
2196  // Clear the marking bit map array before starting, but, just
2197  // for kicks, first report if the given address is already marked
2198  tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2199                _markBitMap.isMarked(addr) ? "" : " not");
2200
2201  if (verify_after_remark()) {
2202    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2203    bool result = verification_mark_bm()->isMarked(addr);
2204    tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2205                  result ? "IS" : "is NOT");
2206    return result;
2207  } else {
2208    tty->print_cr("Could not compute result");
2209    return false;
2210  }
2211}
2212#endif
2213
2214void
2215CMSCollector::print_on_error(outputStream* st) {
2216  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2217  if (collector != NULL) {
2218    CMSBitMap* bitmap = &collector->_markBitMap;
2219    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2220    bitmap->print_on_error(st, " Bits: ");
2221
2222    st->cr();
2223
2224    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2225    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2226    mut_bitmap->print_on_error(st, " Bits: ");
2227  }
2228}
2229
2230////////////////////////////////////////////////////////
2231// CMS Verification Support
2232////////////////////////////////////////////////////////
2233// Following the remark phase, the following invariant
2234// should hold -- each object in the CMS heap which is
2235// marked in the verification_mark_bm() should be marked in markBitMap().
2236
2237class VerifyMarkedClosure: public BitMapClosure {
2238  CMSBitMap* _marks;
2239  bool       _failed;
2240
2241 public:
2242  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2243
2244  bool do_bit(size_t offset) {
2245    HeapWord* addr = _marks->offsetToHeapWord(offset);
2246    if (!_marks->isMarked(addr)) {
2247      Log(gc, verify) log;
2248      ResourceMark rm;
2249      oop(addr)->print_on(log.error_stream());
2250      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2251      _failed = true;
2252    }
2253    return true;
2254  }
2255
2256  bool failed() { return _failed; }
2257};
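// How VerifyMarkedClosure is driven (added illustration, mirroring
// verify_after_remark_work_1/2() below): the closure is constructed over the
// CMS mark bit map and then iterated over the freshly computed verification
// bit map, so every verification mark must have a corresponding CMS mark:
//
//   VerifyMarkedClosure vcl(markBitMap());
//   verification_mark_bm()->iterate(&vcl);
//   if (vcl.failed()) { ... report and fail ... }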
2258
2259bool CMSCollector::verify_after_remark() {
2260  GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
2261  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2262  static bool init = false;
2263
2264  assert(SafepointSynchronize::is_at_safepoint(),
2265         "Else mutations in object graph will make answer suspect");
2266  assert(have_cms_token(),
2267         "Else there may be mutual interference in use of "
2268         " verification data structures");
2269  assert(_collectorState > Marking && _collectorState <= Sweeping,
2270         "Else marking info checked here may be obsolete");
2271  assert(haveFreelistLocks(), "must hold free list locks");
2272  assert_lock_strong(bitMapLock());
2273
2274
2275  // Allocate marking bit map if not already allocated
2276  if (!init) { // first time
2277    if (!verification_mark_bm()->allocate(_span)) {
2278      return false;
2279    }
2280    init = true;
2281  }
2282
2283  assert(verification_mark_stack()->isEmpty(), "Should be empty");
2284
2285  // Turn off refs discovery -- so we will be tracing through refs.
2286  // This is as intended, because by this time
2287  // GC must already have cleared any refs that need to be cleared,
2288  // and traced those that need to be marked; moreover,
2289  // the marking done here is not going to interfere in any
2290  // way with the marking information used by GC.
2291  NoRefDiscovery no_discovery(ref_processor());
2292
2293#if defined(COMPILER2) || INCLUDE_JVMCI
2294  DerivedPointerTableDeactivate dpt_deact;
2295#endif
2296
2297  // Clear any marks from a previous round
2298  verification_mark_bm()->clear_all();
2299  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2300  verify_work_stacks_empty();
2301
2302  GenCollectedHeap* gch = GenCollectedHeap::heap();
2303  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2304  // Update the saved marks which may affect the root scans.
2305  gch->save_marks();
2306
2307  if (CMSRemarkVerifyVariant == 1) {
2308    // In this first variant of verification, we complete
2309    // all marking, then check if the new marks-vector is
2310    // a subset of the CMS marks-vector.
2311    verify_after_remark_work_1();
2312  } else {
2313    guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
2314    // In this second variant of verification, we flag an error
2315    // (i.e. an object reachable in the new marks-vector not reachable
2316    // in the CMS marks-vector) immediately, also indicating the
2317    // identity of an object (A) that references the unmarked object (B) --
2318    // presumably, a mutation to A failed to be picked up by preclean/remark?
2319    verify_after_remark_work_2();
2320  }
2321
2322  return true;
2323}
2324
2325void CMSCollector::verify_after_remark_work_1() {
2326  ResourceMark rm;
2327  HandleMark  hm;
2328  GenCollectedHeap* gch = GenCollectedHeap::heap();
2329
2330  // Get a clear set of claim bits for the roots processing to work with.
2331  ClassLoaderDataGraph::clear_claimed_marks();
2332
2333  // Mark from roots one level into CMS
2334  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2335  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2336
2337  {
2338    StrongRootsScope srs(1);
2339
2340    gch->cms_process_roots(&srs,
2341                           true,   // young gen as roots
2342                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2343                           should_unload_classes(),
2344                           &notOlder,
2345                           NULL);
2346  }
2347
2348  // Now mark from the roots
2349  MarkFromRootsClosure markFromRootsClosure(this, _span,
2350    verification_mark_bm(), verification_mark_stack(),
2351    false /* don't yield */, true /* verifying */);
2352  assert(_restart_addr == NULL, "Expected pre-condition");
2353  verification_mark_bm()->iterate(&markFromRootsClosure);
2354  while (_restart_addr != NULL) {
2355    // Deal with stack overflow: by restarting at the indicated
2356    // address.
2357    HeapWord* ra = _restart_addr;
2358    markFromRootsClosure.reset(ra);
2359    _restart_addr = NULL;
2360    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2361  }
2362  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2363  verify_work_stacks_empty();
2364
2365  // Marking completed -- now verify that each bit marked in
2366  // verification_mark_bm() is also marked in markBitMap(); flag all
2367  // errors by printing corresponding objects.
2368  VerifyMarkedClosure vcl(markBitMap());
2369  verification_mark_bm()->iterate(&vcl);
2370  if (vcl.failed()) {
2371    Log(gc, verify) log;
2372    log.error("Failed marking verification after remark");
2373    ResourceMark rm;
2374    gch->print_on(log.error_stream());
2375    fatal("CMS: failed marking verification after remark");
2376  }
2377}
2378
2379class VerifyKlassOopsKlassClosure : public KlassClosure {
2380  class VerifyKlassOopsClosure : public OopClosure {
2381    CMSBitMap* _bitmap;
2382   public:
2383    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2384    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2385    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2386  } _oop_closure;
2387 public:
2388  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2389  void do_klass(Klass* k) {
2390    k->oops_do(&_oop_closure);
2391  }
2392};
2393
2394void CMSCollector::verify_after_remark_work_2() {
2395  ResourceMark rm;
2396  HandleMark  hm;
2397  GenCollectedHeap* gch = GenCollectedHeap::heap();
2398
2399  // Get a clear set of claim bits for the roots processing to work with.
2400  ClassLoaderDataGraph::clear_claimed_marks();
2401
2402  // Mark from roots one level into CMS
2403  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2404                                     markBitMap());
2405  CLDToOopClosure cld_closure(&notOlder, true);
2406
2407  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2408
2409  {
2410    StrongRootsScope srs(1);
2411
2412    gch->cms_process_roots(&srs,
2413                           true,   // young gen as roots
2414                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2415                           should_unload_classes(),
2416                           &notOlder,
2417                           &cld_closure);
2418  }
2419
2420  // Now mark from the roots
2421  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2422    verification_mark_bm(), markBitMap(), verification_mark_stack());
2423  assert(_restart_addr == NULL, "Expected pre-condition");
2424  verification_mark_bm()->iterate(&markFromRootsClosure);
2425  while (_restart_addr != NULL) {
2426    // Deal with stack overflow: by restarting at the indicated
2427    // address.
2428    HeapWord* ra = _restart_addr;
2429    markFromRootsClosure.reset(ra);
2430    _restart_addr = NULL;
2431    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2432  }
2433  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2434  verify_work_stacks_empty();
2435
2436  VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2437  ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2438
2439  // Marking completed -- now verify that each bit marked in
2440  // verification_mark_bm() is also marked in markBitMap(); flag all
2441  // errors by printing corresponding objects.
2442  VerifyMarkedClosure vcl(markBitMap());
2443  verification_mark_bm()->iterate(&vcl);
2444  assert(!vcl.failed(), "Else verification above should not have succeeded");
2445}
2446
2447void ConcurrentMarkSweepGeneration::save_marks() {
2448  // delegate to CMS space
2449  cmsSpace()->save_marks();
2450}
2451
2452bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2453  return cmsSpace()->no_allocs_since_save_marks();
2454}
2455
2456#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2457                                                                \
2458void ConcurrentMarkSweepGeneration::                            \
2459oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2460  cl->set_generation(this);                                     \
2461  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2462  cl->reset_generation();                                       \
2463  save_marks();                                                 \
2464}
2465
2466ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
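// Added commentary: the macro application above stamps out one
// oop_since_save_marks_iterate##nv_suffix() definition per closure type listed
// by ALL_SINCE_SAVE_MARKS_CLOSURES. For a hypothetical closure type FooClosure
// paired with the suffix _nv (placeholder names), the expansion is simply:
//
//   void ConcurrentMarkSweepGeneration::oop_since_save_marks_iterate_nv(FooClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }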
2467
2468void
2469ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2470  if (freelistLock()->owned_by_self()) {
2471    Generation::oop_iterate(cl);
2472  } else {
2473    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2474    Generation::oop_iterate(cl);
2475  }
2476}
2477
2478void
2479ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2480  if (freelistLock()->owned_by_self()) {
2481    Generation::object_iterate(cl);
2482  } else {
2483    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2484    Generation::object_iterate(cl);
2485  }
2486}
2487
2488void
2489ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2490  if (freelistLock()->owned_by_self()) {
2491    Generation::safe_object_iterate(cl);
2492  } else {
2493    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2494    Generation::safe_object_iterate(cl);
2495  }
2496}
2497
2498void
2499ConcurrentMarkSweepGeneration::post_compact() {
2500}
2501
2502void
2503ConcurrentMarkSweepGeneration::prepare_for_verify() {
2504  // Fix the linear allocation blocks to look like free blocks.
2505
2506  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2507  // are not called when the heap is verified during universe initialization and
2508  // at vm shutdown.
2509  if (freelistLock()->owned_by_self()) {
2510    cmsSpace()->prepare_for_verify();
2511  } else {
2512    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2513    cmsSpace()->prepare_for_verify();
2514  }
2515}
2516
2517void
2518ConcurrentMarkSweepGeneration::verify() {
2519  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2520  // are not called when the heap is verified during universe initialization and
2521  // at vm shutdown.
2522  if (freelistLock()->owned_by_self()) {
2523    cmsSpace()->verify();
2524  } else {
2525    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2526    cmsSpace()->verify();
2527  }
2528}
2529
2530void CMSCollector::verify() {
2531  _cmsGen->verify();
2532}
2533
2534#ifndef PRODUCT
2535bool CMSCollector::overflow_list_is_empty() const {
2536  assert(_num_par_pushes >= 0, "Inconsistency");
2537  if (_overflow_list == NULL) {
2538    assert(_num_par_pushes == 0, "Inconsistency");
2539  }
2540  return _overflow_list == NULL;
2541}
2542
2543// The methods verify_work_stacks_empty() and verify_overflow_empty()
2544// merely consolidate assertion checks that appear to occur together frequently.
2545void CMSCollector::verify_work_stacks_empty() const {
2546  assert(_markStack.isEmpty(), "Marking stack should be empty");
2547  assert(overflow_list_is_empty(), "Overflow list should be empty");
2548}
2549
2550void CMSCollector::verify_overflow_empty() const {
2551  assert(overflow_list_is_empty(), "Overflow list should be empty");
2552  assert(no_preserved_marks(), "No preserved marks");
2553}
2554#endif // PRODUCT
2555
2556// Decide if we want to enable class unloading as part of the
2557// ensuing concurrent GC cycle. We will collect and
2558// unload classes if it's the case that:
2559//  (a) class unloading is enabled at the command line, and
2560//  (b) old gen is getting really full
2561// NOTE: Provided there is no change in the state of the heap between
2562// calls to this method, it should have idempotent results. Moreover,
2563// its results should be monotonically increasing (i.e. going from 0 to 1,
2564// but not 1 to 0) between successive calls between which the heap was
2565// not collected. For the implementation below, it must thus rely on
2566// the property that concurrent_cycles_since_last_unload()
2567// will not decrease unless a collection cycle happened and that
2568// _cmsGen->is_too_full() are
2569// themselves also monotonic in that sense. See check_monotonicity()
2570// below.
2571void CMSCollector::update_should_unload_classes() {
2572  _should_unload_classes = false;
2573  if (CMSClassUnloadingEnabled) {
2574    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2575                              CMSClassUnloadingMaxInterval)
2576                           || _cmsGen->is_too_full();
2577  }
2578}
2579
2580bool ConcurrentMarkSweepGeneration::is_too_full() const {
2581  bool res = should_concurrent_collect();
2582  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2583  return res;
2584}
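// For illustration (assuming, as seems to be the usual default here, that
// CMSClassUnloadingEnabled is true and CMSClassUnloadingMaxInterval is 0):
// the interval test "concurrent_cycles_since_last_unload() >= 0" is then
// trivially true, so classes are unloaded on every concurrent cycle. Raising
// CMSClassUnloadingMaxInterval to N defers unloading until N cycles have
// elapsed since the last unload, or until is_too_full() above reports the old
// gen as too full (occupancy above CMSIsTooFullPercentage while a concurrent
// collection is otherwise warranted).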
2585
2586void CMSCollector::setup_cms_unloading_and_verification_state() {
2587  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2588                             || VerifyBeforeExit;
2589  const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2590
2591  // We set the proper root for this CMS cycle here.
2592  if (should_unload_classes()) {   // Should unload classes this cycle
2593    remove_root_scanning_option(rso);  // Shrink the root set appropriately
2594    set_verifying(should_verify);    // Set verification state for this cycle
2595    return;                            // Nothing else needs to be done at this time
2596  }
2597
2598  // Not unloading classes this cycle
2599  assert(!should_unload_classes(), "Inconsistency!");
2600
2601  // If we are not unloading classes then add SO_AllCodeCache to root
2602  // scanning options.
2603  add_root_scanning_option(rso);
2604
2605  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2606    set_verifying(true);
2607  } else if (verifying() && !should_verify) {
2608    // We were verifying, but some verification flags got disabled.
2609    set_verifying(false);
2610    // Exclude symbols, strings and code cache elements from root scanning to
2611    // reduce initial mark (IM) and remark (RM) pauses.
2612    remove_root_scanning_option(rso);
2613  }
2614}
2615
2616
2617#ifndef PRODUCT
2618HeapWord* CMSCollector::block_start(const void* p) const {
2619  const HeapWord* addr = (HeapWord*)p;
2620  if (_span.contains(p)) {
2621    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2622      return _cmsGen->cmsSpace()->block_start(p);
2623    }
2624  }
2625  return NULL;
2626}
2627#endif
2628
2629HeapWord*
2630ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2631                                                   bool   tlab,
2632                                                   bool   parallel) {
2633  CMSSynchronousYieldRequest yr;
2634  assert(!tlab, "Can't deal with TLAB allocation");
2635  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2636  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2637  if (GCExpandToAllocateDelayMillis > 0) {
2638    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2639  }
2640  return have_lock_and_allocate(word_size, tlab);
2641}
2642
2643void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2644    size_t bytes,
2645    size_t expand_bytes,
2646    CMSExpansionCause::Cause cause)
2647{
2648
2649  bool success = expand(bytes, expand_bytes);
2650
2651  // remember why we expanded; this information is used
2652  // by shouldConcurrentCollect() when making decisions on whether to start
2653  // a new CMS cycle.
2654  if (success) {
2655    set_expansion_cause(cause);
2656    log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
2657  }
2658}
2659
2660HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2661  HeapWord* res = NULL;
2662  MutexLocker x(ParGCRareEvent_lock);
2663  while (true) {
2664    // Expansion by some other thread might make alloc OK now:
2665    res = ps->lab.alloc(word_sz);
2666    if (res != NULL) return res;
2667    // If there's not enough expansion space available, give up.
2668    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2669      return NULL;
2670    }
2671    // Otherwise, we try expansion.
2672    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2673    // Now go around the loop and try alloc again;
2674    // A competing par_promote might beat us to the expansion space,
2675    // so we may go around the loop again if promotion fails again.
2676    if (GCExpandToAllocateDelayMillis > 0) {
2677      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2678    }
2679  }
2680}
2681
2682
2683bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2684  PromotionInfo* promo) {
2685  MutexLocker x(ParGCRareEvent_lock);
2686  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2687  while (true) {
2688    // Expansion by some other thread might make alloc OK now:
2689    if (promo->ensure_spooling_space()) {
2690      assert(promo->has_spooling_space(),
2691             "Post-condition of successful ensure_spooling_space()");
2692      return true;
2693    }
2694    // If there's not enough expansion space available, give up.
2695    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2696      return false;
2697    }
2698    // Otherwise, we try expansion.
2699    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2700    // Now go around the loop and try alloc again;
2701    // A competing allocation might beat us to the expansion space,
2702    // so we may go around the loop again if allocation fails again.
2703    if (GCExpandToAllocateDelayMillis > 0) {
2704      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2705    }
2706  }
2707}
2708
2709void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2710  // Only shrink if a compaction was done so that all the free space
2711  // in the generation is in a contiguous block at the end.
2712  if (did_compact()) {
2713    CardGeneration::shrink(bytes);
2714  }
2715}
2716
2717void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2718  assert_locked_or_safepoint(Heap_lock);
2719}
2720
2721void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2722  assert_locked_or_safepoint(Heap_lock);
2723  assert_lock_strong(freelistLock());
2724  log_trace(gc)("Shrinking of CMS not yet implemented");
2725  return;
2726}
2727
2728
2729// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2730// phases.
2731class CMSPhaseAccounting: public StackObj {
2732 public:
2733  CMSPhaseAccounting(CMSCollector *collector,
2734                     const char *title);
2735  ~CMSPhaseAccounting();
2736
2737 private:
2738  CMSCollector *_collector;
2739  const char *_title;
2740  GCTraceConcTime(Info, gc) _trace_time;
2741
2742 public:
2743  // Not MT-safe; so do not pass around these StackObj's
2744  // where they may be accessed by other threads.
2745  double wallclock_millis() {
2746    return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2747  }
2748};
2749
2750CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2751                                       const char *title) :
2752  _collector(collector), _title(title), _trace_time(title) {
2753
2754  _collector->resetYields();
2755  _collector->resetTimer();
2756  _collector->startTimer();
2757  _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2758}
2759
2760CMSPhaseAccounting::~CMSPhaseAccounting() {
2761  _collector->gc_timer_cm()->register_gc_concurrent_end();
2762  _collector->stopTimer();
2763  log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2764  log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2765}
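// Illustrative usage, mirroring the pattern in markFromRoots(), preclean() and
// abortable_preclean() below: the accounting object is stack-allocated so that
// its constructor/destructor bracket a concurrent phase.
//
//   GCTraceCPUTime tcpu;
//   CMSPhaseAccounting pa(this, "Concurrent Mark");
//   bool res = markFromRootsWork();  // phase body; may yield repeatedly
//   // ~CMSPhaseAccounting() then stops the timer and logs the phase summary.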
2766
2767// CMS work
2768
2769// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2770class CMSParMarkTask : public AbstractGangTask {
2771 protected:
2772  CMSCollector*     _collector;
2773  uint              _n_workers;
2774  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2775      AbstractGangTask(name),
2776      _collector(collector),
2777      _n_workers(n_workers) {}
2778  // Work method in support of parallel rescan ... of young gen spaces
2779  void do_young_space_rescan(OopsInGenClosure* cl,
2780                             ContiguousSpace* space,
2781                             HeapWord** chunk_array, size_t chunk_top);
2782  void work_on_young_gen_roots(OopsInGenClosure* cl);
2783};
2784
2785// Parallel initial mark task
2786class CMSParInitialMarkTask: public CMSParMarkTask {
2787  StrongRootsScope* _strong_roots_scope;
2788 public:
2789  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2790      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2791      _strong_roots_scope(strong_roots_scope) {}
2792  void work(uint worker_id);
2793};
2794
2795// Checkpoint the roots into this generation from outside
2796// this generation. [Note this initial checkpoint need only
2797// be approximate -- we'll do a catch up phase subsequently.]
2798void CMSCollector::checkpointRootsInitial() {
2799  assert(_collectorState == InitialMarking, "Wrong collector state");
2800  check_correct_thread_executing();
2801  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2802
2803  save_heap_summary();
2804  report_heap_summary(GCWhen::BeforeGC);
2805
2806  ReferenceProcessor* rp = ref_processor();
2807  assert(_restart_addr == NULL, "Control point invariant");
2808  {
2809    // acquire locks for subsequent manipulations
2810    MutexLockerEx x(bitMapLock(),
2811                    Mutex::_no_safepoint_check_flag);
2812    checkpointRootsInitialWork();
2813    // enable ("weak") refs discovery
2814    rp->enable_discovery();
2815    _collectorState = Marking;
2816  }
2817}
2818
2819void CMSCollector::checkpointRootsInitialWork() {
2820  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2821  assert(_collectorState == InitialMarking, "just checking");
2822
2823  // Already have locks.
2824  assert_lock_strong(bitMapLock());
2825  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2826
2827  // Setup the verification and class unloading state for this
2828  // CMS collection cycle.
2829  setup_cms_unloading_and_verification_state();
2830
2831  GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2832
2833  // Reset all the PLAB chunk arrays if necessary.
2834  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2835    reset_survivor_plab_arrays();
2836  }
2837
2838  ResourceMark rm;
2839  HandleMark  hm;
2840
2841  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2842  GenCollectedHeap* gch = GenCollectedHeap::heap();
2843
2844  verify_work_stacks_empty();
2845  verify_overflow_empty();
2846
2847  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2848  // Update the saved marks which may affect the root scans.
2849  gch->save_marks();
2850
2851  // weak reference processing has not started yet.
2852  ref_processor()->set_enqueuing_is_done(false);
2853
2854  // Need to remember all newly created CLDs,
2855  // so that we can guarantee that the remark finds them.
2856  ClassLoaderDataGraph::remember_new_clds(true);
2857
2858  // Whenever a CLD is found, it will be claimed before proceeding to mark
2859  // the klasses. The claimed marks need to be cleared before marking starts.
2860  ClassLoaderDataGraph::clear_claimed_marks();
2861
2862  print_eden_and_survivor_chunk_arrays();
2863
2864  {
2865#if defined(COMPILER2) || INCLUDE_JVMCI
2866    DerivedPointerTableDeactivate dpt_deact;
2867#endif
2868    if (CMSParallelInitialMarkEnabled) {
2869      // The parallel version.
2870      WorkGang* workers = gch->workers();
2871      assert(workers != NULL, "Need parallel worker threads.");
2872      uint n_workers = workers->active_workers();
2873
2874      StrongRootsScope srs(n_workers);
2875
2876      CMSParInitialMarkTask tsk(this, &srs, n_workers);
2877      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2878      // If the total number of workers is greater than 1, then multiple workers
2879      // may be used at some time and the initialization has been set
2880      // such that the single threaded path cannot be used.
2881      if (workers->total_workers() > 1) {
2882        workers->run_task(&tsk);
2883      } else {
2884        tsk.work(0);
2885      }
2886    } else {
2887      // The serial version.
2888      CLDToOopClosure cld_closure(&notOlder, true);
2889      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2890
2891      StrongRootsScope srs(1);
2892
2893      gch->cms_process_roots(&srs,
2894                             true,   // young gen as roots
2895                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
2896                             should_unload_classes(),
2897                             &notOlder,
2898                             &cld_closure);
2899    }
2900  }
2901
2902  // Clear mod-union table; it will be dirtied in the prologue of
2903  // CMS generation for each young generation collection.
2904
2905  assert(_modUnionTable.isAllClear(),
2906       "Was cleared in most recent final checkpoint phase"
2907       " or no bits are set in the gc_prologue before the start of the next "
2908       "marking phase.");
2909
2910  assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2911
2912  // Save the end of the used_region of the constituent generations
2913  // to be used to limit the extent of sweep in each generation.
2914  save_sweep_limits();
2915  verify_overflow_empty();
2916}
2917
2918bool CMSCollector::markFromRoots() {
2919  // we might be tempted to assert that:
2920  // assert(!SafepointSynchronize::is_at_safepoint(),
2921  //        "inconsistent argument?");
2922  // However that wouldn't be right, because it's possible that
2923  // a safepoint is indeed in progress as a young generation
2924  // stop-the-world GC happens even as we mark in this generation.
2925  assert(_collectorState == Marking, "inconsistent state?");
2926  check_correct_thread_executing();
2927  verify_overflow_empty();
2928
2929  // Weak ref discovery note: We may be discovering weak
2930  // refs in this generation concurrent (but interleaved) with
2931  // weak ref discovery by the young generation collector.
2932
2933  CMSTokenSyncWithLocks ts(true, bitMapLock());
2934  GCTraceCPUTime tcpu;
2935  CMSPhaseAccounting pa(this, "Concurrent Mark");
2936  bool res = markFromRootsWork();
2937  if (res) {
2938    _collectorState = Precleaning;
2939  } else { // We failed and a foreground collection wants to take over
2940    assert(_foregroundGCIsActive, "internal state inconsistency");
2941    assert(_restart_addr == NULL,  "foreground will restart from scratch");
2942    log_debug(gc)("bailing out to foreground collection");
2943  }
2944  verify_overflow_empty();
2945  return res;
2946}
2947
2948bool CMSCollector::markFromRootsWork() {
2949  // iterate over marked bits in bit map, doing a full scan and mark
2950  // from these roots using the following algorithm:
2951  // . if oop is to the right of the current scan pointer,
2952  //   mark corresponding bit (we'll process it later)
2953  // . else (oop is to left of current scan pointer)
2954  //   push oop on marking stack
2955  // . drain the marking stack
2956
2957  // Note that when we do a marking step we need to hold the
2958  // bit map lock -- recall that direct allocation (by mutators)
2959  // and promotion (by the young generation collector) is also
2960  // marking the bit map. [the so-called allocate live policy.]
2961  // Because the implementation of bit map marking is not
2962  // robust with respect to simultaneous marking of bits in the same word,
2963  // we need to make sure that there is no interference
2964  // between such concurrent updates.
2965
2966  // already have locks
2967  assert_lock_strong(bitMapLock());
2968
2969  verify_work_stacks_empty();
2970  verify_overflow_empty();
2971  bool result = false;
2972  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
2973    result = do_marking_mt();
2974  } else {
2975    result = do_marking_st();
2976  }
2977  return result;
2978}
2979
2980// Forward decl
2981class CMSConcMarkingTask;
2982
2983class CMSConcMarkingTerminator: public ParallelTaskTerminator {
2984  CMSCollector*       _collector;
2985  CMSConcMarkingTask* _task;
2986 public:
2987  virtual void yield();
2988
2989  // "n_threads" is the number of threads to be terminated.
2990  // "queue_set" is a set of work queues of other threads.
2991  // "collector" is the CMS collector associated with this task terminator.
2992  // "yield" indicates whether we need the gang as a whole to yield.
2993  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
2994    ParallelTaskTerminator(n_threads, queue_set),
2995    _collector(collector) { }
2996
2997  void set_task(CMSConcMarkingTask* task) {
2998    _task = task;
2999  }
3000};
3001
3002class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3003  CMSConcMarkingTask* _task;
3004 public:
3005  bool should_exit_termination();
3006  void set_task(CMSConcMarkingTask* task) {
3007    _task = task;
3008  }
3009};
3010
3011// MT Concurrent Marking Task
3012class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3013  CMSCollector*             _collector;
3014  uint                      _n_workers;      // requested/desired # workers
3015  bool                      _result;
3016  CompactibleFreeListSpace* _cms_space;
3017  char                      _pad_front[64];   // padding to ...
3018  HeapWord* volatile        _global_finger;   // ... avoid sharing cache line
3019  char                      _pad_back[64];
3020  HeapWord*                 _restart_addr;
3021
3022  //  Exposed here for yielding support
3023  Mutex* const _bit_map_lock;
3024
3025  // The per thread work queues, available here for stealing
3026  OopTaskQueueSet*  _task_queues;
3027
3028  // Termination (and yielding) support
3029  CMSConcMarkingTerminator _term;
3030  CMSConcMarkingTerminatorTerminator _term_term;
3031
3032 public:
3033  CMSConcMarkingTask(CMSCollector* collector,
3034                 CompactibleFreeListSpace* cms_space,
3035                 YieldingFlexibleWorkGang* workers,
3036                 OopTaskQueueSet* task_queues):
3037    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3038    _collector(collector),
3039    _cms_space(cms_space),
3040    _n_workers(0), _result(true),
3041    _task_queues(task_queues),
3042    _term(_n_workers, task_queues, _collector),
3043    _bit_map_lock(collector->bitMapLock())
3044  {
3045    _requested_size = _n_workers;
3046    _term.set_task(this);
3047    _term_term.set_task(this);
3048    _restart_addr = _global_finger = _cms_space->bottom();
3049  }
3050
3051
3052  OopTaskQueueSet* task_queues()  { return _task_queues; }
3053
3054  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3055
3056  HeapWord* volatile* global_finger_addr() { return &_global_finger; }
3057
3058  CMSConcMarkingTerminator* terminator() { return &_term; }
3059
3060  virtual void set_for_termination(uint active_workers) {
3061    terminator()->reset_for_reuse(active_workers);
3062  }
3063
3064  void work(uint worker_id);
3065  bool should_yield() {
3066    return    ConcurrentMarkSweepThread::should_yield()
3067           && !_collector->foregroundGCIsActive();
3068  }
3069
3070  virtual void coordinator_yield();  // stuff done by coordinator
3071  bool result() { return _result; }
3072
3073  void reset(HeapWord* ra) {
3074    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3075    _restart_addr = _global_finger = ra;
3076    _term.reset_for_reuse();
3077  }
3078
3079  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3080                                           OopTaskQueue* work_q);
3081
3082 private:
3083  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3084  void do_work_steal(int i);
3085  void bump_global_finger(HeapWord* f);
3086};
3087
3088bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3089  assert(_task != NULL, "Error");
3090  return _task->yielding();
3091  // Note that we do not need the disjunct || _task->should_yield() above
3092  // because we want terminating threads to yield only if the task
3093  // is already in the midst of yielding, which happens only after at least one
3094  // thread has yielded.
3095}
3096
3097void CMSConcMarkingTerminator::yield() {
3098  if (_task->should_yield()) {
3099    _task->yield();
3100  } else {
3101    ParallelTaskTerminator::yield();
3102  }
3103}
3104
3105////////////////////////////////////////////////////////////////
3106// Concurrent Marking Algorithm Sketch
3107////////////////////////////////////////////////////////////////
3108// Until all tasks exhausted (both spaces):
3109// -- claim next available chunk
3110// -- bump global finger via CAS
3111// -- find first object that starts in this chunk
3112//    and start scanning bitmap from that position
3113// -- scan marked objects for oops
3114// -- CAS-mark target, and if successful:
3115//    . if target oop is above global finger (volatile read)
3116//      nothing to do
3117//    . if target oop is in chunk and above local finger
3118//        then nothing to do
3119//    . else push on work-queue
3120// -- Deal with possible overflow issues:
3121//    . local work-queue overflow causes stuff to be pushed on
3122//      global (common) overflow queue
3123//    . always first empty local work queue
3124//    . then get a batch of oops from global work queue if any
3125//    . then do work stealing
3126// -- When all tasks claimed (both spaces)
3127//    and local work queue empty,
3128//    then in a loop do:
3129//    . check global overflow stack; steal a batch of oops and trace
3130//    . try to steal from other threads if the global overflow stack is empty
3131//    . if neither is available, offer termination
3132// -- Terminate and return result
3133//
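// In the implementation that follows, the chunk-claiming and global-finger
// protocol of this sketch lives in do_scan_and_mark(), the overflow-stack
// draining and work stealing in do_work_steal() (via ParConcMarkingClosure),
// and the termination handshake in CMSConcMarkingTerminator /
// CMSConcMarkingTerminatorTerminator defined above.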
3134void CMSConcMarkingTask::work(uint worker_id) {
3135  elapsedTimer _timer;
3136  ResourceMark rm;
3137  HandleMark hm;
3138
3139  DEBUG_ONLY(_collector->verify_overflow_empty();)
3140
3141  // Before we begin work, our work queue should be empty
3142  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3143  // Scan the bitmap covering _cms_space, tracing through grey objects.
3144  _timer.start();
3145  do_scan_and_mark(worker_id, _cms_space);
3146  _timer.stop();
3147  log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3148
3149  // ... do work stealing
3150  _timer.reset();
3151  _timer.start();
3152  do_work_steal(worker_id);
3153  _timer.stop();
3154  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3155  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3156  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3157  // Note that under the current task protocol, the
3158  // following assertion is true even if the spaces
3159  // expanded since the completion of the concurrent
3160  // marking. XXX This will likely change under a strict
3161  // ABORT semantics.
3162  // After perm removal the comparison was changed to
3163  // greater than or equal to from strictly greater than.
3164  // Before perm removal the highest address sweep would
3165  // have been at the end of perm gen but now is at the
3166  // end of the tenured gen.
3167  assert(_global_finger >=  _cms_space->end(),
3168         "All tasks have been completed");
3169  DEBUG_ONLY(_collector->verify_overflow_empty();)
3170}
3171
3172void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3173  HeapWord* read = _global_finger;
3174  HeapWord* cur  = read;
3175  while (f > read) {
3176    cur = read;
3177    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3178    if (cur == read) {
3179      // our cas succeeded
3180      assert(_global_finger >= f, "protocol consistency");
3181      break;
3182    }
3183  }
3184}
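// The loop above is the usual lock-free "monotonic maximum" idiom: keep
// re-reading the shared finger and only attempt the CAS while our candidate f
// is still larger; a failed CAS means another worker advanced the finger, in
// which case it either already covers f (done) or we retry against the fresh
// value. A minimal standalone equivalent (illustrative sketch only, using
// std::atomic rather than the HotSpot Atomic API):
//
//   std::atomic<HeapWord*> finger;
//   void bump(HeapWord* f) {
//     HeapWord* cur = finger.load();
//     while (f > cur && !finger.compare_exchange_weak(cur, f)) {
//       // compare_exchange_weak reloads 'cur' on failure; the loop then
//       // re-checks whether f is still ahead of the published finger.
//     }
//   }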
3185
3186// This is really inefficient, and should be redone by
3187// using (not yet available) block-read and -write interfaces to the
3188// stack and the work_queue. XXX FIX ME !!!
3189bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3190                                                      OopTaskQueue* work_q) {
3191  // Fast lock-free check
3192  if (ovflw_stk->length() == 0) {
3193    return false;
3194  }
3195  assert(work_q->size() == 0, "Shouldn't steal");
3196  MutexLockerEx ml(ovflw_stk->par_lock(),
3197                   Mutex::_no_safepoint_check_flag);
3198  // Grab up to 1/4 of the remaining capacity of the work queue
3199  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3200                    (size_t)ParGCDesiredObjsFromOverflowList);
3201  num = MIN2(num, ovflw_stk->length());
3202  for (int i = (int) num; i > 0; i--) {
3203    oop cur = ovflw_stk->pop();
3204    assert(cur != NULL, "Counted wrong?");
3205    work_q->push(cur);
3206  }
3207  return num > 0;
3208}
3209
3210void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3211  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3212  int n_tasks = pst->n_tasks();
3213  // We allow that there may be no tasks to do here because
3214  // we are restarting after a stack overflow.
3215  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3216  uint nth_task = 0;
3217
3218  HeapWord* aligned_start = sp->bottom();
3219  if (sp->used_region().contains(_restart_addr)) {
3220    // Align down to a card boundary for the start of 0th task
3221    // for this space.
3222    aligned_start = align_ptr_down(_restart_addr, CardTableModRefBS::card_size);
3223  }
3224
3225  size_t chunk_size = sp->marking_task_size();
3226  while (!pst->is_task_claimed(/* reference */ nth_task)) {
3227    // Having claimed the nth task in this space,
3228    // compute the chunk that it corresponds to:
3229    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3230                               aligned_start + (nth_task+1)*chunk_size);
3231    // Try and bump the global finger via a CAS;
3232    // note that we need to do the global finger bump
3233    // _before_ taking the intersection below, because
3234    // the task corresponding to that region will be
3235    // deemed done even if the used_region() expands
3236    // because of allocation -- as it almost certainly will
3237    // during start-up while the threads yield in the
3238    // closure below.
3239    HeapWord* finger = span.end();
3240    bump_global_finger(finger);   // atomically
3241    // There are null tasks here corresponding to chunks
3242    // beyond the "top" address of the space.
3243    span = span.intersection(sp->used_region());
3244    if (!span.is_empty()) {  // Non-null task
3245      HeapWord* prev_obj;
3246      assert(!span.contains(_restart_addr) || nth_task == 0,
3247             "Inconsistency");
3248      if (nth_task == 0) {
3249        // For the 0th task, we'll not need to compute a block_start.
3250        if (span.contains(_restart_addr)) {
3251          // In the case of a restart because of stack overflow,
3252          // we might additionally skip a chunk prefix.
3253          prev_obj = _restart_addr;
3254        } else {
3255          prev_obj = span.start();
3256        }
3257      } else {
3258        // We want to skip the first object because
3259        // the protocol is to scan any object in its entirety
3260        // that _starts_ in this span; a fortiori, any
3261        // object starting in an earlier span is scanned
3262        // as part of an earlier claimed task.
3263        // Below we use the "careful" version of block_start
3264        // so we do not try to navigate uninitialized objects.
3265        prev_obj = sp->block_start_careful(span.start());
3266        // Below we use a variant of block_size that uses the
3267        // Printezis bits to avoid waiting for allocated
3268        // objects to become initialized/parsable.
3269        while (prev_obj < span.start()) {
3270          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3271          if (sz > 0) {
3272            prev_obj += sz;
3273          } else {
3274            // In this case we may end up doing a bit of redundant
3275            // scanning, but that appears unavoidable, short of
3276            // locking the free list locks; see bug 6324141.
3277            break;
3278          }
3279        }
3280      }
3281      if (prev_obj < span.end()) {
3282        MemRegion my_span = MemRegion(prev_obj, span.end());
3283        // Do the marking work within a non-empty span --
3284        // the last argument to the constructor indicates whether the
3285        // iteration should be incremental with periodic yields.
3286        ParMarkFromRootsClosure cl(this, _collector, my_span,
3287                                   &_collector->_markBitMap,
3288                                   work_queue(i),
3289                                   &_collector->_markStack);
3290        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3291      } // else nothing to do for this task
3292    }   // else nothing to do for this task
3293  }
3294  // We'd be tempted to assert here that since there are no
3295  // more tasks left to claim in this space, the global_finger
3296  // must exceed space->top() and a fortiori space->end(). However,
3297  // that would not quite be correct because the bumping of
3298  // global_finger occurs strictly after the claiming of a task,
3299  // so by the time we reach here the global finger may not yet
3300  // have been bumped up by the thread that claimed the last
3301  // task.
3302  pst->all_tasks_completed();
3303}
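// Task geometry above, spelled out: task k nominally covers the half-open
// interval [aligned_start + k*chunk_size, aligned_start + (k+1)*chunk_size),
// which is then intersected with used_region(). Chunks lying entirely beyond
// top() intersect to an empty region and are simply skipped, but the global
// finger is still bumped past them first, so objects later allocated in that
// range sit below the finger and are handled via the work-queue push path
// rather than by a rescan of the chunk.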
3304
3305class ParConcMarkingClosure: public MetadataAwareOopClosure {
3306 private:
3307  CMSCollector* _collector;
3308  CMSConcMarkingTask* _task;
3309  MemRegion     _span;
3310  CMSBitMap*    _bit_map;
3311  CMSMarkStack* _overflow_stack;
3312  OopTaskQueue* _work_queue;
3313 protected:
3314  DO_OOP_WORK_DEFN
3315 public:
3316  ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3317                        CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3318    MetadataAwareOopClosure(collector->ref_processor()),
3319    _collector(collector),
3320    _task(task),
3321    _span(collector->_span),
3322    _work_queue(work_queue),
3323    _bit_map(bit_map),
3324    _overflow_stack(overflow_stack)
3325  { }
3326  virtual void do_oop(oop* p);
3327  virtual void do_oop(narrowOop* p);
3328
3329  void trim_queue(size_t max);
3330  void handle_stack_overflow(HeapWord* lost);
3331  void do_yield_check() {
3332    if (_task->should_yield()) {
3333      _task->yield();
3334    }
3335  }
3336};
3337
3338DO_OOP_WORK_IMPL(ParConcMarkingClosure)
3339
3340// Grey object scanning during work stealing phase --
3341// the salient assumption here is that any references
3342// that are in these stolen objects being scanned must
3343// already have been initialized (else they would not have
3344// been published), so we do not need to check for
3345// uninitialized objects before pushing here.
3346void ParConcMarkingClosure::do_oop(oop obj) {
3347  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3348  HeapWord* addr = (HeapWord*)obj;
3349  // Check if oop points into the CMS generation
3350  // and is not marked
3351  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3352    // a white object ...
3353    // If we manage to "claim" the object, by being the
3354    // first thread to mark it, then we push it on our
3355    // marking stack
3356    if (_bit_map->par_mark(addr)) {     // ... now grey
3357      // push on work queue (grey set)
3358      bool simulate_overflow = false;
3359      NOT_PRODUCT(
3360        if (CMSMarkStackOverflowALot &&
3361            _collector->simulate_overflow()) {
3362          // simulate a stack overflow
3363          simulate_overflow = true;
3364        }
3365      )
3366      if (simulate_overflow ||
3367          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3368        // stack overflow
3369        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3370        // We cannot assert that the overflow stack is full because
3371        // it may have been emptied since.
3372        assert(simulate_overflow ||
3373               _work_queue->size() == _work_queue->max_elems(),
3374              "Else push should have succeeded");
3375        handle_stack_overflow(addr);
3376      }
3377    } // Else, some other thread got there first
3378    do_yield_check();
3379  }
3380}
3381
3382void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
3383void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
3384
3385void ParConcMarkingClosure::trim_queue(size_t max) {
3386  while (_work_queue->size() > max) {
3387    oop new_oop;
3388    if (_work_queue->pop_local(new_oop)) {
3389      assert(new_oop->is_oop(), "Should be an oop");
3390      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3391      assert(_span.contains((HeapWord*)new_oop), "Not in span");
3392      new_oop->oop_iterate(this);  // do_oop() above
3393      do_yield_check();
3394    }
3395  }
3396}
3397
3398// Upon stack overflow, we discard (part of) the stack,
3399// remembering the least address amongst those discarded
3400// in CMSCollector's _restart_address.
3401void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3402  // We need to do this under a mutex to prevent other
3403  // workers from interfering with the work done below.
3404  MutexLockerEx ml(_overflow_stack->par_lock(),
3405                   Mutex::_no_safepoint_check_flag);
3406  // Remember the least grey address discarded
3407  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3408  _collector->lower_restart_addr(ra);
3409  _overflow_stack->reset();  // discard stack contents
3410  _overflow_stack->expand(); // expand the stack if possible
3411}
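// Recovery from this overflow is deferred: lower_restart_addr() records the
// least discarded address in the collector's _restart_addr, and the driver
// loops in do_marking_mt() / do_marking_st() below notice the non-NULL
// _restart_addr after the task completes and re-run marking from that point
// (unless a foreground collection has become active, in which case they bail
// out and let it start from scratch).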
3412
3413
3414void CMSConcMarkingTask::do_work_steal(int i) {
3415  OopTaskQueue* work_q = work_queue(i);
3416  oop obj_to_scan;
3417  CMSBitMap* bm = &(_collector->_markBitMap);
3418  CMSMarkStack* ovflw = &(_collector->_markStack);
3419  int* seed = _collector->hash_seed(i);
3420  ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3421  while (true) {
3422    cl.trim_queue(0);
3423    assert(work_q->size() == 0, "Should have been emptied above");
3424    if (get_work_from_overflow_stack(ovflw, work_q)) {
3425      // Can't assert below because the work obtained from the
3426      // overflow stack may already have been stolen from us.
3427      // assert(work_q->size() > 0, "Work from overflow stack");
3428      continue;
3429    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3430      assert(obj_to_scan->is_oop(), "Should be an oop");
3431      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3432      obj_to_scan->oop_iterate(&cl);
3433    } else if (terminator()->offer_termination(&_term_term)) {
3434      assert(work_q->size() == 0, "Impossible!");
3435      break;
3436    } else if (yielding() || should_yield()) {
3437      yield();
3438    }
3439  }
3440}
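// The stealing loop above encodes the priority order from the algorithm
// sketch: always drain the local queue first (trim_queue(0)), then refill from
// the shared overflow stack, then try to steal from other workers, and only
// when all three fail offer termination, yielding whenever the task as a whole
// is yielding or a yield has been requested.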
3441
3442// This is run by the CMS (coordinator) thread.
3443void CMSConcMarkingTask::coordinator_yield() {
3444  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3445         "CMS thread should hold CMS token");
3446  // First give up the locks, then yield, then re-lock
3447  // We should probably use a constructor/destructor idiom to
3448  // do this unlock/lock or modify the MutexUnlocker class to
3449  // serve our purpose. XXX
3450  assert_lock_strong(_bit_map_lock);
3451  _bit_map_lock->unlock();
3452  ConcurrentMarkSweepThread::desynchronize(true);
3453  _collector->stopTimer();
3454  _collector->incrementYields();
3455
3456  // It is possible for whichever thread initiated the yield request
3457  // not to get a chance to wake up and take the bitmap lock between
3458  // this thread releasing it and reacquiring it. So, while the
3459  // should_yield() flag is on, let's sleep for a bit to give the
3460  // other thread a chance to wake up. The limit imposed on the number
3461  // of iterations is defensive, to avoid any unforeseen circumstances
3462  // putting us into an infinite loop. Since it's always been this
3463  // (coordinator_yield()) method that was observed to cause the
3464  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3465  // which is by default non-zero. The other seven methods that
3466  // also perform the yield operation use a different
3467  // parameter (CMSYieldSleepCount), which is by default zero. This way we
3468  // can enable the sleeping for those methods too, if necessary.
3469  // See 6442774.
3470  //
3471  // We really need to reconsider the synchronization between the GC
3472  // thread and the yield-requesting threads in the future and we
3473  // should really use wait/notify, which is the recommended
3474  // way of doing this type of interaction. Additionally, we should
3475  // consolidate the eight methods that do the yield operation, which
3476  // are almost identical, into one for better maintainability and
3477  // readability. See 6445193.
3478  //
3479  // Tony 2006.06.29
3480  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3481                   ConcurrentMarkSweepThread::should_yield() &&
3482                   !CMSCollector::foregroundGCIsActive(); ++i) {
3483    os::sleep(Thread::current(), 1, false);
3484  }
3485
3486  ConcurrentMarkSweepThread::synchronize(true);
3487  _bit_map_lock->lock_without_safepoint_check();
3488  _collector->startTimer();
3489}
3490
3491bool CMSCollector::do_marking_mt() {
3492  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3493  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3494                                                                  conc_workers()->active_workers(),
3495                                                                  Threads::number_of_non_daemon_threads());
3496  num_workers = conc_workers()->update_active_workers(num_workers);
3497  log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
3498
3499  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3500
3501  CMSConcMarkingTask tsk(this,
3502                         cms_space,
3503                         conc_workers(),
3504                         task_queues());
3505
3506  // Since the actual number of workers we get may be different
3507  // from the number we requested above, do we need to do anything different
3508  // below? In particular, may be we need to subclass the SequantialSubTasksDone
3509  // class?? XXX
3510  cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3511
3512  // Refs discovery is already non-atomic.
3513  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3514  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3515  conc_workers()->start_task(&tsk);
3516  while (tsk.yielded()) {
3517    tsk.coordinator_yield();
3518    conc_workers()->continue_task(&tsk);
3519  }
3520  // If the task was aborted, _restart_addr will be non-NULL
3521  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3522  while (_restart_addr != NULL) {
3523    // XXX For now we do not make use of ABORTED state and have not
3524    // yet implemented the right abort semantics (even in the original
3525    // single-threaded CMS case). That needs some more investigation
3526    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3527    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3528    // If _restart_addr is non-NULL, a marking stack overflow
3529    // occurred; we need to do a fresh marking iteration from the
3530    // indicated restart address.
3531    if (_foregroundGCIsActive) {
3532      // We may be running into repeated stack overflows, having
3533      // reached the limit of the stack size, while making very
3534      // slow forward progress. It may be best to bail out and
3535      // let the foreground collector do its job.
3536      // Clear _restart_addr, so that foreground GC
3537      // works from scratch. This avoids the headache of
3538      // a "rescan" which would otherwise be needed because
3539      // of the dirty mod union table & card table.
3540      _restart_addr = NULL;
3541      return false;
3542    }
3543    // Adjust the task to restart from _restart_addr
3544    tsk.reset(_restart_addr);
3545    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3546                  _restart_addr);
3547    _restart_addr = NULL;
3548    // Get the workers going again
3549    conc_workers()->start_task(&tsk);
3550    while (tsk.yielded()) {
3551      tsk.coordinator_yield();
3552      conc_workers()->continue_task(&tsk);
3553    }
3554  }
3555  assert(tsk.completed(), "Inconsistency");
3556  assert(tsk.result() == true, "Inconsistency");
3557  return true;
3558}
3559
3560bool CMSCollector::do_marking_st() {
3561  ResourceMark rm;
3562  HandleMark   hm;
3563
3564  // Temporarily make refs discovery single threaded (non-MT)
3565  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3566  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3567    &_markStack, CMSYield);
3568  // The last argument to the closure constructor above (CMSYield) indicates
3569  // whether the iteration should be incremental with periodic yields.
3570  _markBitMap.iterate(&markFromRootsClosure);
3571  // If _restart_addr is non-NULL, a marking stack overflow
3572  // occurred; we need to do a fresh iteration from the
3573  // indicated restart address.
3574  while (_restart_addr != NULL) {
3575    if (_foregroundGCIsActive) {
3576      // We may be running into repeated stack overflows, having
3577      // reached the limit of the stack size, while making very
3578      // slow forward progress. It may be best to bail out and
3579      // let the foreground collector do its job.
3580      // Clear _restart_addr, so that foreground GC
3581      // works from scratch. This avoids the headache of
3582      // a "rescan" which would otherwise be needed because
3583      // of the dirty mod union table & card table.
3584      _restart_addr = NULL;
3585      return false;  // indicating failure to complete marking
3586    }
3587    // Deal with stack overflow:
3588    // we restart marking from _restart_addr
3589    HeapWord* ra = _restart_addr;
3590    markFromRootsClosure.reset(ra);
3591    _restart_addr = NULL;
3592    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3593  }
3594  return true;
3595}
3596
3597void CMSCollector::preclean() {
3598  check_correct_thread_executing();
3599  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3600  verify_work_stacks_empty();
3601  verify_overflow_empty();
3602  _abort_preclean = false;
3603  if (CMSPrecleaningEnabled) {
3604    if (!CMSEdenChunksRecordAlways) {
3605      _eden_chunk_index = 0;
3606    }
3607    size_t used = get_eden_used();
3608    size_t capacity = get_eden_capacity();
3609    // Don't start sampling unless we will get sufficiently
3610    // many samples.
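    // For example, assuming CMSScheduleRemarkSamplingRatio == 5 and
    // CMSScheduleRemarkEdenPenetration == 50 (believed to be the usual
    // defaults), the test below reduces to used < capacity/10: sampling is
    // only started when eden is less than about one tenth full, so that a
    // reasonable number of samples can still be taken before the remark
    // trigger point is reached.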
3611    if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
3612                * CMSScheduleRemarkEdenPenetration)) {
3613      _start_sampling = true;
3614    } else {
3615      _start_sampling = false;
3616    }
3617    GCTraceCPUTime tcpu;
3618    CMSPhaseAccounting pa(this, "Concurrent Preclean");
3619    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3620  }
3621  CMSTokenSync x(true); // is cms thread
3622  if (CMSPrecleaningEnabled) {
3623    sample_eden();
3624    _collectorState = AbortablePreclean;
3625  } else {
3626    _collectorState = FinalMarking;
3627  }
3628  verify_work_stacks_empty();
3629  verify_overflow_empty();
3630}
3631
3632// Try and schedule the remark such that young gen
3633// occupancy is CMSScheduleRemarkEdenPenetration %.
3634void CMSCollector::abortable_preclean() {
3635  check_correct_thread_executing();
3636  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3637  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3638
3639  // If Eden's current occupancy is below this threshold,
3640  // immediately schedule the remark; else preclean
3641  // past the next scavenge in an effort to
3642  // schedule the pause as described above. By choosing
3643  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3644  // we will never do an actual abortable preclean cycle.
3645  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3646    GCTraceCPUTime tcpu;
3647    CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3648    // We need more smarts in the abortable preclean
3649    // loop below to deal with cases where allocation
3650    // in young gen is very very slow, and our precleaning
3651    // is running a losing race against a horde of
3652    // mutators intent on flooding us with CMS updates
3653    // (dirty cards).
3654    // One, admittedly dumb, strategy is to give up
3655    // after a certain number of abortable precleaning loops
3656    // or after a certain maximum time. We want to make
3657    // this smarter in the next iteration.
3658    // XXX FIX ME!!! YSR
3659    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3660    while (!(should_abort_preclean() ||
3661             ConcurrentMarkSweepThread::cmst()->should_terminate())) {
3662      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3663      cumworkdone += workdone;
3664      loops++;
3665      // Voluntarily terminate abortable preclean phase if we have
3666      // been at it for too long.
3667      if ((CMSMaxAbortablePrecleanLoops != 0) &&
3668          loops >= CMSMaxAbortablePrecleanLoops) {
3669        log_debug(gc)(" CMS: abort preclean due to loops ");
3670        break;
3671      }
3672      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3673        log_debug(gc)(" CMS: abort preclean due to time ");
3674        break;
3675      }
3676      // If we are doing little work each iteration, we should
3677      // take a short break.
3678      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3679        // Sleep for some time, waiting for work to accumulate
3680        stopTimer();
3681        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3682        startTimer();
3683        waited++;
3684      }
3685    }
3686    log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3687                               loops, waited, cumworkdone);
3688  }
3689  CMSTokenSync x(true); // is cms thread
3690  if (_collectorState != Idling) {
3691    assert(_collectorState == AbortablePreclean,
3692           "Spontaneous state transition?");
3693    _collectorState = FinalMarking;
3694  } // Else, a foreground collection completed this CMS cycle.
3695  return;
3696}
3697
3698// Respond to an Eden sampling opportunity
3699void CMSCollector::sample_eden() {
3700  // Make sure a young gc cannot sneak in between our
3701  // reading and recording of a sample.
3702  assert(Thread::current()->is_ConcurrentGC_thread(),
3703         "Only the cms thread may collect Eden samples");
3704  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3705         "Should collect samples while holding CMS token");
3706  if (!_start_sampling) {
3707    return;
3708  }
3709  // When CMSEdenChunksRecordAlways is true, the eden chunk array
3710  // is populated by the young generation.
3711  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3712    if (_eden_chunk_index < _eden_chunk_capacity) {
3713      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3714      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3715             "Unexpected state of Eden");
3716      // We'd like to check that what we just sampled is an oop-start address;
3717      // however, we cannot do that here since the object may not yet have been
3718      // initialized. So we'll instead do the check when we _use_ this sample
3719      // later.
3720      if (_eden_chunk_index == 0 ||
3721          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3722                         _eden_chunk_array[_eden_chunk_index-1])
3723           >= CMSSamplingGrain)) {
3724        _eden_chunk_index++;  // commit sample
3725      }
3726    }
3727  }
3728  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3729    size_t used = get_eden_used();
3730    size_t capacity = get_eden_capacity();
3731    assert(used <= capacity, "Unexpected state of Eden");
3732    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3733      _abort_preclean = true;
3734    }
3735  }
3736}
3737
3738
3739size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3740  assert(_collectorState == Precleaning ||
3741         _collectorState == AbortablePreclean, "incorrect state");
3742  ResourceMark rm;
3743  HandleMark   hm;
3744
3745  // Precleaning is currently not MT but the reference processor
3746  // may be set for MT.  Disable it temporarily here.
3747  ReferenceProcessor* rp = ref_processor();
3748  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3749
3750  // Do one pass of scrubbing the discovered reference lists
3751  // to remove any reference objects with strongly-reachable
3752  // referents.
3753  if (clean_refs) {
3754    CMSPrecleanRefsYieldClosure yield_cl(this);
3755    assert(rp->span().equals(_span), "Spans should be equal");
3756    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3757                                   &_markStack, true /* preclean */);
3758    CMSDrainMarkingStackClosure complete_trace(this,
3759                                   _span, &_markBitMap, &_markStack,
3760                                   &keep_alive, true /* preclean */);
3761
3762    // We don't want this step to interfere with a young
3763    // collection because we don't want to take CPU
3764    // or memory bandwidth away from the young GC threads
3765    // (which may be as many as there are CPUs).
3766    // Note that we don't need to protect ourselves from
3767    // interference with mutators because they can't
3768    // manipulate the discovered reference lists nor affect
3769    // the computed reachability of the referents, the
3770    // only properties manipulated by the precleaning
3771    // of these reference lists.
3772    stopTimer();
3773    CMSTokenSyncWithLocks x(true /* is cms thread */,
3774                            bitMapLock());
3775    startTimer();
3776    sample_eden();
3777
3778    // The following will yield to allow foreground
3779    // collection to proceed promptly. XXX YSR:
3780    // The code in this method may need further
3781    // tweaking for better performance and some restructuring
3782    // for cleaner interfaces.
3783    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3784    rp->preclean_discovered_references(
3785          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3786          gc_timer);
3787  }
3788
3789  if (clean_survivor) {  // preclean the active survivor space(s)
3790    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3791                             &_markBitMap, &_modUnionTable,
3792                             &_markStack, true /* precleaning phase */);
3793    stopTimer();
3794    CMSTokenSyncWithLocks ts(true /* is cms thread */,
3795                             bitMapLock());
3796    startTimer();
3797    unsigned int before_count =
3798      GenCollectedHeap::heap()->total_collections();
3799    SurvivorSpacePrecleanClosure
3800      sss_cl(this, _span, &_markBitMap, &_markStack,
3801             &pam_cl, before_count, CMSYield);
3802    _young_gen->from()->object_iterate_careful(&sss_cl);
3803    _young_gen->to()->object_iterate_careful(&sss_cl);
3804  }
3805  MarkRefsIntoAndScanClosure
3806    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3807             &_markStack, this, CMSYield,
3808             true /* precleaning phase */);
3809  // CAUTION: The following closure has persistent state that may need to
3810  // be reset upon a decrease in the sequence of addresses it
3811  // processes.
3812  ScanMarkedObjectsAgainCarefullyClosure
3813    smoac_cl(this, _span,
3814      &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3815
3816  // Preclean dirty cards in ModUnionTable and CardTable using
3817  // appropriate convergence criterion;
3818  // repeat CMSPrecleanIter times unless we find that
3819  // we are losing.
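  // Concretely, the loop below stops early once curNumCards <=
  // CMSPrecleanThreshold, or (after the first pass) once
  // curNumCards * CMSPrecleanDenominator > lastNumCards * CMSPrecleanNumerator.
  // Assuming the usual defaults of CMSPrecleanNumerator == 2 and
  // CMSPrecleanDenominator == 3, that second test aborts precleaning as soon
  // as a pass fails to shrink the dirty-card count to at most two thirds of
  // the previous pass, i.e. as soon as precleaning appears to be losing to
  // the mutators.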
3820  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3821  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3822         "Bad convergence multiplier");
3823  assert(CMSPrecleanThreshold >= 100,
3824         "Unreasonably low CMSPrecleanThreshold");
3825
3826  size_t numIter, cumNumCards, lastNumCards, curNumCards;
3827  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3828       numIter < CMSPrecleanIter;
3829       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3830    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3831    log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3832    // Either there are very few dirty cards, so re-mark
3833    // pause will be small anyway, or our pre-cleaning isn't
3834    // that much faster than the rate at which cards are being
3835    // dirtied, so we might as well stop and re-mark since
3836    // precleaning won't improve our re-mark time by much.
3837    if (curNumCards <= CMSPrecleanThreshold ||
3838        (numIter > 0 &&
3839         (curNumCards * CMSPrecleanDenominator >
3840         lastNumCards * CMSPrecleanNumerator))) {
3841      numIter++;
3842      cumNumCards += curNumCards;
3843      break;
3844    }
3845  }
3846
3847  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3848
3849  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3850  cumNumCards += curNumCards;
3851  log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3852                             curNumCards, cumNumCards, numIter);
3853  return cumNumCards;   // as a measure of useful work done
3854}
3855
3856// PRECLEANING NOTES:
3857// Precleaning involves:
3858// . reading the bits of the modUnionTable and clearing the set bits.
3859// . For the cards corresponding to the set bits, we scan the
3860//   objects on those cards. This means we need the free_list_lock
3861//   so that we can safely iterate over the CMS space when scanning
3862//   for oops.
3863// . When we scan the objects, we'll be both reading and setting
3864//   marks in the marking bit map, so we'll need the marking bit map.
3865// . For protecting _collector_state transitions, we take the CGC_lock.
3866//   Note that any races in the reading of card table entries by the
3867//   CMS thread on the one hand and the clearing of those entries by the
3868//   VM thread or the setting of those entries by the mutator threads on the
3869//   other are quite benign. However, for efficiency it makes sense to keep
3870//   the VM thread from racing with the CMS thread while the latter is
3871//   propagating dirty card info to the modUnionTable. We therefore also use the
3872//   CGC_lock to protect the reading of the card table and the mod union
3873//   table by the CMS thread.
3874// . We run concurrently with mutator updates, so scanning
3875//   needs to be done carefully  -- we should not try to scan
3876//   potentially uninitialized objects.
3877//
3878// Locking strategy: While holding the CGC_lock, we scan over and
3879// reset a maximal dirty range of the mod union / card tables, then lock
3880// the free_list_lock and bitmap lock to do a full marking, then
3881// release these locks; and repeat the cycle. This allows for a
3882// certain amount of fairness in the sharing of these locks between
3883// the CMS collector on the one hand, and the VM thread and the
3884// mutators on the other.
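//
// In outline, each preclean iteration below then looks roughly like this
// (a control-flow sketch only, not the literal code):
//
//   loop over the span {
//     { hold CGC_lock:                    find and reset a maximal dirty range }
//     { hold free_list_lock + bitMapLock: scan and mark objects on that range  }
//     // locks dropped here; the VM thread and mutators can get a turn
//   }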
3885
3886// NOTE: preclean_mod_union_table() and preclean_card_table()
3887// further below are largely identical; if you need to modify
3888// one of these methods, please check the other method too.
3889
3890size_t CMSCollector::preclean_mod_union_table(
3891  ConcurrentMarkSweepGeneration* old_gen,
3892  ScanMarkedObjectsAgainCarefullyClosure* cl) {
3893  verify_work_stacks_empty();
3894  verify_overflow_empty();
3895
3896  // strategy: starting with the first card, accumulate contiguous
3897  // ranges of dirty cards; clear these cards, then scan the region
3898  // covered by these cards.
3899
3900  // Since all of the MUT is committed ahead, we can just use
3901  // that, in case the generations expand while we are precleaning.
3902  // It might also be fine to just use the committed part of the
3903  // generation, but we might potentially miss cards when the
3904  // generation is rapidly expanding while we are in the midst
3905  // of precleaning.
3906  HeapWord* startAddr = old_gen->reserved().start();
3907  HeapWord* endAddr   = old_gen->reserved().end();
3908
3909  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3910
3911  size_t numDirtyCards, cumNumDirtyCards;
3912  HeapWord *nextAddr, *lastAddr;
3913  for (cumNumDirtyCards = numDirtyCards = 0,
3914       nextAddr = lastAddr = startAddr;
3915       nextAddr < endAddr;
3916       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3917
3918    ResourceMark rm;
3919    HandleMark   hm;
3920
3921    MemRegion dirtyRegion;
3922    {
3923      stopTimer();
3924      // Potential yield point
3925      CMSTokenSync ts(true);
3926      startTimer();
3927      sample_eden();
3928      // Get dirty region starting at nextAddr (inclusive),
3929      // simultaneously clearing it.
3930      dirtyRegion =
3931        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
3932      assert(dirtyRegion.start() >= nextAddr,
3933             "returned region inconsistent?");
3934    }
3935    // Remember where the next search should begin.
3936    // The returned region (if non-empty) is a right-open interval,
3937    // so lastAddr is obtained from the right end of that
3938    // interval.
3939    lastAddr = dirtyRegion.end();
3940    // Should do something more transparent and less hacky XXX
3941    numDirtyCards =
3942      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
3943
3944    // We'll scan the cards in the dirty region (with periodic
3945    // yields for foreground GC as needed).
3946    if (!dirtyRegion.is_empty()) {
3947      assert(numDirtyCards > 0, "consistency check");
3948      HeapWord* stop_point = NULL;
3949      stopTimer();
3950      // Potential yield point
3951      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
3952                               bitMapLock());
3953      startTimer();
3954      {
3955        verify_work_stacks_empty();
3956        verify_overflow_empty();
3957        sample_eden();
3958        stop_point =
3959          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
3960      }
3961      if (stop_point != NULL) {
3962        // The careful iteration stopped early either because it found an
3963        // uninitialized object, or because we were in the midst of an
3964        // "abortable preclean", which should now be aborted. Redirty
3965        // the bits corresponding to the partially-scanned or unscanned
3966        // cards. We'll either restart at the next block boundary or
3967        // abort the preclean.
3968        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
3969               "Should only be AbortablePreclean.");
3970        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
3971        if (should_abort_preclean()) {
3972          break; // out of preclean loop
3973        } else {
3974          // Compute the next address at which preclean should pick up;
3975          // might need bitMapLock in order to read P-bits.
3976          lastAddr = next_card_start_after_block(stop_point);
3977        }
3978      }
3979    } else {
3980      assert(lastAddr == endAddr, "consistency check");
3981      assert(numDirtyCards == 0, "consistency check");
3982      break;
3983    }
3984  }
3985  verify_work_stacks_empty();
3986  verify_overflow_empty();
3987  return cumNumDirtyCards;
3988}
3989
3990// NOTE: preclean_mod_union_table() above and preclean_card_table()
3991// below are largely identical; if you need to modify
3992// one of these methods, please check the other method too.
3993
3994size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
3995  ScanMarkedObjectsAgainCarefullyClosure* cl) {
3996  // strategy: it's similar to preclean_mod_union_table above, in that
3997  // we accumulate contiguous ranges of dirty cards, mark these cards
3998  // precleaned, then scan the region covered by these cards.
3999  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
4000  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
4001
4002  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4003
4004  size_t numDirtyCards, cumNumDirtyCards;
4005  HeapWord *lastAddr, *nextAddr;
4006
4007  for (cumNumDirtyCards = numDirtyCards = 0,
4008       nextAddr = lastAddr = startAddr;
4009       nextAddr < endAddr;
4010       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4011
4012    ResourceMark rm;
4013    HandleMark   hm;
4014
4015    MemRegion dirtyRegion;
4016    {
4017      // See comments in "Precleaning notes" above on why we
4018      // do this locking. XXX Could the locking overheads be
4019      // too high when dirty cards are sparse? [I don't think so.]
4020      stopTimer();
4021      CMSTokenSync x(true); // is cms thread
4022      startTimer();
4023      sample_eden();
4024      // Get and clear dirty region from card table
4025      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4026                                    MemRegion(nextAddr, endAddr),
4027                                    true,
4028                                    CardTableModRefBS::precleaned_card_val());
4029
4030      assert(dirtyRegion.start() >= nextAddr,
4031             "returned region inconsistent?");
4032    }
4033    lastAddr = dirtyRegion.end();
4034    numDirtyCards =
4035      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
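    // numDirtyCards is simply the size of the dirty region expressed in
    // cards. For instance, with the common 512-byte card and 8-byte
    // HeapWord on a 64-bit VM, card_size_in_words is 64, so a 64 KB dirty
    // region corresponds to 128 cards (figures illustrative only; the
    // actual values come from CardTableModRefBS on the given platform).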
4036
4037    if (!dirtyRegion.is_empty()) {
4038      stopTimer();
4039      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4040      startTimer();
4041      sample_eden();
4042      verify_work_stacks_empty();
4043      verify_overflow_empty();
4044      HeapWord* stop_point =
4045        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4046      if (stop_point != NULL) {
4047        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4048               "Should only be AbortablePreclean.");
4049        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4050        if (should_abort_preclean()) {
4051          break; // out of preclean loop
4052        } else {
4053          // Compute the next address at which preclean should pick up.
4054          lastAddr = next_card_start_after_block(stop_point);
4055        }
4056      }
4057    } else {
4058      break;
4059    }
4060  }
4061  verify_work_stacks_empty();
4062  verify_overflow_empty();
4063  return cumNumDirtyCards;
4064}
4065
4066class PrecleanKlassClosure : public KlassClosure {
4067  KlassToOopClosure _cm_klass_closure;
4068 public:
4069  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4070  void do_klass(Klass* k) {
4071    if (k->has_accumulated_modified_oops()) {
4072      k->clear_accumulated_modified_oops();
4073
4074      _cm_klass_closure.do_klass(k);
4075    }
4076  }
4077};
4078
4079  // The freelist lock is needed to prevent asserts; is it really needed?
4080void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4081
4082  cl->set_freelistLock(freelistLock);
4083
4084  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4085
4086  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4087  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4088  PrecleanKlassClosure preclean_klass_closure(cl);
4089  ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4090
4091  verify_work_stacks_empty();
4092  verify_overflow_empty();
4093}
4094
4095void CMSCollector::checkpointRootsFinal() {
4096  assert(_collectorState == FinalMarking, "incorrect state transition?");
4097  check_correct_thread_executing();
4098  // world is stopped at this checkpoint
4099  assert(SafepointSynchronize::is_at_safepoint(),
4100         "world should be stopped");
4101  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4102
4103  verify_work_stacks_empty();
4104  verify_overflow_empty();
4105
4106  log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4107                _young_gen->used() / K, _young_gen->capacity() / K);
4108  {
4109    if (CMSScavengeBeforeRemark) {
4110      GenCollectedHeap* gch = GenCollectedHeap::heap();
4111      // Temporarily set the flag to false; gch->do_collection() expects
4112      // it to be false and will set it to true during the collection.
4113      FlagSetting fl(gch->_is_gc_active, false);
4114
4115      gch->do_collection(true,                      // full (i.e. force, see below)
4116                         false,                     // !clear_all_soft_refs
4117                         0,                         // size
4118                         false,                     // is_tlab
4119                         GenCollectedHeap::YoungGen // type
4120        );
4121    }
4122    FreelistLocker x(this);
4123    MutexLockerEx y(bitMapLock(),
4124                    Mutex::_no_safepoint_check_flag);
4125    checkpointRootsFinalWork();
4126  }
4127  verify_work_stacks_empty();
4128  verify_overflow_empty();
4129}
4130
4131void CMSCollector::checkpointRootsFinalWork() {
4132  GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4133
4134  assert(haveFreelistLocks(), "must have free list locks");
4135  assert_lock_strong(bitMapLock());
4136
4137  ResourceMark rm;
4138  HandleMark   hm;
4139
4140  GenCollectedHeap* gch = GenCollectedHeap::heap();
4141
4142  if (should_unload_classes()) {
4143    CodeCache::gc_prologue();
4144  }
4145  assert(haveFreelistLocks(), "must have free list locks");
4146  assert_lock_strong(bitMapLock());
4147
4148  // We might assume that we need not fill TLAB's when
4149  // CMSScavengeBeforeRemark is set, because we may have just done
4150  // a scavenge which would have filled all TLAB's -- and besides
4151  // Eden would be empty. This however may not always be the case --
4152  // for instance although we asked for a scavenge, it may not have
4153  // happened because of a JNI critical section. We probably need
4154  // a policy for deciding whether we can in that case wait until
4155  // the critical section releases and then do the remark following
4156  // the scavenge, and skip it here. In the absence of that policy,
4157  // or of an indication of whether the scavenge did indeed occur,
4158  // we cannot rely on TLAB's having been filled and must do
4159  // so here just in case a scavenge did not happen.
4160  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4161  // Update the saved marks which may affect the root scans.
4162  gch->save_marks();
4163
4164  print_eden_and_survivor_chunk_arrays();
4165
4166  {
4167#if defined(COMPILER2) || INCLUDE_JVMCI
4168    DerivedPointerTableDeactivate dpt_deact;
4169#endif
4170
4171    // Note on the role of the mod union table:
4172    // Since the marker in "markFromRoots" marks concurrently with
4173    // mutators, it is possible for some reachable objects not to have been
4174    // scanned. For instance, an only reference to an object A was
4175    // placed in object B after the marker scanned B. Unless B is rescanned,
4176    // A would be collected. Such updates to references in marked objects
4177    // are detected via the mod union table which is the set of all cards
4178    // dirtied since the first checkpoint in this GC cycle and prior to
4179    // the most recent young generation GC, minus those cleaned up by the
4180    // concurrent precleaning.
4181    if (CMSParallelRemarkEnabled) {
4182      GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
4183      do_remark_parallel();
4184    } else {
4185      GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
4186      do_remark_non_parallel();
4187    }
4188  }
4189  verify_work_stacks_empty();
4190  verify_overflow_empty();
4191
4192  {
4193    GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
4194    refProcessingWork();
4195  }
4196  verify_work_stacks_empty();
4197  verify_overflow_empty();
4198
4199  if (should_unload_classes()) {
4200    CodeCache::gc_epilogue();
4201  }
4202  JvmtiExport::gc_epilogue();
4203
4204  // If we encountered any (marking stack / work queue) overflow
4205  // events during the current CMS cycle, take appropriate
4206  // remedial measures, where possible, so as to try and avoid
4207  // recurrence of that condition.
4208  assert(_markStack.isEmpty(), "No grey objects");
4209  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4210                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4211  if (ser_ovflw > 0) {
4212    log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4213                         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4214    _markStack.expand();
4215    _ser_pmc_remark_ovflw = 0;
4216    _ser_pmc_preclean_ovflw = 0;
4217    _ser_kac_preclean_ovflw = 0;
4218    _ser_kac_ovflw = 0;
4219  }
4220  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4221    log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4222                  _par_pmc_remark_ovflw, _par_kac_ovflw);
4223    _par_pmc_remark_ovflw = 0;
4224    _par_kac_ovflw = 0;
4225  }
4226  if (_markStack._hit_limit > 0) {
4227    log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4228                  _markStack._hit_limit);
4229  }
4230  if (_markStack._failed_double > 0) {
4231    log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4232                  _markStack._failed_double, _markStack.capacity());
4233  }
4234  _markStack._hit_limit = 0;
4235  _markStack._failed_double = 0;
4236
4237  if ((VerifyAfterGC || VerifyDuringGC) &&
4238      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4239    verify_after_remark();
4240  }
4241
4242  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4243
4244  // Change under the freelistLocks.
4245  _collectorState = Sweeping;
4246  // Call isAllClear() under bitMapLock
4247  assert(_modUnionTable.isAllClear(),
4248      "Should be clear by end of the final marking");
4249  assert(_ct->klass_rem_set()->mod_union_is_clear(),
4250      "Should be clear by end of the final marking");
4251}
4252
4253void CMSParInitialMarkTask::work(uint worker_id) {
4254  elapsedTimer _timer;
4255  ResourceMark rm;
4256  HandleMark   hm;
4257
4258  // ---------- scan from roots --------------
4259  _timer.start();
4260  GenCollectedHeap* gch = GenCollectedHeap::heap();
4261  ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4262
4263  // ---------- young gen roots --------------
4264  {
4265    work_on_young_gen_roots(&par_mri_cl);
4266    _timer.stop();
4267    log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4268  }
4269
4270  // ---------- remaining roots --------------
4271  _timer.reset();
4272  _timer.start();
4273
4274  CLDToOopClosure cld_closure(&par_mri_cl, true);
4275
4276  gch->cms_process_roots(_strong_roots_scope,
4277                         false,     // yg was scanned above
4278                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4279                         _collector->should_unload_classes(),
4280                         &par_mri_cl,
4281                         &cld_closure);
4282  assert(_collector->should_unload_classes()
4283         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4284         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4285  _timer.stop();
4286  log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4287}
4288
4289// Parallel remark task
4290class CMSParRemarkTask: public CMSParMarkTask {
4291  CompactibleFreeListSpace* _cms_space;
4292
4293  // The per-thread work queues, available here for stealing.
4294  OopTaskQueueSet*       _task_queues;
4295  ParallelTaskTerminator _term;
4296  StrongRootsScope*      _strong_roots_scope;
4297
4298 public:
4299  // A value of 0 passed to n_workers will cause the number of
4300  // workers to be taken from the active workers in the work gang.
4301  CMSParRemarkTask(CMSCollector* collector,
4302                   CompactibleFreeListSpace* cms_space,
4303                   uint n_workers, WorkGang* workers,
4304                   OopTaskQueueSet* task_queues,
4305                   StrongRootsScope* strong_roots_scope):
4306    CMSParMarkTask("Rescan roots and grey objects in parallel",
4307                   collector, n_workers),
4308    _cms_space(cms_space),
4309    _task_queues(task_queues),
4310    _term(n_workers, task_queues),
4311    _strong_roots_scope(strong_roots_scope) { }
4312
4313  OopTaskQueueSet* task_queues() { return _task_queues; }
4314
4315  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4316
4317  ParallelTaskTerminator* terminator() { return &_term; }
4318  uint n_workers() { return _n_workers; }
4319
4320  void work(uint worker_id);
4321
4322 private:
4323  // ... of  dirty cards in old space
4324  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4325                                  ParMarkRefsIntoAndScanClosure* cl);
4326
4327  // ... work stealing for the above
4328  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
4329};
4330
4331class RemarkKlassClosure : public KlassClosure {
4332  KlassToOopClosure _cm_klass_closure;
4333 public:
4334  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4335  void do_klass(Klass* k) {
4336    // Check if we have modified any oops in the Klass during the concurrent marking.
4337    if (k->has_accumulated_modified_oops()) {
4338      k->clear_accumulated_modified_oops();
4339
4340      // We could have transferred the current modified marks to the accumulated marks,
4341      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4342    } else if (k->has_modified_oops()) {
4343      // Don't clear anything, this info is needed by the next young collection.
4344    } else {
4345      // No modified oops in the Klass.
4346      return;
4347    }
4348
4349    // The klass has modified fields, need to scan the klass.
4350    _cm_klass_closure.do_klass(k);
4351  }
4352};
4353
4354void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
4355  ParNewGeneration* young_gen = _collector->_young_gen;
4356  ContiguousSpace* eden_space = young_gen->eden();
4357  ContiguousSpace* from_space = young_gen->from();
4358  ContiguousSpace* to_space   = young_gen->to();
4359
4360  HeapWord** eca = _collector->_eden_chunk_array;
4361  size_t     ect = _collector->_eden_chunk_index;
4362  HeapWord** sca = _collector->_survivor_chunk_array;
4363  size_t     sct = _collector->_survivor_chunk_index;
4364
4365  assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4366  assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4367
4368  do_young_space_rescan(cl, to_space, NULL, 0);
4369  do_young_space_rescan(cl, from_space, sca, sct);
4370  do_young_space_rescan(cl, eden_space, eca, ect);
4371}
4372
4373// work_queue(i) is passed to the closure
4374// ParMarkRefsIntoAndScanClosure.  The "i" parameter
4375// also is passed to do_dirty_card_rescan_tasks() and to
4376// do_work_steal() to select the i-th task_queue.
4377
4378void CMSParRemarkTask::work(uint worker_id) {
4379  elapsedTimer _timer;
4380  ResourceMark rm;
4381  HandleMark   hm;
4382
4383  // ---------- rescan from roots --------------
4384  _timer.start();
4385  GenCollectedHeap* gch = GenCollectedHeap::heap();
4386  ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4387    _collector->_span, _collector->ref_processor(),
4388    &(_collector->_markBitMap),
4389    work_queue(worker_id));
4390
4391  // Rescan young gen roots first since these are likely
4392  // coarsely partitioned and may, on that account, constitute
4393  // the critical path; thus, it's best to start off that
4394  // work first.
4395  // ---------- young gen roots --------------
4396  {
4397    work_on_young_gen_roots(&par_mrias_cl);
4398    _timer.stop();
4399    log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4400  }
4401
4402  // ---------- remaining roots --------------
4403  _timer.reset();
4404  _timer.start();
4405  gch->cms_process_roots(_strong_roots_scope,
4406                         false,     // yg was scanned above
4407                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4408                         _collector->should_unload_classes(),
4409                         &par_mrias_cl,
4410                         NULL);     // The dirty klasses will be handled below
4411
4412  assert(_collector->should_unload_classes()
4413         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4414         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4415  _timer.stop();
4416  log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
4417
4418  // ---------- unhandled CLD scanning ----------
4419  if (worker_id == 0) { // Single threaded at the moment.
4420    _timer.reset();
4421    _timer.start();
4422
4423    // Scan all new class loader data objects and new dependencies that were
4424    // introduced during concurrent marking.
4425    ResourceMark rm;
4426    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4427    for (int i = 0; i < array->length(); i++) {
4428      par_mrias_cl.do_cld_nv(array->at(i));
4429    }
4430
4431    // We don't need to keep track of new CLDs anymore.
4432    ClassLoaderDataGraph::remember_new_clds(false);
4433
4434    _timer.stop();
4435    log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4436  }
4437
4438  // ---------- dirty klass scanning ----------
4439  if (worker_id == 0) { // Single threaded at the moment.
4440    _timer.reset();
4441    _timer.start();
4442
4443    // Scan all classes that were dirtied during the concurrent marking phase.
4444    RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4445    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4446
4447    _timer.stop();
4448    log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4449  }
4450
4451  // We might have added oops to ClassLoaderData::_handles during the
4452  // concurrent marking phase. These oops point to newly allocated objects
4453  // that are guaranteed to be kept alive. Either by the direct allocation
4454  // code, or when the young collector processes the roots. Hence,
4455  // we don't have to revisit the _handles block during the remark phase.
4456
4457  // ---------- rescan dirty cards ------------
4458  _timer.reset();
4459  _timer.start();
4460
4461  // Do the rescan tasks for each of the two spaces
4462  // (cms_space) in turn.
4463  // "worker_id" is passed to select the task_queue for "worker_id"
4464  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4465  _timer.stop();
4466  log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4467
4468  // ---------- steal work from other threads ...
4469  // ---------- ... and drain overflow list.
4470  _timer.reset();
4471  _timer.start();
4472  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4473  _timer.stop();
4474  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4475}
4476
4477void
4478CMSParMarkTask::do_young_space_rescan(
4479  OopsInGenClosure* cl, ContiguousSpace* space,
4480  HeapWord** chunk_array, size_t chunk_top) {
4481  // Until all tasks completed:
4482  // . claim an unclaimed task
4483  // . compute region boundaries corresponding to task claimed
4484  //   using chunk_array
4485  // . par_oop_iterate(cl) over that region
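  // For example (values illustrative only): with chunk_top == 2 and
  // chunk_array == { c0, c1 }, the claimed tasks cover [bottom, c0),
  // [c0, c1) and [c1, top), i.e. chunk_top + 1 tasks in total, which is
  // how n_tasks is set up in
  // initialize_sequential_subtasks_for_young_gen_rescan().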
4486
4487  ResourceMark rm;
4488  HandleMark   hm;
4489
4490  SequentialSubTasksDone* pst = space->par_seq_tasks();
4491
4492  uint nth_task = 0;
4493  uint n_tasks  = pst->n_tasks();
4494
4495  if (n_tasks > 0) {
4496    assert(pst->valid(), "Uninitialized use?");
4497    HeapWord *start, *end;
4498    while (!pst->is_task_claimed(/* reference */ nth_task)) {
4499      // We claimed task # nth_task; compute its boundaries.
4500      if (chunk_top == 0) {  // no samples were taken
4501        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4502        start = space->bottom();
4503        end   = space->top();
4504      } else if (nth_task == 0) {
4505        start = space->bottom();
4506        end   = chunk_array[nth_task];
4507      } else if (nth_task < (uint)chunk_top) {
4508        assert(nth_task >= 1, "Control point invariant");
4509        start = chunk_array[nth_task - 1];
4510        end   = chunk_array[nth_task];
4511      } else {
4512        assert(nth_task == (uint)chunk_top, "Control point invariant");
4513        start = chunk_array[chunk_top - 1];
4514        end   = space->top();
4515      }
4516      MemRegion mr(start, end);
4517      // Verify that mr is in space
4518      assert(mr.is_empty() || space->used_region().contains(mr),
4519             "Should be in space");
4520      // Verify that "start" is an object boundary
4521      assert(mr.is_empty() || oop(mr.start())->is_oop(),
4522             "Should be an oop");
4523      space->par_oop_iterate(mr, cl);
4524    }
4525    pst->all_tasks_completed();
4526  }
4527}
4528
4529void
4530CMSParRemarkTask::do_dirty_card_rescan_tasks(
4531  CompactibleFreeListSpace* sp, int i,
4532  ParMarkRefsIntoAndScanClosure* cl) {
4533  // Until all tasks completed:
4534  // . claim an unclaimed task
4535  // . compute region boundaries corresponding to task claimed
4536  // . transfer dirty bits ct->mut for that region
4537  // . apply rescanclosure to dirty mut bits for that region
4538
4539  ResourceMark rm;
4540  HandleMark   hm;
4541
4542  OopTaskQueue* work_q = work_queue(i);
4543  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4544  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4545  // CAUTION: This closure has state that persists across calls to
4546  // the work method dirty_range_iterate_clear() in that it has
4547  // embedded in it a (subtype of) UpwardsObjectClosure. The
4548  // use of that state in the embedded UpwardsObjectClosure instance
4549  // assumes that the cards are always iterated (even if in parallel
4550  // by several threads) in monotonically increasing order per each
4551  // thread. This is true of the implementation below which picks
4552  // card ranges (chunks) in monotonically increasing order globally
4553  // and, a-fortiori, in monotonically increasing order per thread
4554  // (the latter order being a subsequence of the former).
4555  // If the work code below is ever reorganized into a more chaotic
4556  // work-partitioning form than the current "sequential tasks"
4557  // paradigm, the use of that persistent state will have to be
4558  // revisited and modified appropriately. See also related
4559  // bug 4756801 work on which should examine this code to make
4560  // sure that the changes there do not run counter to the
4561  // assumptions made here and necessary for correctness and
4562  // efficiency. Note also that this code might yield inefficient
4563  // behavior in the case of very large objects that span one or
4564  // more work chunks. Such objects would potentially be scanned
4565  // several times redundantly. Work on 4756801 should try and
4566  // address that performance anomaly if at all possible. XXX
4567  MemRegion  full_span  = _collector->_span;
4568  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4569  MarkFromDirtyCardsClosure
4570    greyRescanClosure(_collector, full_span, // entire span of interest
4571                      sp, bm, work_q, cl);
4572
4573  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4574  assert(pst->valid(), "Uninitialized use?");
4575  uint nth_task = 0;
4576  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4577  MemRegion span = sp->used_region();
4578  HeapWord* start_addr = span.start();
4579  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4580                                           alignment);
4581  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4582  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4583         start_addr, "Check alignment");
4584  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4585         chunk_size, "Check alignment");
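  // A note on the chosen alignment: one mod union table word covers
  // BitsPerWord cards and each card covers card_size bytes of heap, so
  // chunk boundaries aligned at card_size * BitsPerWord bytes guarantee
  // that no two workers ever touch the same MUT word. (With the common
  // 512-byte card and 64-bit word that is 32 KB of heap per MUT word;
  // the exact figures depend on the platform.)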
4586
4587  while (!pst->is_task_claimed(/* reference */ nth_task)) {
4588    // Having claimed the nth_task, compute corresponding mem-region,
4589    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4590    // The alignment restriction ensures that we do not need any
4591    // synchronization with other gang-workers while setting or
4592    // clearing bits in this chunk of the MUT.
4593    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4594                                    start_addr + (nth_task+1)*chunk_size);
4595    // The last chunk's end might be way beyond end of the
4596    // used region. In that case pull back appropriately.
4597    if (this_span.end() > end_addr) {
4598      this_span.set_end(end_addr);
4599      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4600    }
4601    // Iterate over the dirty cards covering this chunk, marking them
4602    // precleaned, and setting the corresponding bits in the mod union
4603    // table. Since we have been careful to partition at Card and MUT-word
4604    // boundaries no synchronization is needed between parallel threads.
4605    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4606                                                 &modUnionClosure);
4607
4608    // Having transferred these marks into the modUnionTable,
4609    // rescan the marked objects on the dirty cards in the modUnionTable.
4610    // Even if this is at a synchronous collection, the initial marking
4611    // may have been done during an asynchronous collection so there
4612    // may be dirty bits in the mod-union table.
4613    _collector->_modUnionTable.dirty_range_iterate_clear(
4614                  this_span, &greyRescanClosure);
4615    _collector->_modUnionTable.verifyNoOneBitsInRange(
4616                                 this_span.start(),
4617                                 this_span.end());
4618  }
4619  pst->all_tasks_completed();  // declare that i am done
4620}
4621
4622// . see if we can share work_queues with ParNew? XXX
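// The stealing loop below follows the usual CMS pattern: completely drain
// the local work queue, try to refill it from the global overflow list,
// then try to steal from the other workers' queues, and finally offer
// termination once there is nothing left anywhere.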
4623void
4624CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
4625                                int* seed) {
4626  OopTaskQueue* work_q = work_queue(i);
4627  NOT_PRODUCT(int num_steals = 0;)
4628  oop obj_to_scan;
4629  CMSBitMap* bm = &(_collector->_markBitMap);
4630
4631  while (true) {
4632    // Completely finish any left over work from (an) earlier round(s)
4633    cl->trim_queue(0);
4634    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4635                                         (size_t)ParGCDesiredObjsFromOverflowList);
4636    // Now check if there's any work in the overflow list
4637    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4638    // only affects the number of attempts made to get work from the
4639    // overflow list and does not affect the number of workers.  Just
4640    // pass ParallelGCThreads so this behavior is unchanged.
4641    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4642                                                work_q,
4643                                                ParallelGCThreads)) {
4644      // found something in global overflow list;
4645      // not yet ready to go stealing work from others.
4646      // We'd like to assert(work_q->size() != 0, ...)
4647      // because we just took work from the overflow list,
4648      // but of course we can't since all of that could have
4649      // been already stolen from us.
4650      // "He giveth and He taketh away."
4651      continue;
4652    }
4653    // Verify that we have no work before we resort to stealing
4654    assert(work_q->size() == 0, "Have work, shouldn't steal");
4655    // Try to steal from other queues that have work
4656    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4657      NOT_PRODUCT(num_steals++;)
4658      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4659      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4660      // Do scanning work
4661      obj_to_scan->oop_iterate(cl);
4662      // Loop around, finish this work, and try to steal some more
4663    } else if (terminator()->offer_termination()) {
4664        break;  // nirvana from the infinite cycle
4665    }
4666  }
4667  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4668  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4669         "Else our work is not yet done");
4670}
4671
4672// If CMSEdenChunksRecordAlways is true, record object boundaries in
4673// _eden_chunk_array by sampling the eden top in the slow-path eden
4674// object allocation code path. If CMSEdenChunksRecordAlways is false,
4675// we rely instead on the other, asynchronous sampling done in
4676// sample_eden(), which is active during part of the
4677// preclean phase.
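// The samples recorded here later become the task boundaries that
// do_young_space_rescan() uses to partition eden for the parallel initial
// mark and remark (see initialize_sequential_subtasks_for_young_gen_rescan()),
// which is why the array must remain sorted in address order.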
4678void CMSCollector::sample_eden_chunk() {
4679  if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4680    if (_eden_chunk_lock->try_lock()) {
4681      // Record a sample. This is the critical section. The contents
4682      // of the _eden_chunk_array have to be non-decreasing in the
4683      // of the _eden_chunk_array have to be non-decreasing in
4684      // address order.
4685      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4686             "Unexpected state of Eden");
4687      if (_eden_chunk_index == 0 ||
4688          ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4689           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4690                          _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4691        _eden_chunk_index++;  // commit sample
4692      }
4693      _eden_chunk_lock->unlock();
4694    }
4695  }
4696}
4697
4698// Return a thread-local PLAB recording array, as appropriate.
4699void* CMSCollector::get_data_recorder(int thr_num) {
4700  if (_survivor_plab_array != NULL &&
4701      (CMSPLABRecordAlways ||
4702       (_collectorState > Marking && _collectorState < FinalMarking))) {
4703    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4704    ChunkArray* ca = &_survivor_plab_array[thr_num];
4705    ca->reset();   // clear it so that fresh data is recorded
4706    return (void*) ca;
4707  } else {
4708    return NULL;
4709  }
4710}
4711
4712// Reset all the thread-local PLAB recording arrays
4713void CMSCollector::reset_survivor_plab_arrays() {
4714  for (uint i = 0; i < ParallelGCThreads; i++) {
4715    _survivor_plab_array[i].reset();
4716  }
4717}
4718
4719// Merge the per-thread plab arrays into the global survivor chunk
4720// array which will provide the partitioning of the survivor space
4721// for CMS initial scan and rescan.
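// This is a straightforward k-way merge of the per-thread, address-sorted
// PLAB sample arrays: each round picks the smallest unconsumed address
// across all threads and appends it to _survivor_chunk_array. For example
// (addresses illustrative only), merging {0x1000, 0x3000} with
// {0x2000, 0x4000} yields {0x1000, 0x2000, 0x3000, 0x4000}.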
4722void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4723                                              int no_of_gc_threads) {
4724  assert(_survivor_plab_array  != NULL, "Error");
4725  assert(_survivor_chunk_array != NULL, "Error");
4726  assert(_collectorState == FinalMarking ||
4727         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4728  for (int j = 0; j < no_of_gc_threads; j++) {
4729    _cursor[j] = 0;
4730  }
4731  HeapWord* top = surv->top();
4732  size_t i;
4733  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4734    HeapWord* min_val = top;          // Higher than any PLAB address
4735    uint      min_tid = 0;            // position of min_val this round
4736    for (int j = 0; j < no_of_gc_threads; j++) {
4737      ChunkArray* cur_sca = &_survivor_plab_array[j];
4738      if (_cursor[j] == cur_sca->end()) {
4739        continue;
4740      }
4741      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4742      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4743      assert(surv->used_region().contains(cur_val), "Out of bounds value");
4744      if (cur_val < min_val) {
4745        min_tid = j;
4746        min_val = cur_val;
4747      } else {
4748        assert(cur_val < top, "All recorded addresses should be less");
4749      }
4750    }
4751    // At this point min_val and min_tid are respectively
4752    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4753    // and the thread (j) that witnesses that address.
4754    // We record this address in the _survivor_chunk_array[i]
4755    // and increment _cursor[min_tid] prior to the next round i.
4756    if (min_val == top) {
4757      break;
4758    }
4759    _survivor_chunk_array[i] = min_val;
4760    _cursor[min_tid]++;
4761  }
4762  // We are all done; record the size of the _survivor_chunk_array
4763  _survivor_chunk_index = i; // exclusive: [0, i)
4764  log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4765  // Verify that we used up all the recorded entries
4766  #ifdef ASSERT
4767    size_t total = 0;
4768    for (int j = 0; j < no_of_gc_threads; j++) {
4769      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4770      total += _cursor[j];
4771    }
4772    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4773    // Check that the merged array is in sorted order
4774    if (total > 0) {
4775      for (size_t i = 0; i < total - 1; i++) {
4776        log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4777                                     i, p2i(_survivor_chunk_array[i]));
4778        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4779               "Not sorted");
4780      }
4781    }
4782  #endif // ASSERT
4783}
4784
4785// Set up the space's par_seq_tasks structure for work claiming
4786// for parallel initial scan and rescan of young gen.
4787// See ParRescanTask where this is currently used.
4788void
4789CMSCollector::
4790initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4791  assert(n_threads > 0, "Unexpected n_threads argument");
4792
4793  // Eden space
4794  if (!_young_gen->eden()->is_empty()) {
4795    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4796    assert(!pst->valid(), "Clobbering existing data?");
4797    // Each valid entry in [0, _eden_chunk_index) represents a task.
4798    size_t n_tasks = _eden_chunk_index + 1;
4799    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4800    // Sets the condition for completion of the subtask (how many threads
4801    // need to finish in order to be done).
4802    pst->set_n_threads(n_threads);
4803    pst->set_n_tasks((int)n_tasks);
4804  }
4805
4806  // Merge the survivor plab arrays into _survivor_chunk_array
4807  if (_survivor_plab_array != NULL) {
4808    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
4809  } else {
4810    assert(_survivor_chunk_index == 0, "Error");
4811  }
4812
4813  // To space
4814  {
4815    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
4816    assert(!pst->valid(), "Clobbering existing data?");
4817    // Sets the condition for completion of the subtask (how many threads
4818    // need to finish in order to be done).
4819    pst->set_n_threads(n_threads);
4820    pst->set_n_tasks(1);
4821    assert(pst->valid(), "Error");
4822  }
4823
4824  // From space
4825  {
4826    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
4827    assert(!pst->valid(), "Clobbering existing data?");
4828    size_t n_tasks = _survivor_chunk_index + 1;
4829    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
4830    // Sets the condition for completion of the subtask (how many threads
4831    // need to finish in order to be done).
4832    pst->set_n_threads(n_threads);
4833    pst->set_n_tasks((int)n_tasks);
4834    assert(pst->valid(), "Error");
4835  }
4836}
4837
4838// Parallel version of remark
4839void CMSCollector::do_remark_parallel() {
4840  GenCollectedHeap* gch = GenCollectedHeap::heap();
4841  WorkGang* workers = gch->workers();
4842  assert(workers != NULL, "Need parallel worker threads.");
4843  // Choose to use the number of GC workers most recently set
4844  // into "active_workers".
4845  uint n_workers = workers->active_workers();
4846
4847  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4848
4849  StrongRootsScope srs(n_workers);
4850
4851  CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
4852
4853  // We won't be iterating over the cards in the card table updating
4854  // the younger_gen cards, so we shouldn't call the following else
4855  // the verification code as well as subsequent younger_refs_iterate
4856  // code would get confused. XXX
4857  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
4858
4859  // The young gen rescan work will not be done as part of
4860  // process_roots (which currently doesn't know how to
4861  // parallelize such a scan), but rather will be broken up into
4862  // a set of parallel tasks (via the sampling that the [abortable]
4863  // preclean phase did of eden, plus the [two] tasks of
4864  // scanning the [two] survivor spaces). Further fine-grain
4865  // parallelization of the scanning of the survivor spaces
4866  // themselves, and of precleaning of the young gen itself
4867  // is deferred to the future.
4868  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
4869
4870  // The dirty card rescan work is broken up into a "sequence"
4871  // of parallel tasks (per constituent space) that are dynamically
4872  // claimed by the parallel threads.
4873  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
4874
4875  // It turns out that even when we're using 1 thread, doing the work in a
4876  // separate thread causes wide variance in run times.  We can't help this
4877  // in the multi-threaded case, but we special-case n=1 here to get
4878  // repeatable measurements of the 1-thread overhead of the parallel code.
4879  if (n_workers > 1) {
4880    // Make refs discovery MT-safe, if it isn't already: it may not
4881    // necessarily be so, since it's possible that we are doing
4882    // ST marking.
4883    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
4884    workers->run_task(&tsk);
4885  } else {
4886    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4887    tsk.work(0);
4888  }
4889
4890  // restore, single-threaded for now, any preserved marks
4891  // as a result of work_q overflow
4892  restore_preserved_marks_if_any();
4893}
4894
4895// Non-parallel version of remark
4896void CMSCollector::do_remark_non_parallel() {
4897  ResourceMark rm;
4898  HandleMark   hm;
4899  GenCollectedHeap* gch = GenCollectedHeap::heap();
4900  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4901
4902  MarkRefsIntoAndScanClosure
4903    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4904             &_markStack, this,
4905             false /* should_yield */, false /* not precleaning */);
4906  MarkFromDirtyCardsClosure
4907    markFromDirtyCardsClosure(this, _span,
4908                              NULL,  // space is set further below
4909                              &_markBitMap, &_markStack, &mrias_cl);
4910  {
4911    GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
4912    // Iterate over the dirty cards, setting the corresponding bits in the
4913    // mod union table.
4914    {
4915      ModUnionClosure modUnionClosure(&_modUnionTable);
4916      _ct->ct_bs()->dirty_card_iterate(
4917                      _cmsGen->used_region(),
4918                      &modUnionClosure);
4919    }
4920    // Having transferred these marks into the modUnionTable, we just need
4921    // to rescan the marked objects on the dirty cards in the modUnionTable.
4922    // The initial marking may have been done during an asynchronous
4923    // collection so there may be dirty bits in the mod-union table.
4924    const int alignment =
4925      CardTableModRefBS::card_size * BitsPerWord;
4926    {
4927      // ... First handle dirty cards in CMS gen
4928      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4929      MemRegion ur = _cmsGen->used_region();
4930      HeapWord* lb = ur.start();
4931      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
4932      MemRegion cms_span(lb, ub);
4933      _modUnionTable.dirty_range_iterate_clear(cms_span,
4934                                               &markFromDirtyCardsClosure);
4935      verify_work_stacks_empty();
4936      log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
4937    }
4938  }
4939  if (VerifyDuringGC &&
4940      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4941    HandleMark hm;  // Discard invalid handles created during verification
4942    Universe::verify();
4943  }
4944  {
4945    GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
4946
4947    verify_work_stacks_empty();
4948
4949    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
4950    StrongRootsScope srs(1);
4951
4952    gch->cms_process_roots(&srs,
4953                           true,  // young gen as roots
4954                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
4955                           should_unload_classes(),
4956                           &mrias_cl,
4957                           NULL); // The dirty klasses will be handled below
4958
4959    assert(should_unload_classes()
4960           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4961           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4962  }
4963
4964  {
4965    GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
4966
4967    verify_work_stacks_empty();
4968
4969    // Scan all class loader data objects that might have been introduced
4970    // during concurrent marking.
4971    ResourceMark rm;
4972    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4973    for (int i = 0; i < array->length(); i++) {
4974      mrias_cl.do_cld_nv(array->at(i));
4975    }
4976
4977    // We don't need to keep track of new CLDs anymore.
4978    ClassLoaderDataGraph::remember_new_clds(false);
4979
4980    verify_work_stacks_empty();
4981  }
4982
4983  {
4984    GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
4985
4986    verify_work_stacks_empty();
4987
4988    RemarkKlassClosure remark_klass_closure(&mrias_cl);
4989    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4990
4991    verify_work_stacks_empty();
4992  }
4993
4994  // We might have added oops to ClassLoaderData::_handles during the
4995  // concurrent marking phase. These oops point to newly allocated objects
4996  // that are guaranteed to be kept alive. Either by the direct allocation
4997  // code, or when the young collector processes the roots. Hence,
4998  // we don't have to revisit the _handles block during the remark phase.
4999
5000  verify_work_stacks_empty();
5001  // Restore evacuated mark words, if any, used for overflow list links
5002  restore_preserved_marks_if_any();
5003
5004  verify_overflow_empty();
5005}
5006
5007////////////////////////////////////////////////////////
5008// Parallel Reference Processing Task Proxy Class
5009////////////////////////////////////////////////////////
5010class AbstractGangTaskWOopQueues : public AbstractGangTask {
5011  OopTaskQueueSet*       _queues;
5012  ParallelTaskTerminator _terminator;
5013 public:
5014  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5015    AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5016  ParallelTaskTerminator* terminator() { return &_terminator; }
5017  OopTaskQueueSet* queues() { return _queues; }
5018};
5019
5020class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5021  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5022  CMSCollector*          _collector;
5023  CMSBitMap*             _mark_bit_map;
5024  const MemRegion        _span;
5025  ProcessTask&           _task;
5026
5027public:
5028  CMSRefProcTaskProxy(ProcessTask&     task,
5029                      CMSCollector*    collector,
5030                      const MemRegion& span,
5031                      CMSBitMap*       mark_bit_map,
5032                      AbstractWorkGang* workers,
5033                      OopTaskQueueSet* task_queues):
5034    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5035      task_queues,
5036      workers->active_workers()),
5037    _task(task),
5038    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5039  {
5040    assert(_collector->_span.equals(_span) && !_span.is_empty(),
5041           "Inconsistency in _span");
5042  }
5043
5044  OopTaskQueueSet* task_queues() { return queues(); }
5045
5046  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5047
5048  void do_work_steal(int i,
5049                     CMSParDrainMarkingStackClosure* drain,
5050                     CMSParKeepAliveClosure* keep_alive,
5051                     int* seed);
5052
5053  virtual void work(uint worker_id);
5054};
5055
5056void CMSRefProcTaskProxy::work(uint worker_id) {
5057  ResourceMark rm;
5058  HandleMark hm;
5059  assert(_collector->_span.equals(_span), "Inconsistency in _span");
5060  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5061                                        _mark_bit_map,
5062                                        work_queue(worker_id));
5063  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5064                                                 _mark_bit_map,
5065                                                 work_queue(worker_id));
5066  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5067  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5068  if (_task.marks_oops_alive()) {
5069    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5070                  _collector->hash_seed(worker_id));
5071  }
5072  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5073  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5074}
5075
5076class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5077  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5078  EnqueueTask& _task;
5079
5080public:
5081  CMSRefEnqueueTaskProxy(EnqueueTask& task)
5082    : AbstractGangTask("Enqueue reference objects in parallel"),
5083      _task(task)
5084  { }
5085
5086  virtual void work(uint worker_id)
5087  {
5088    _task.work(worker_id);
5089  }
5090};
5091
5092CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5093  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5094   _span(span),
5095   _bit_map(bit_map),
5096   _work_queue(work_queue),
5097   _mark_and_push(collector, span, bit_map, work_queue),
5098   _low_water_mark(MIN2((work_queue->max_elems()/4),
5099                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5100{ }
5101
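// A note on _low_water_mark above: it is the smaller of a quarter of the
// work queue's capacity and CMSWorkQueueDrainThreshold * ParallelGCThreads
// (both terms read off the initializer); the closure uses it as the depth
// to which it trims its local queue, keeping the queue shallow while
// references are processed.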
5102// . see if we can share work_queues with ParNew? XXX
5103void CMSRefProcTaskProxy::do_work_steal(int i,
5104  CMSParDrainMarkingStackClosure* drain,
5105  CMSParKeepAliveClosure* keep_alive,
5106  int* seed) {
5107  OopTaskQueue* work_q = work_queue(i);
5108  NOT_PRODUCT(int num_steals = 0;)
5109  oop obj_to_scan;
5110
5111  while (true) {
5112    // Completely finish any left over work from (an) earlier round(s)
5113    drain->trim_queue(0);
5114    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5115                                         (size_t)ParGCDesiredObjsFromOverflowList);
5116    // Now check if there's any work in the overflow list
5117    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5118    // only affects the number of attempts made to get work from the
5119    // overflow list and does not affect the number of workers.  Just
5120    // pass ParallelGCThreads so this behavior is unchanged.
5121    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5122                                                work_q,
5123                                                ParallelGCThreads)) {
5124      // Found something in global overflow list;
5125      // not yet ready to go stealing work from others.
5126      // We'd like to assert(work_q->size() != 0, ...)
5127      // because we just took work from the overflow list,
5128      // but of course we can't, since all of that might have
5129      // been already stolen from us.
5130      continue;
5131    }
5132    // Verify that we have no work before we resort to stealing
5133    assert(work_q->size() == 0, "Have work, shouldn't steal");
5134    // Try to steal from other queues that have work
5135    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5136      NOT_PRODUCT(num_steals++;)
5137      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5138      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5139      // Do scanning work
5140      obj_to_scan->oop_iterate(keep_alive);
5141      // Loop around, finish this work, and try to steal some more
5142    } else if (terminator()->offer_termination()) {
5143      break;  // nirvana from the infinite cycle
5144    }
5145  }
5146  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5147}
5148
5149void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5150{
5151  GenCollectedHeap* gch = GenCollectedHeap::heap();
5152  WorkGang* workers = gch->workers();
5153  assert(workers != NULL, "Need parallel worker threads.");
5154  CMSRefProcTaskProxy rp_task(task, &_collector,
5155                              _collector.ref_processor()->span(),
5156                              _collector.markBitMap(),
5157                              workers, _collector.task_queues());
5158  workers->run_task(&rp_task);
5159}
5160
5161void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5162{
5163
5164  GenCollectedHeap* gch = GenCollectedHeap::heap();
5165  WorkGang* workers = gch->workers();
5166  assert(workers != NULL, "Need parallel worker threads.");
5167  CMSRefEnqueueTaskProxy enq_task(task);
5168  workers->run_task(&enq_task);
5169}
5170
5171void CMSCollector::refProcessingWork() {
5172  ResourceMark rm;
5173  HandleMark   hm;
5174
5175  ReferenceProcessor* rp = ref_processor();
5176  assert(rp->span().equals(_span), "Spans should be equal");
5177  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5178  // Process weak references.
5179  rp->setup_policy(false);
5180  verify_work_stacks_empty();
5181
5182  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5183                                          &_markStack, false /* !preclean */);
5184  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5185                                _span, &_markBitMap, &_markStack,
5186                                &cmsKeepAliveClosure, false /* !preclean */);
5187  {
5188    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
5189
5190    ReferenceProcessorStats stats;
5191    if (rp->processing_is_mt()) {
5192      // Set the degree of MT here.  If the discovery is done MT, there
5193      // may have been a different number of threads doing the discovery
5194      // and a different number of discovered lists may have Ref objects.
5195      // That is OK as long as the Reference lists are balanced (see
5196      // balance_all_queues() and balance_queues()).
5197      GenCollectedHeap* gch = GenCollectedHeap::heap();
5198      uint active_workers = ParallelGCThreads;
5199      WorkGang* workers = gch->workers();
5200      if (workers != NULL) {
5201        active_workers = workers->active_workers();
5202        // The expectation is that active_workers will have already
5203        // been set to a reasonable value.  If it has not been set,
5204        // investigate.
5205        assert(active_workers > 0, "Should have been set during scavenge");
5206      }
5207      rp->set_active_mt_degree(active_workers);
5208      CMSRefProcTaskExecutor task_executor(*this);
5209      stats = rp->process_discovered_references(&_is_alive_closure,
5210                                        &cmsKeepAliveClosure,
5211                                        &cmsDrainMarkingStackClosure,
5212                                        &task_executor,
5213                                        _gc_timer_cm);
5214    } else {
5215      stats = rp->process_discovered_references(&_is_alive_closure,
5216                                        &cmsKeepAliveClosure,
5217                                        &cmsDrainMarkingStackClosure,
5218                                        NULL,
5219                                        _gc_timer_cm);
5220    }
5221    _gc_tracer_cm->report_gc_reference_stats(stats);
5222
5223  }
5224
5225  // This is the point where the entire marking should have completed.
5226  verify_work_stacks_empty();
5227
5228  if (should_unload_classes()) {
5229    {
5230      GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
5231
5232      // Unload classes and purge the SystemDictionary.
5233      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure, _gc_timer_cm);
5234
5235      // Unload nmethods.
5236      CodeCache::do_unloading(&_is_alive_closure, purged_class);
5237
5238      // Prune dead klasses from subklass/sibling/implementor lists.
5239      Klass::clean_weak_klass_links(&_is_alive_closure);
5240    }
5241
5242    {
5243      GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm);
5244      // Clean up unreferenced symbols in symbol table.
5245      SymbolTable::unlink();
5246    }
5247
5248    {
5249      GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm);
5250      // Delete entries for dead interned strings.
5251      StringTable::unlink(&_is_alive_closure);
5252    }
5253  }
5254
5255  // Restore any preserved marks as a result of mark stack or
5256  // work queue overflow
5257  restore_preserved_marks_if_any();  // done single-threaded for now
5258
5259  rp->set_enqueuing_is_done(true);
5260  if (rp->processing_is_mt()) {
5261    rp->balance_all_queues();
5262    CMSRefProcTaskExecutor task_executor(*this);
5263    rp->enqueue_discovered_references(&task_executor);
5264  } else {
5265    rp->enqueue_discovered_references(NULL);
5266  }
5267  rp->verify_no_references_recorded();
5268  assert(!rp->discovery_enabled(), "should have been disabled");
5269}
5270
5271#ifndef PRODUCT
5272void CMSCollector::check_correct_thread_executing() {
5273  Thread* t = Thread::current();
5274  // Only the VM thread or the CMS thread should be here.
5275  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5276         "Unexpected thread type");
5277  // If this is the vm thread, the foreground process
5278  // should not be waiting.  Note that _foregroundGCIsActive is
5279  // true while the foreground collector is waiting.
5280  if (_foregroundGCShouldWait) {
5281    // We cannot be the VM thread
5282    assert(t->is_ConcurrentGC_thread(),
5283           "Should be CMS thread");
5284  } else {
5285    // We can be the CMS thread only if we are in a stop-world
5286    // phase of CMS collection.
5287    if (t->is_ConcurrentGC_thread()) {
5288      assert(_collectorState == InitialMarking ||
5289             _collectorState == FinalMarking,
5290             "Should be a stop-world phase");
5291      // The CMS thread should be holding the CMS_token.
5292      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5293             "Potential interference with concurrently "
5294             "executing VM thread");
5295    }
5296  }
5297}
5298#endif
5299
5300void CMSCollector::sweep() {
5301  assert(_collectorState == Sweeping, "just checking");
5302  check_correct_thread_executing();
5303  verify_work_stacks_empty();
5304  verify_overflow_empty();
5305  increment_sweep_count();
5306  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5307
5308  _inter_sweep_timer.stop();
5309  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5310
5311  assert(!_intra_sweep_timer.is_active(), "Should not be active");
5312  _intra_sweep_timer.reset();
5313  _intra_sweep_timer.start();
5314  {
5315    GCTraceCPUTime tcpu;
5316    CMSPhaseAccounting pa(this, "Concurrent Sweep");
5317    // First sweep the old gen
5318    {
5319      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5320                               bitMapLock());
5321      sweepWork(_cmsGen);
5322    }
5323
5324    // Update Universe::_heap_*_at_gc figures.
5325    // We need all the free list locks to make the abstract state
5326    // transition from Sweeping to Resetting. See detailed note
5327    // further below.
5328    {
5329      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5330      // Update heap occupancy information which is used as
5331      // input to soft ref clearing policy at the next gc.
5332      Universe::update_heap_info_at_gc();
5333      _collectorState = Resizing;
5334    }
5335  }
5336  verify_work_stacks_empty();
5337  verify_overflow_empty();
5338
5339  if (should_unload_classes()) {
5340    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5341    // requires that the virtual spaces are stable and not deleted.
5342    ClassLoaderDataGraph::set_should_purge(true);
5343  }
5344
5345  _intra_sweep_timer.stop();
5346  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5347
5348  _inter_sweep_timer.reset();
5349  _inter_sweep_timer.start();
5350
5351  // We need to use a monotonically non-decreasing time in ms
5352  // or we will see time-warp warnings; os::javaTimeMillis()
5353  // does not guarantee monotonicity.
5354  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5355  update_time_of_last_gc(now);
5356
5357  // NOTE on abstract state transitions:
5358  // Mutators allocate-live and/or mark the mod-union table dirty
5359  // based on the state of the collection.  The former is done in
5360  // the interval [Marking, Sweeping] and the latter in the interval
5361  // [Marking, Sweeping).  Thus the transitions into the Marking state
5362  // and out of the Sweeping state must be synchronously visible
5363  // globally to the mutators.
5364  // The transition into the Marking state happens with the world
5365  // stopped so the mutators will globally see it.  Sweeping is
5366  // done asynchronously by the background collector so the transition
5367  // from the Sweeping state to the Resizing state must be done
5368  // under the freelistLock (as is the check for whether to
5369  // allocate-live and whether to dirty the mod-union table).
5370  assert(_collectorState == Resizing, "Change of collector state to"
5371    " Resizing must be done under the freelistLocks (plural)");
5372
5373  // Now that sweeping has been completed, we clear
5374  // the incremental_collection_failed flag,
5375  // thus inviting a younger gen collection to promote into
5376  // this generation. If such a promotion may still fail,
5377  // the flag will be set again when a young collection is
5378  // attempted.
5379  GenCollectedHeap* gch = GenCollectedHeap::heap();
5380  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5381  gch->update_full_collections_completed(_collection_count_start);
5382}
5383
5384// FIX ME!!! Looks like this belongs in CFLSpace, with
5385// CMSGen merely delegating to it.
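// Compute the threshold address used by isNearLargestChunk() below: a
// point nearLargestPercent (FLSLargestBlockCoalesceProximity) of the way
// from the bottom of the space to the largest block in the free list
// dictionary (or to the end of the space if the dictionary is empty),
// less MinChunkSize. The sweeper uses this threshold to favor coalescing
// in the vicinity of the largest block.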
5386void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5387  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5388  HeapWord*  minAddr        = _cmsSpace->bottom();
5389  HeapWord*  largestAddr    =
5390    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5391  if (largestAddr == NULL) {
5392    // The dictionary appears to be empty.  In this case
5393    // try to coalesce at the end of the heap.
5394    largestAddr = _cmsSpace->end();
5395  }
5396  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5397  size_t nearLargestOffset =
5398    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5399  log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5400                          p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5401  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5402}
5403
5404bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5405  return addr >= _cmsSpace->nearLargestChunk();
5406}
5407
5408FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5409  return _cmsSpace->find_chunk_at_end();
5410}
5411
5412void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5413                                                    bool full) {
5414  // If the young generation has been collected, gather any statistics
5415  // that are of interest at this point.
5416  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5417  if (!full && current_is_young) {
5418    // Gather statistics on the young generation collection.
5419    collector()->stats().record_gc0_end(used());
5420  }
5421}
5422
5423void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5424  // We iterate over the space(s) underlying this generation,
5425  // checking the mark bit map to see if the bits corresponding
5426  // to specific blocks are marked or not. Blocks that are
5427  // marked are live and are not swept up. All remaining blocks
5428  // are swept up, with coalescing on-the-fly as we sweep up
5429  // contiguous free and/or garbage blocks:
5430  // We need to ensure that the sweeper synchronizes with allocators
5431  // and stop-the-world collectors. In particular, the following
5432  // locks are used:
5433  // . CMS token: if this is held, a stop the world collection cannot occur
5434  // . freelistLock: if this is held no allocation can occur from this
5435  //                 generation by another thread
5436  // . bitMapLock: if this is held, no other thread can access or update
5437  //                 the marking bit map
5438
5439  // Note that we need to hold the freelistLock if we use
5440  // block iterate below; else the iterator might go awry if
5441  // a mutator (or promotion) causes block contents to change
5442  // (for instance if the allocator divvies up a block).
5443  // If we hold the free list lock, for all practical purposes
5444  // young generation GC's can't occur (they'll usually need to
5445  // promote), so we might as well prevent all young generation
5446  // GC's while we do a sweeping step. For the same reason, we might
5447  // as well take the bit map lock for the entire duration of the sweep.
5448
5449  // check that we hold the requisite locks
5450  assert(have_cms_token(), "Should hold cms token");
5451  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5452  assert_lock_strong(old_gen->freelistLock());
5453  assert_lock_strong(bitMapLock());
5454
5455  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5456  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5457  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5458                                          _inter_sweep_estimate.padded_average(),
5459                                          _intra_sweep_estimate.padded_average());
5460  old_gen->setNearLargestChunk();
5461
5462  {
5463    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5464    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5465    // We need to free-up/coalesce garbage/blocks from a
5466    // co-terminal free run. This is done in the SweepClosure
5467    // destructor; so, do not remove this scope, else the
5468    // end-of-sweep-census below will be off by a little bit.
5469  }
5470  old_gen->cmsSpace()->sweep_completed();
5471  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5472  if (should_unload_classes()) {                // unloaded classes this cycle,
5473    _concurrent_cycles_since_last_unload = 0;   // ... reset count
5474  } else {                                      // did not unload classes,
5475    _concurrent_cycles_since_last_unload++;     // ... increment count
5476  }
5477}
5478
5479// Reset CMS data structures (for now just the marking bit map)
5480// preparatory for the next cycle.
5481void CMSCollector::reset_concurrent() {
5482  CMSTokenSyncWithLocks ts(true, bitMapLock());
5483
5484  // If the state is not "Resetting", the foreground thread
5485  // has already done the collection and the resetting.
5486  if (_collectorState != Resetting) {
5487    assert(_collectorState == Idling, "The state should only change"
5488      " because the foreground collector has finished the collection");
5489    return;
5490  }
5491
5492  {
5493    // Clear the mark bitmap (no grey objects to start with)
5494    // for the next cycle.
5495    GCTraceCPUTime tcpu;
5496    CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5497
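    // The bit map is cleared in chunks of at most CMSBitMapYieldQuantum
    // words; between chunks we check whether the CMS thread should give
    // up the bitMapLock and the CMS token (e.g. to a pending foreground
    // collection) before continuing.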
5498    HeapWord* curAddr = _markBitMap.startWord();
5499    while (curAddr < _markBitMap.endWord()) {
5500      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5501      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5502      _markBitMap.clear_large_range(chunk);
5503      if (ConcurrentMarkSweepThread::should_yield() &&
5504          !foregroundGCIsActive() &&
5505          CMSYield) {
5506        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5507               "CMS thread should hold CMS token");
5508        assert_lock_strong(bitMapLock());
5509        bitMapLock()->unlock();
5510        ConcurrentMarkSweepThread::desynchronize(true);
5511        stopTimer();
5512        incrementYields();
5513
5514        // See the comment in coordinator_yield()
5515        for (unsigned i = 0; i < CMSYieldSleepCount &&
5516                         ConcurrentMarkSweepThread::should_yield() &&
5517                         !CMSCollector::foregroundGCIsActive(); ++i) {
5518          os::sleep(Thread::current(), 1, false);
5519        }
5520
5521        ConcurrentMarkSweepThread::synchronize(true);
5522        bitMapLock()->lock_without_safepoint_check();
5523        startTimer();
5524      }
5525      curAddr = chunk.end();
5526    }
5527    // A successful mostly concurrent collection has been done.
5528    // Because only the full (i.e., concurrent mode failure) collections
5529    // are being measured for gc overhead limits, clean the "near" flag
5530    // and count.
5531    size_policy()->reset_gc_overhead_limit_count();
5532    _collectorState = Idling;
5533  }
5534
5535  register_gc_end();
5536}
5537
5538// Same as above but for STW paths
5539void CMSCollector::reset_stw() {
5540  // already have the lock
5541  assert(_collectorState == Resetting, "just checking");
5542  assert_lock_strong(bitMapLock());
5543  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5544  _markBitMap.clear_all();
5545  _collectorState = Idling;
5546  register_gc_end();
5547}
5548
5549void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5550  GCTraceCPUTime tcpu;
5551  TraceCollectorStats tcs(counters());
5552
5553  switch (op) {
5554    case CMS_op_checkpointRootsInitial: {
5555      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5556      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5557      checkpointRootsInitial();
5558      break;
5559    }
5560    case CMS_op_checkpointRootsFinal: {
5561      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5562      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5563      checkpointRootsFinal();
5564      break;
5565    }
5566    default:
5567      fatal("No such CMS_op");
5568  }
5569}
5570
5571#ifndef PRODUCT
5572size_t const CMSCollector::skip_header_HeapWords() {
5573  return FreeChunk::header_size();
5574}
5575
5576// Try to collect here the conditions that should hold when
5577// CMS thread is exiting. The idea is that the foreground GC
5578// thread should not be blocked if it wants to terminate
5579// the CMS thread and yet continue to run the VM for a while
5580// after that.
5581void CMSCollector::verify_ok_to_terminate() const {
5582  assert(Thread::current()->is_ConcurrentGC_thread(),
5583         "should be called by CMS thread");
5584  assert(!_foregroundGCShouldWait, "should be false");
5585  // We could check here that all the various low-level locks
5586  // are not held by the CMS thread, but that is overkill; see
5587  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5588  // is checked.
5589}
5590#endif
5591
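// Compute the size of a block whose klass pointer cannot (yet) be read,
// using the "Printezis marks": in addition to the mark bit at addr, a bit
// is set at addr + 1 and at the last word of the block. The size is thus
// the distance from addr to one past the next marked word at or beyond
// addr + 2. For example, if addr and addr + 1 are marked and the next
// marked word at or after addr + 2 is addr + 6, the block occupies the
// words [addr, addr + 6] and its size is 7. Blocks must be at least
// 3 words long so that the bit at the last word lies at or beyond
// addr + 2 (cf. the assert below).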
5592size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5593  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5594         "missing Printezis mark?");
5595  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5596  size_t size = pointer_delta(nextOneAddr + 1, addr);
5597  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5598         "alignment problem");
5599  assert(size >= 3, "Necessary for Printezis marks to work");
5600  return size;
5601}
5602
5603// A variant of the above (block_size_using_printezis_bits()) except
5604// that we return 0 if the P-bits are not yet set.
5605size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5606  if (_markBitMap.isMarked(addr + 1)) {
5607    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5608    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5609    size_t size = pointer_delta(nextOneAddr + 1, addr);
5610    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5611           "alignment problem");
5612    assert(size >= 3, "Necessary for Printezis marks to work");
5613    return size;
5614  }
5615  return 0;
5616}
5617
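// Return the first card boundary at or beyond the end of the block
// starting at addr. The block size is taken from the object itself when
// its klass pointer is already installed, and from the Printezis bits
// otherwise; the end of the block is then rounded up to a multiple of
// CardTableModRefBS::card_size (typically 512 bytes).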
5618HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5619  size_t sz = 0;
5620  oop p = (oop)addr;
5621  if (p->klass_or_null_acquire() != NULL) {
5622    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5623  } else {
5624    sz = block_size_using_printezis_bits(addr);
5625  }
5626  assert(sz > 0, "size must be nonzero");
5627  HeapWord* next_block = addr + sz;
5628  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5629                                             CardTableModRefBS::card_size);
5630  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5631         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5632         "must be different cards");
5633  return next_card;
5634}
5635
5636
5637// CMS Bit Map Wrapper /////////////////////////////////////////
5638
5639// Construct a CMS bit map infrastructure, but don't create the
5640// bit vector itself. That is done by a separate call to CMSBitMap::allocate()
5641// further below.
5642CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5643  _bm(),
5644  _shifter(shifter),
5645  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5646                                    Monitor::_safepoint_check_sometimes) : NULL)
5647{
5648  _bmStartWord = 0;
5649  _bmWordSize  = 0;
5650}
5651
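// Reserve and commit the backing store for a bit map covering mr. One
// bit is needed per (1 << _shifter) HeapWords, so the reservation is
// sized at (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes, plus one
// byte of slop, rounded up to the allocation alignment. For example,
// with a shifter of 0 and 64-bit HeapWords this is one bit per word,
// i.e. roughly 1/64 of the covered region.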
5652bool CMSBitMap::allocate(MemRegion mr) {
5653  _bmStartWord = mr.start();
5654  _bmWordSize  = mr.word_size();
5655  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5656                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5657  if (!brs.is_reserved()) {
5658    log_warning(gc)("CMS bit map allocation failure");
5659    return false;
5660  }
5661  // For now we'll just commit all of the bit map up front.
5662  // Later on we'll try to be more parsimonious with swap.
5663  if (!_virtual_space.initialize(brs, brs.size())) {
5664    log_warning(gc)("CMS bit map backing store failure");
5665    return false;
5666  }
5667  assert(_virtual_space.committed_size() == brs.size(),
5668         "didn't reserve backing store for all of CMS bit map?");
5669  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5670         _bmWordSize, "inconsistency in bit map sizing");
5671  _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
5672
5673  // bm.clear(); // can we rely on getting zero'd memory? verify below
5674  assert(isAllClear(),
5675         "Expected zero'd memory from ReservedSpace constructor");
5676  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5677         "consistency check");
5678  return true;
5679}
5680
5681void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5682  HeapWord *next_addr, *end_addr, *last_addr;
5683  assert_locked();
5684  assert(covers(mr), "out-of-range error");
5685  // XXX assert that start and end are appropriately aligned
5686  for (next_addr = mr.start(), end_addr = mr.end();
5687       next_addr < end_addr; next_addr = last_addr) {
5688    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5689    last_addr = dirty_region.end();
5690    if (!dirty_region.is_empty()) {
5691      cl->do_MemRegion(dirty_region);
5692    } else {
5693      assert(last_addr == end_addr, "program logic");
5694      return;
5695    }
5696  }
5697}
5698
5699void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5700  _bm.print_on_error(st, prefix);
5701}
5702
5703#ifndef PRODUCT
5704void CMSBitMap::assert_locked() const {
5705  CMSLockVerifier::assert_locked(lock());
5706}
5707
5708bool CMSBitMap::covers(MemRegion mr) const {
5709  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5710  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5711         "size inconsistency");
5712  return (mr.start() >= _bmStartWord) &&
5713         (mr.end()   <= endWord());
5714}
5715
5716bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5717    return (start >= _bmStartWord && (start + size) <= endWord());
5718}
5719
5720void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5721  // verify that there are no 1 bits in the interval [left, right)
5722  FalseBitMapClosure falseBitMapClosure;
5723  iterate(&falseBitMapClosure, left, right);
5724}
5725
5726void CMSBitMap::region_invariant(MemRegion mr)
5727{
5728  assert_locked();
5729  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5730  assert(!mr.is_empty(), "unexpected empty region");
5731  assert(covers(mr), "mr should be covered by bit map");
5732  // convert address range into offset range
5733  size_t start_ofs = heapWordToOffset(mr.start());
5734  // Make sure that end() is appropriately aligned
5735  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5736                        (1 << (_shifter+LogHeapWordSize))),
5737         "Misaligned mr.end()");
5738  size_t end_ofs   = heapWordToOffset(mr.end());
5739  assert(end_ofs > start_ofs, "Should mark at least one bit");
5740}
5741
5742#endif
5743
5744bool CMSMarkStack::allocate(size_t size) {
5745  // allocate a stack of the requisite depth
5746  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5747                   size * sizeof(oop)));
5748  if (!rs.is_reserved()) {
5749    log_warning(gc)("CMSMarkStack allocation failure");
5750    return false;
5751  }
5752  if (!_virtual_space.initialize(rs, rs.size())) {
5753    log_warning(gc)("CMSMarkStack backing store failure");
5754    return false;
5755  }
5756  assert(_virtual_space.committed_size() == rs.size(),
5757         "didn't reserve backing store for all of CMS stack?");
5758  _base = (oop*)(_virtual_space.low());
5759  _index = 0;
5760  _capacity = size;
5761  NOT_PRODUCT(_max_depth = 0);
5762  return true;
5763}
5764
5765// XXX FIX ME !!! In the MT case we come in here holding a
5766// leaf lock. For printing we would need to take a further lock
5767// of lower rank. We need to recalibrate the two lock ranks
5768// involved in order to be able to print the messages below
5769// (or defer the printing to the caller). For now we take the
5770// expedient path of just disabling the messages for the
5771// problematic case.
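// Expansion doubles the capacity, up to MarkStackSizeMax. Note that the
// existing entries are not copied: the old backing store is released and
// _index is reset to zero, so callers must not rely on stack contents
// surviving an expansion. If the size limit has already been reached, or
// the larger reservation cannot be obtained, we note the fact (at most
// once per CMS cycle) and carry on with the existing stack.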
5772void CMSMarkStack::expand() {
5773  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5774  if (_capacity == MarkStackSizeMax) {
5775    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5776      // We print a warning message only once per CMS cycle.
5777      log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5778    }
5779    return;
5780  }
5781  // Double capacity if possible
5782  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5783  // Do not give up existing stack until we have managed to
5784  // get the double capacity that we desired.
5785  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5786                   new_capacity * sizeof(oop)));
5787  if (rs.is_reserved()) {
5788    // Release the backing store associated with old stack
5789    _virtual_space.release();
5790    // Reinitialize virtual space for new stack
5791    if (!_virtual_space.initialize(rs, rs.size())) {
5792      fatal("Not enough swap for expanded marking stack");
5793    }
5794    _base = (oop*)(_virtual_space.low());
5795    _index = 0;
5796    _capacity = new_capacity;
5797  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5798    // Failed to double the capacity; continue with the existing stack.
5799    // We print a detailed message only once per CMS cycle.
5800    log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5801                        _capacity / K, new_capacity / K);
5802  }
5803}
5804
5805
5806// Closures
5807// XXX: there seems to be a lot of code duplication here;
5808// should refactor and consolidate common code.
5809
5810// This closure is used to mark refs into the CMS generation in
5811// the CMS bit map. Called at the first checkpoint. This closure
5812// assumes that we do not need to re-mark dirty cards; if the CMS
5813// generation on which this is used is not the oldest
5814// generation, then this will lose younger_gen cards!
5815
5816MarkRefsIntoClosure::MarkRefsIntoClosure(
5817  MemRegion span, CMSBitMap* bitMap):
5818    _span(span),
5819    _bitMap(bitMap)
5820{
5821  assert(ref_processor() == NULL, "deliberately left NULL");
5822  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5823}
5824
5825void MarkRefsIntoClosure::do_oop(oop obj) {
5826  // if obj points into _span, then mark the corresponding bit in _bitMap
5827  assert(obj->is_oop(), "expected an oop");
5828  HeapWord* addr = (HeapWord*)obj;
5829  if (_span.contains(addr)) {
5830    // this should be made more efficient
5831    _bitMap->mark(addr);
5832  }
5833}
5834
5835void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
5836void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5837
5838ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5839  MemRegion span, CMSBitMap* bitMap):
5840    _span(span),
5841    _bitMap(bitMap)
5842{
5843  assert(ref_processor() == NULL, "deliberately left NULL");
5844  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5845}
5846
5847void ParMarkRefsIntoClosure::do_oop(oop obj) {
5848  // if obj points into _span, then mark the corresponding bit in _bitMap
5849  assert(obj->is_oop(), "expected an oop");
5850  HeapWord* addr = (HeapWord*)obj;
5851  if (_span.contains(addr)) {
5852    // this should be made more efficient
5853    _bitMap->par_mark(addr);
5854  }
5855}
5856
5857void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
5858void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5859
5860// A variant of the above, used for CMS marking verification.
5861MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5862  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5863    _span(span),
5864    _verification_bm(verification_bm),
5865    _cms_bm(cms_bm)
5866{
5867  assert(ref_processor() == NULL, "deliberately left NULL");
5868  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5869}
5870
5871void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5872  // if obj points into _span, then mark the corresponding bit in _verification_bm
5873  assert(obj->is_oop(), "expected an oop");
5874  HeapWord* addr = (HeapWord*)obj;
5875  if (_span.contains(addr)) {
5876    _verification_bm->mark(addr);
5877    if (!_cms_bm->isMarked(addr)) {
5878      Log(gc, verify) log;
5879      ResourceMark rm;
5880      oop(addr)->print_on(log.error_stream());
5881      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5882      fatal("... aborting");
5883    }
5884  }
5885}
5886
5887void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5888void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5889
5890//////////////////////////////////////////////////
5891// MarkRefsIntoAndScanClosure
5892//////////////////////////////////////////////////
5893
5894MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5895                                                       ReferenceProcessor* rp,
5896                                                       CMSBitMap* bit_map,
5897                                                       CMSBitMap* mod_union_table,
5898                                                       CMSMarkStack*  mark_stack,
5899                                                       CMSCollector* collector,
5900                                                       bool should_yield,
5901                                                       bool concurrent_precleaning):
5902  _collector(collector),
5903  _span(span),
5904  _bit_map(bit_map),
5905  _mark_stack(mark_stack),
5906  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5907                      mark_stack, concurrent_precleaning),
5908  _yield(should_yield),
5909  _concurrent_precleaning(concurrent_precleaning),
5910  _freelistLock(NULL)
5911{
5912  // FIXME: Should initialize in base class constructor.
5913  assert(rp != NULL, "ref_processor shouldn't be NULL");
5914  set_ref_processor_internal(rp);
5915}
5916
5917// This closure is used to mark refs into the CMS generation at the
5918// second (final) checkpoint, and to scan and transitively follow
5919// the unmarked oops. It is also used during the concurrent precleaning
5920// phase while scanning objects on dirty cards in the CMS generation.
5921// The marks are made in the marking bit map and the marking stack is
5922// used for keeping the (newly) grey objects during the scan.
5923// The parallel version (ParMarkRefsIntoAndScanClosure) appears further below.
5924void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5925  if (obj != NULL) {
5926    assert(obj->is_oop(), "expected an oop");
5927    HeapWord* addr = (HeapWord*)obj;
5928    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5929    assert(_collector->overflow_list_is_empty(),
5930           "overflow list should be empty");
5931    if (_span.contains(addr) &&
5932        !_bit_map->isMarked(addr)) {
5933      // mark bit map (object is now grey)
5934      _bit_map->mark(addr);
5935      // push on marking stack (stack should be empty), and drain the
5936      // stack by applying this closure to the oops in the oops popped
5937      // from the stack (i.e. blacken the grey objects)
5938      bool res = _mark_stack->push(obj);
5939      assert(res, "Should have space to push on empty stack");
5940      do {
5941        oop new_oop = _mark_stack->pop();
5942        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
5943        assert(_bit_map->isMarked((HeapWord*)new_oop),
5944               "only grey objects on this stack");
5945        // iterate over the oops in this oop, marking and pushing
5946        // the ones in CMS heap (i.e. in _span).
5947        new_oop->oop_iterate(&_pushAndMarkClosure);
5948        // check if it's time to yield
5949        do_yield_check();
5950      } while (!_mark_stack->isEmpty() ||
5951               (!_concurrent_precleaning && take_from_overflow_list()));
5952        // if marking stack is empty, and we are not doing this
5953        // during precleaning, then check the overflow list
5954    }
5955    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5956    assert(_collector->overflow_list_is_empty(),
5957           "overflow list was drained above");
5958
5959    assert(_collector->no_preserved_marks(),
5960           "All preserved marks should have been restored above");
5961  }
5962}
5963
5964void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5965void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5966
5967void MarkRefsIntoAndScanClosure::do_yield_work() {
5968  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5969         "CMS thread should hold CMS token");
5970  assert_lock_strong(_freelistLock);
5971  assert_lock_strong(_bit_map->lock());
5972  // relinquish the free_list_lock and bitMaplock()
5973  _bit_map->lock()->unlock();
5974  _freelistLock->unlock();
5975  ConcurrentMarkSweepThread::desynchronize(true);
5976  _collector->stopTimer();
5977  _collector->incrementYields();
5978
5979  // See the comment in coordinator_yield()
5980  for (unsigned i = 0;
5981       i < CMSYieldSleepCount &&
5982       ConcurrentMarkSweepThread::should_yield() &&
5983       !CMSCollector::foregroundGCIsActive();
5984       ++i) {
5985    os::sleep(Thread::current(), 1, false);
5986  }
5987
5988  ConcurrentMarkSweepThread::synchronize(true);
5989  _freelistLock->lock_without_safepoint_check();
5990  _bit_map->lock()->lock_without_safepoint_check();
5991  _collector->startTimer();
5992}
5993
5994///////////////////////////////////////////////////////////
5995// ParMarkRefsIntoAndScanClosure: a parallel version of
5996//                                MarkRefsIntoAndScanClosure
5997///////////////////////////////////////////////////////////
5998ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
5999  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6000  CMSBitMap* bit_map, OopTaskQueue* work_queue):
6001  _span(span),
6002  _bit_map(bit_map),
6003  _work_queue(work_queue),
6004  _low_water_mark(MIN2((work_queue->max_elems()/4),
6005                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6006  _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6007{
6008  // FIXME: Should initialize in base class constructor.
6009  assert(rp != NULL, "ref_processor shouldn't be NULL");
6010  set_ref_processor_internal(rp);
6011}
6012
6013// This closure is used to mark refs into the CMS generation at the
6014// second (final) checkpoint, and to scan and transitively follow
6015// the unmarked oops. The marks are made in the marking bit map and
6016// the work_queue is used for keeping the (newly) grey objects during
6017// the scan phase whence they are also available for stealing by parallel
6018// threads. Since the marking bit map is shared, updates are
6019// synchronized (via CAS).
6020void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6021  if (obj != NULL) {
6022    // Ignore mark word because this could be an already marked oop
6023    // that may be chained at the end of the overflow list.
6024    assert(obj->is_oop(true), "expected an oop");
6025    HeapWord* addr = (HeapWord*)obj;
6026    if (_span.contains(addr) &&
6027        !_bit_map->isMarked(addr)) {
6028      // mark bit map (object will become grey):
6029      // It is possible for several threads to be
6030      // trying to "claim" this object concurrently;
6031      // the unique thread that succeeds in marking the
6032      // object first will do the subsequent push on
6033      // to the work queue (or overflow list).
6034      if (_bit_map->par_mark(addr)) {
6035        // push on work_queue (which may not be empty), and trim the
6036        // queue to an appropriate length by applying this closure to
6037        // the oops in the oops popped from the stack (i.e. blacken the
6038        // grey objects)
6039        bool res = _work_queue->push(obj);
6040        assert(res, "Low water mark should be less than capacity?");
6041        trim_queue(_low_water_mark);
6042      } // Else, another thread claimed the object
6043    }
6044  }
6045}
6046
6047void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6048void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6049
6050// This closure is used to rescan the marked objects on the dirty cards
6051// in the mod union table and the card table proper.
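// do_object_careful_m() returns the (adjusted) size of the block at p,
// scanning its interior oops when p is a marked, initialized object:
// objArrays are scanned only over the dirty region mr (they are marked
// precisely), while other objects are scanned in their entirety. For
// blocks whose klass is not yet installed the size is taken from the
// Printezis bits when available; a return value of 0 tells the caller
// either that the block could not yet be sized or that an abort was
// requested at a yield point.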
6052size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6053  oop p, MemRegion mr) {
6054
6055  size_t size = 0;
6056  HeapWord* addr = (HeapWord*)p;
6057  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6058  assert(_span.contains(addr), "we are scanning the CMS generation");
6059  // check if it's time to yield
6060  if (do_yield_check()) {
6061    // We yielded for some foreground stop-world work,
6062    // and we have been asked to abort this ongoing preclean cycle.
6063    return 0;
6064  }
6065  if (_bitMap->isMarked(addr)) {
6066    // it's marked; is it potentially uninitialized?
6067    if (p->klass_or_null_acquire() != NULL) {
6068        // an initialized object; ignore mark word in verification below
6069        // since we are running concurrent with mutators
6070        assert(p->is_oop(true), "should be an oop");
6071        if (p->is_objArray()) {
6072          // objArrays are precisely marked; restrict scanning
6073          // to dirty cards only.
6074          size = CompactibleFreeListSpace::adjustObjectSize(
6075                   p->oop_iterate_size(_scanningClosure, mr));
6076        } else {
6077          // A non-array may have been imprecisely marked; we need
6078          // to scan the object in its entirety.
6079          size = CompactibleFreeListSpace::adjustObjectSize(
6080                   p->oop_iterate_size(_scanningClosure));
6081        }
6082      #ifdef ASSERT
6083        size_t direct_size =
6084          CompactibleFreeListSpace::adjustObjectSize(p->size());
6085        assert(size == direct_size, "Inconsistency in size");
6086        assert(size >= 3, "Necessary for Printezis marks to work");
6087        HeapWord* start_pbit = addr + 1;
6088        HeapWord* end_pbit = addr + size - 1;
6089        assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
6090               "inconsistent Printezis mark");
6091        // Verify inner mark bits (between Printezis bits) are clear,
6092        // but don't repeat if there are multiple dirty regions for
6093        // the same object, to avoid potential O(N^2) performance.
6094        if (addr != _last_scanned_object) {
6095          _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
6096          _last_scanned_object = addr;
6097        }
6098      #endif // ASSERT
6099    } else {
6100      // An uninitialized object.
6101      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6102      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6103      size = pointer_delta(nextOneAddr + 1, addr);
6104      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6105             "alignment problem");
6106      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6107      // will dirty the card when the klass pointer is installed in the
6108      // object (signaling the completion of initialization).
6109    }
6110  } else {
6111    // Either a not yet marked object or an uninitialized object
6112    if (p->klass_or_null_acquire() == NULL) {
6113      // An uninitialized object, skip to the next card, since
6114      // we may not be able to read its P-bits yet.
6115      assert(size == 0, "Initial value");
6116    } else {
6117      // An object not (yet) reached by marking: we merely need to
6118      // compute its size so as to go look at the next block.
6119      assert(p->is_oop(true), "should be an oop");
6120      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6121    }
6122  }
6123  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6124  return size;
6125}
6126
6127void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6128  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6129         "CMS thread should hold CMS token");
6130  assert_lock_strong(_freelistLock);
6131  assert_lock_strong(_bitMap->lock());
6132  // relinquish the free_list_lock and bitMaplock()
6133  _bitMap->lock()->unlock();
6134  _freelistLock->unlock();
6135  ConcurrentMarkSweepThread::desynchronize(true);
6136  _collector->stopTimer();
6137  _collector->incrementYields();
6138
6139  // See the comment in coordinator_yield()
6140  for (unsigned i = 0; i < CMSYieldSleepCount &&
6141                   ConcurrentMarkSweepThread::should_yield() &&
6142                   !CMSCollector::foregroundGCIsActive(); ++i) {
6143    os::sleep(Thread::current(), 1, false);
6144  }
6145
6146  ConcurrentMarkSweepThread::synchronize(true);
6147  _freelistLock->lock_without_safepoint_check();
6148  _bitMap->lock()->lock_without_safepoint_check();
6149  _collector->startTimer();
6150}
6151
6152
6153//////////////////////////////////////////////////////////////////
6154// SurvivorSpacePrecleanClosure
6155//////////////////////////////////////////////////////////////////
6156// This (single-threaded) closure is used to preclean the oops in
6157// the survivor spaces.
6158size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6159
6160  HeapWord* addr = (HeapWord*)p;
6161  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6162  assert(!_span.contains(addr), "we are scanning the survivor spaces");
6163  assert(p->klass_or_null() != NULL, "object should be initialized");
6164  // an initialized object; ignore mark word in verification below
6165  // since we are running concurrent with mutators
6166  assert(p->is_oop(true), "should be an oop");
6167  // Note that we do not yield while we iterate over
6168  // the interior oops of p, pushing the relevant ones
6169  // on our marking stack.
6170  size_t size = p->oop_iterate_size(_scanning_closure);
6171  do_yield_check();
6172  // Observe that below, we do not abandon the preclean
6173  // phase as soon as we should; rather we empty the
6174  // marking stack before returning. This is to satisfy
6175  // some existing assertions. In general, it may be a
6176  // good idea to abort immediately and complete the marking
6177  // from the grey objects at a later time.
6178  while (!_mark_stack->isEmpty()) {
6179    oop new_oop = _mark_stack->pop();
6180    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6181    assert(_bit_map->isMarked((HeapWord*)new_oop),
6182           "only grey objects on this stack");
6183    // iterate over the oops in this oop, marking and pushing
6184    // the ones in CMS heap (i.e. in _span).
6185    new_oop->oop_iterate(_scanning_closure);
6186    // check if it's time to yield
6187    do_yield_check();
6188  }
6189  unsigned int after_count =
6190    GenCollectedHeap::heap()->total_collections();
6191  bool abort = (_before_count != after_count) ||
6192               _collector->should_abort_preclean();
6193  return abort ? 0 : size;
6194}
6195
6196void SurvivorSpacePrecleanClosure::do_yield_work() {
6197  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6198         "CMS thread should hold CMS token");
6199  assert_lock_strong(_bit_map->lock());
6200  // Relinquish the bit map lock
6201  _bit_map->lock()->unlock();
6202  ConcurrentMarkSweepThread::desynchronize(true);
6203  _collector->stopTimer();
6204  _collector->incrementYields();
6205
6206  // See the comment in coordinator_yield()
6207  for (unsigned i = 0; i < CMSYieldSleepCount &&
6208                       ConcurrentMarkSweepThread::should_yield() &&
6209                       !CMSCollector::foregroundGCIsActive(); ++i) {
6210    os::sleep(Thread::current(), 1, false);
6211  }
6212
6213  ConcurrentMarkSweepThread::synchronize(true);
6214  _bit_map->lock()->lock_without_safepoint_check();
6215  _collector->startTimer();
6216}
6217
6218// This closure is used to rescan the marked objects on the dirty cards
6219// in the mod union table and the card table proper. In the parallel
6220// case, although the bitMap is shared, we do a single read so the
6221// isMarked() query is "safe".
6222bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6223  // Ignore mark word because we are running concurrent with mutators
6224  assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6225  HeapWord* addr = (HeapWord*)p;
6226  assert(_span.contains(addr), "we are scanning the CMS generation");
6227  bool is_obj_array = false;
6228  #ifdef ASSERT
6229    if (!_parallel) {
6230      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6231      assert(_collector->overflow_list_is_empty(),
6232             "overflow list should be empty");
6233
6234    }
6235  #endif // ASSERT
6236  if (_bit_map->isMarked(addr)) {
6237    // Obj arrays are precisely marked, non-arrays are not;
6238    // so we scan objArrays precisely and non-arrays in their
6239    // entirety.
6240    if (p->is_objArray()) {
6241      is_obj_array = true;
6242      if (_parallel) {
6243        p->oop_iterate(_par_scan_closure, mr);
6244      } else {
6245        p->oop_iterate(_scan_closure, mr);
6246      }
6247    } else {
6248      if (_parallel) {
6249        p->oop_iterate(_par_scan_closure);
6250      } else {
6251        p->oop_iterate(_scan_closure);
6252      }
6253    }
6254  }
6255  #ifdef ASSERT
6256    if (!_parallel) {
6257      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6258      assert(_collector->overflow_list_is_empty(),
6259             "overflow list should be empty");
6260
6261    }
6262  #endif // ASSERT
6263  return is_obj_array;
6264}
6265
6266MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6267                        MemRegion span,
6268                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
6269                        bool should_yield, bool verifying):
6270  _collector(collector),
6271  _span(span),
6272  _bitMap(bitMap),
6273  _mut(&collector->_modUnionTable),
6274  _markStack(markStack),
6275  _yield(should_yield),
6276  _skipBits(0)
6277{
6278  assert(_markStack->isEmpty(), "stack should be empty");
6279  _finger = _bitMap->startWord();
6280  _threshold = _finger;
6281  assert(_collector->_restart_addr == NULL, "Sanity check");
6282  assert(_span.contains(_finger), "Out of bounds _finger?");
6283  DEBUG_ONLY(_verifying = verifying;)
6284}
6285
6286void MarkFromRootsClosure::reset(HeapWord* addr) {
6287  assert(_markStack->isEmpty(), "would cause duplicates on stack");
6288  assert(_span.contains(addr), "Out of bounds _finger?");
6289  _finger = addr;
6290  _threshold = (HeapWord*)round_to(
6291                 (intptr_t)_finger, CardTableModRefBS::card_size);
6292}
6293
6294// Should revisit to see if this should be restructured for
6295// greater efficiency.
6296bool MarkFromRootsClosure::do_bit(size_t offset) {
6297  if (_skipBits > 0) {
6298    _skipBits--;
6299    return true;
6300  }
6301  // convert offset into a HeapWord*
6302  HeapWord* addr = _bitMap->startWord() + offset;
6303  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6304         "address out of range");
6305  assert(_bitMap->isMarked(addr), "tautology");
6306  if (_bitMap->isMarked(addr+1)) {
6307    // this is an allocated but not yet initialized object
6308    assert(_skipBits == 0, "tautology");
6309    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6310    oop p = oop(addr);
6311    if (p->klass_or_null_acquire() == NULL) {
6312      DEBUG_ONLY(if (!_verifying) {)
6313        // We re-dirty the cards on which this object lies and increase
6314        // the _threshold so that we'll come back to scan this object
6315        // during the preclean or remark phase. (CMSCleanOnEnter)
6316        if (CMSCleanOnEnter) {
6317          size_t sz = _collector->block_size_using_printezis_bits(addr);
6318          HeapWord* end_card_addr   = (HeapWord*)round_to(
6319                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6320          MemRegion redirty_range = MemRegion(addr, end_card_addr);
6321          assert(!redirty_range.is_empty(), "Arithmetical tautology");
6322          // Bump _threshold to end_card_addr; note that
6323          // _threshold cannot possibly exceed end_card_addr, anyhow.
6324          // This prevents future clearing of the card as the scan proceeds
6325          // to the right.
6326          assert(_threshold <= end_card_addr,
6327                 "Because we are just scanning into this object");
6328          if (_threshold < end_card_addr) {
6329            _threshold = end_card_addr;
6330          }
6331          if (p->klass_or_null_acquire() != NULL) {
6332            // Redirty the range of cards...
6333            _mut->mark_range(redirty_range);
6334          } // ...else the setting of klass will dirty the card anyway.
6335        }
6336      DEBUG_ONLY(})
6337      return true;
6338    }
6339  }
6340  scanOopsInOop(addr);
6341  return true;
6342}
6343
6344// We take a break if we've been at this for a while,
6345// so as to avoid monopolizing the locks involved.
6346void MarkFromRootsClosure::do_yield_work() {
6347  // First give up the locks, then yield, then re-lock
6348  // We should probably use a constructor/destructor idiom to
6349  // do this unlock/lock or modify the MutexUnlocker class to
6350  // serve our purpose. XXX
6351  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6352         "CMS thread should hold CMS token");
6353  assert_lock_strong(_bitMap->lock());
6354  _bitMap->lock()->unlock();
6355  ConcurrentMarkSweepThread::desynchronize(true);
6356  _collector->stopTimer();
6357  _collector->incrementYields();
6358
6359  // See the comment in coordinator_yield()
6360  for (unsigned i = 0; i < CMSYieldSleepCount &&
6361                       ConcurrentMarkSweepThread::should_yield() &&
6362                       !CMSCollector::foregroundGCIsActive(); ++i) {
6363    os::sleep(Thread::current(), 1, false);
6364  }
6365
6366  ConcurrentMarkSweepThread::synchronize(true);
6367  _bitMap->lock()->lock_without_safepoint_check();
6368  _collector->startTimer();
6369}
6370
6371void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6372  assert(_bitMap->isMarked(ptr), "expected bit to be set");
6373  assert(_markStack->isEmpty(),
6374         "should drain stack to limit stack usage");
6375  // convert ptr to an oop preparatory to scanning
6376  oop obj = oop(ptr);
6377  // Ignore mark word in verification below, since we
6378  // may be running concurrent with mutators.
6379  assert(obj->is_oop(true), "should be an oop");
6380  assert(_finger <= ptr, "_finger runneth ahead");
6381  // advance the finger to right end of this object
6382  _finger = ptr + obj->size();
6383  assert(_finger > ptr, "we just incremented it above");
6384  // On large heaps, it may take us some time to get through
6385  // the marking phase. During this time it's possible that a lot
6386  // of mutations have accumulated in the card table and the
6387  // mod union table --
6388  // these mutation records are redundant until we have
6389  // actually traced into the corresponding card.
6390  // Here, we check whether advancing the finger would make
6391  // us cross into a new card, and if so clear corresponding
6392  // cards in the MUT (preclean them in the card-table in the
6393  // future).
6394
6395  DEBUG_ONLY(if (!_verifying) {)
6396    // The clean-on-enter optimization is disabled by default,
6397    // until we fix 6178663.
6398    if (CMSCleanOnEnter && (_finger > _threshold)) {
6399      // [_threshold, _finger) represents the interval
6400      // of cards to be cleared  in MUT (or precleaned in card table).
6401      // The set of cards to be cleared is all those that overlap
6402      // with the interval [_threshold, _finger); note that
6403      // _threshold is always kept card-aligned but _finger isn't
6404      // always card-aligned.
6405      HeapWord* old_threshold = _threshold;
6406      assert(old_threshold == (HeapWord*)round_to(
6407              (intptr_t)old_threshold, CardTableModRefBS::card_size),
6408             "_threshold should always be card-aligned");
6409      _threshold = (HeapWord*)round_to(
6410                     (intptr_t)_finger, CardTableModRefBS::card_size);
6411      MemRegion mr(old_threshold, _threshold);
6412      assert(!mr.is_empty(), "Control point invariant");
6413      assert(_span.contains(mr), "Should clear within span");
6414      _mut->clear_range(mr);
6415    }
6416  DEBUG_ONLY(})
6417  // Note: the finger doesn't advance while we drain
6418  // the stack below.
6419  PushOrMarkClosure pushOrMarkClosure(_collector,
6420                                      _span, _bitMap, _markStack,
6421                                      _finger, this);
6422  bool res = _markStack->push(obj);
6423  assert(res, "Empty non-zero size stack should have space for single push");
6424  while (!_markStack->isEmpty()) {
6425    oop new_oop = _markStack->pop();
6426    // Skip verifying header mark word below because we are
6427    // running concurrent with mutators.
6428    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6429    // now scan this oop's oops
6430    new_oop->oop_iterate(&pushOrMarkClosure);
6431    do_yield_check();
6432  }
6433  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6434}
6435
6436ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6437                       CMSCollector* collector, MemRegion span,
6438                       CMSBitMap* bit_map,
6439                       OopTaskQueue* work_queue,
6440                       CMSMarkStack*  overflow_stack):
6441  _collector(collector),
6442  _whole_span(collector->_span),
6443  _span(span),
6444  _bit_map(bit_map),
6445  _mut(&collector->_modUnionTable),
6446  _work_queue(work_queue),
6447  _overflow_stack(overflow_stack),
6448  _skip_bits(0),
6449  _task(task)
6450{
6451  assert(_work_queue->size() == 0, "work_queue should be empty");
6452  _finger = span.start();
6453  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6454  assert(_span.contains(_finger), "Out of bounds _finger?");
6455}
6456
6457// Should revisit to see if this should be restructured for
6458// greater efficiency.
6459bool ParMarkFromRootsClosure::do_bit(size_t offset) {
6460  if (_skip_bits > 0) {
6461    _skip_bits--;
6462    return true;
6463  }
6464  // convert offset into a HeapWord*
6465  HeapWord* addr = _bit_map->startWord() + offset;
6466  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6467         "address out of range");
6468  assert(_bit_map->isMarked(addr), "tautology");
6469  if (_bit_map->isMarked(addr+1)) {
6470    // this is an allocated object that might not yet be initialized
6471    assert(_skip_bits == 0, "tautology");
6472    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6473    oop p = oop(addr);
6474    if (p->klass_or_null_acquire() == NULL) {
6475      // in the case of the Clean-on-Enter optimization, redirty the card
6476      // and avoid clearing it by increasing the threshold.
6477      return true;
6478    }
6479  }
6480  scan_oops_in_oop(addr);
6481  return true;
6482}
6483
6484void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6485  assert(_bit_map->isMarked(ptr), "expected bit to be set");
6486  // Should we assert that our work queue is empty or
6487  // below some drain limit?
6488  assert(_work_queue->size() == 0,
6489         "should drain stack to limit stack usage");
6490  // convert ptr to an oop preparatory to scanning
6491  oop obj = oop(ptr);
6492  // Ignore mark word in verification below, since we
6493  // may be running concurrent with mutators.
6494  assert(obj->is_oop(true), "should be an oop");
6495  assert(_finger <= ptr, "_finger runneth ahead");
6496  // advance the finger to right end of this object
6497  _finger = ptr + obj->size();
6498  assert(_finger > ptr, "we just incremented it above");
6499  // On large heaps, it may take us some time to get through
6500  // the marking phase. During
6501  // this time it's possible that a lot of mutations have
6502  // accumulated in the card table and the mod union table --
6503  // these mutation records are redundant until we have
6504  // actually traced into the corresponding card.
6505  // Here, we check whether advancing the finger would make
6506  // us cross into a new card, and if so, clear the corresponding
6507  // cards in the MUT (preclean them in the card-table in the
6508  // future).
6509
6510  // The clean-on-enter optimization is disabled by default,
6511  // until we fix 6178663.
6512  if (CMSCleanOnEnter && (_finger > _threshold)) {
6513    // [_threshold, _finger) represents the interval
6514    // of cards to be cleared  in MUT (or precleaned in card table).
6515    // The set of cards to be cleared is all those that overlap
6516    // with the interval [_threshold, _finger); note that
6517    // _threshold is always kept card-aligned but _finger isn't
6518    // always card-aligned.
6519    HeapWord* old_threshold = _threshold;
6520    assert(old_threshold == (HeapWord*)round_to(
6521            (intptr_t)old_threshold, CardTableModRefBS::card_size),
6522           "_threshold should always be card-aligned");
6523    _threshold = (HeapWord*)round_to(
6524                   (intptr_t)_finger, CardTableModRefBS::card_size);
6525    MemRegion mr(old_threshold, _threshold);
6526    assert(!mr.is_empty(), "Control point invariant");
6527    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6528    _mut->clear_range(mr);
6529  }
6530
6531  // Note: the local finger doesn't advance while we drain
6532  // the stack below, but the global finger sure can and will.
6533  HeapWord* volatile* gfa = _task->global_finger_addr();
6534  ParPushOrMarkClosure pushOrMarkClosure(_collector,
6535                                         _span, _bit_map,
6536                                         _work_queue,
6537                                         _overflow_stack,
6538                                         _finger,
6539                                         gfa, this);
6540  bool res = _work_queue->push(obj);   // overflow could occur here
6541  assert(res, "Will hold once we use workqueues");
6542  while (true) {
6543    oop new_oop;
6544    if (!_work_queue->pop_local(new_oop)) {
6545      // We emptied our work_queue; check if there's anything that can
6546      // be taken from the overflow stack.
6547      if (CMSConcMarkingTask::get_work_from_overflow_stack(
6548            _overflow_stack, _work_queue)) {
6549        do_yield_check();
6550        continue;
6551      } else {  // done
6552        break;
6553      }
6554    }
6555    // Skip verifying header mark word below because we are
6556    // running concurrent with mutators.
6557    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6558    // now scan this oop's oops
6559    new_oop->oop_iterate(&pushOrMarkClosure);
6560    do_yield_check();
6561  }
6562  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6563}
6564
6565// Yield in response to a request from VM Thread or
6566// from mutators.
6567void ParMarkFromRootsClosure::do_yield_work() {
6568  assert(_task != NULL, "sanity");
6569  _task->yield();
6570}
6571
6572// A variant of the above used for verifying CMS marking work.
6573MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6574                        MemRegion span,
6575                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6576                        CMSMarkStack*  mark_stack):
6577  _collector(collector),
6578  _span(span),
6579  _verification_bm(verification_bm),
6580  _cms_bm(cms_bm),
6581  _mark_stack(mark_stack),
6582  _pam_verify_closure(collector, span, verification_bm, cms_bm,
6583                      mark_stack)
6584{
6585  assert(_mark_stack->isEmpty(), "stack should be empty");
6586  _finger = _verification_bm->startWord();
6587  assert(_collector->_restart_addr == NULL, "Sanity check");
6588  assert(_span.contains(_finger), "Out of bounds _finger?");
6589}
6590
6591void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6592  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6593  assert(_span.contains(addr), "Out of bounds _finger?");
6594  _finger = addr;
6595}
6596
6597// Should revisit to see if this should be restructured for
6598// greater efficiency.
6599bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6600  // convert offset into a HeapWord*
6601  HeapWord* addr = _verification_bm->startWord() + offset;
6602  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6603         "address out of range");
6604  assert(_verification_bm->isMarked(addr), "tautology");
6605  assert(_cms_bm->isMarked(addr), "tautology");
6606
6607  assert(_mark_stack->isEmpty(),
6608         "should drain stack to limit stack usage");
6609  // convert addr to an oop preparatory to scanning
6610  oop obj = oop(addr);
6611  assert(obj->is_oop(), "should be an oop");
6612  assert(_finger <= addr, "_finger runneth ahead");
6613  // advance the finger to right end of this object
6614  _finger = addr + obj->size();
6615  assert(_finger > addr, "we just incremented it above");
6616  // Note: the finger doesn't advance while we drain
6617  // the stack below.
6618  bool res = _mark_stack->push(obj);
6619  assert(res, "Empty non-zero size stack should have space for single push");
6620  while (!_mark_stack->isEmpty()) {
6621    oop new_oop = _mark_stack->pop();
6622    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6623    // now scan this oop's oops
6624    new_oop->oop_iterate(&_pam_verify_closure);
6625  }
6626  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6627  return true;
6628}
6629
6630PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6631  CMSCollector* collector, MemRegion span,
6632  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6633  CMSMarkStack*  mark_stack):
6634  MetadataAwareOopClosure(collector->ref_processor()),
6635  _collector(collector),
6636  _span(span),
6637  _verification_bm(verification_bm),
6638  _cms_bm(cms_bm),
6639  _mark_stack(mark_stack)
6640{ }
6641
6642void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6643void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6644
6645// Upon stack overflow, we discard (part of) the stack,
6646// remembering the least address amongst those discarded
6647// in CMSCollector's _restart_address.
6648void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6649  // Remember the least grey address discarded
6650  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6651  _collector->lower_restart_addr(ra);
6652  _mark_stack->reset();  // discard stack contents
6653  _mark_stack->expand(); // expand the stack if possible
6654}
6655
6656void PushAndMarkVerifyClosure::do_oop(oop obj) {
6657  assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6658  HeapWord* addr = (HeapWord*)obj;
6659  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6660    // Oop lies in _span and isn't yet grey or black
6661    _verification_bm->mark(addr);            // now grey
6662    if (!_cms_bm->isMarked(addr)) {
6663      Log(gc, verify) log;
6664      ResourceMark rm;
6665      oop(addr)->print_on(log.error_stream());
6666      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6667      fatal("... aborting");
6668    }
6669
6670    if (!_mark_stack->push(obj)) { // stack overflow
6671      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6672      assert(_mark_stack->isFull(), "Else push should have succeeded");
6673      handle_stack_overflow(addr);
6674    }
6675    // anything including and to the right of _finger
6676    // will be scanned as we iterate over the remainder of the
6677    // bit map
6678  }
6679}
6680
6681PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6682                     MemRegion span,
6683                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
6684                     HeapWord* finger, MarkFromRootsClosure* parent) :
6685  MetadataAwareOopClosure(collector->ref_processor()),
6686  _collector(collector),
6687  _span(span),
6688  _bitMap(bitMap),
6689  _markStack(markStack),
6690  _finger(finger),
6691  _parent(parent)
6692{ }
6693
6694ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6695                                           MemRegion span,
6696                                           CMSBitMap* bit_map,
6697                                           OopTaskQueue* work_queue,
6698                                           CMSMarkStack*  overflow_stack,
6699                                           HeapWord* finger,
6700                                           HeapWord* volatile* global_finger_addr,
6701                                           ParMarkFromRootsClosure* parent) :
6702  MetadataAwareOopClosure(collector->ref_processor()),
6703  _collector(collector),
6704  _whole_span(collector->_span),
6705  _span(span),
6706  _bit_map(bit_map),
6707  _work_queue(work_queue),
6708  _overflow_stack(overflow_stack),
6709  _finger(finger),
6710  _global_finger_addr(global_finger_addr),
6711  _parent(parent)
6712{ }
6713
6714// Assumes thread-safe access by callers, who are
6715// responsible for mutual exclusion.
6716void CMSCollector::lower_restart_addr(HeapWord* low) {
6717  assert(_span.contains(low), "Out of bounds addr");
6718  if (_restart_addr == NULL) {
6719    _restart_addr = low;
6720  } else {
6721    _restart_addr = MIN2(_restart_addr, low);
6722  }
6723}
6724
6725// Upon stack overflow, we discard (part of) the stack,
6726// remembering the least address amongst those discarded
6727// in CMSCollector's _restart_address.
6728void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6729  // Remember the least grey address discarded
6730  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6731  _collector->lower_restart_addr(ra);
6732  _markStack->reset();  // discard stack contents
6733  _markStack->expand(); // expand the stack if possible
6734}
6735
6736// Upon stack overflow, we discard (part of) the stack,
6737// remembering the least address amongst those discarded
6738// in CMSCollector's _restart_address.
6739void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6740  // We need to do this under a mutex to prevent other
6741  // workers from interfering with the work done below.
6742  MutexLockerEx ml(_overflow_stack->par_lock(),
6743                   Mutex::_no_safepoint_check_flag);
6744  // Remember the least grey address discarded
6745  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6746  _collector->lower_restart_addr(ra);
6747  _overflow_stack->reset();  // discard stack contents
6748  _overflow_stack->expand(); // expand the stack if possible
6749}
6750
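// ---------------------------------------------------------------------------
// Illustrative, standalone sketch of the stack-overflow recovery protocol
// shared by the handle_stack_overflow() variants above: remember the least
// grey address being discarded, discard the stack contents, and let a later
// pass restart marking from that address.  ToyMarkStack and ToyCollector are
// made up for the example and are not the CMSMarkStack/CMSCollector APIs.
// ---------------------------------------------------------------------------
#if 0  // compiled out on purpose: documentation-only example
#include <algorithm>
#include <cstdint>
#include <vector>

typedef uintptr_t Addr;

struct ToyMarkStack {
  std::vector<Addr> elems;
  // Least address among the stack contents and the 'lost' value, analogous
  // (under this sketch's assumptions) to CMSMarkStack::least_value().
  Addr least_value(Addr lost) const {
    Addr least = lost;
    for (size_t i = 0; i < elems.size(); i++) least = std::min(least, elems[i]);
    return least;
  }
  void reset() { elems.clear(); }   // discard contents
};

struct ToyCollector {
  Addr restart_addr;                // 0 plays the role of NULL here
  ToyCollector() : restart_addr(0) {}
  void lower_restart_addr(Addr low) {
    restart_addr = (restart_addr == 0) ? low : std::min(restart_addr, low);
  }
};

static void handle_overflow(ToyCollector& c, ToyMarkStack& s, Addr lost) {
  c.lower_restart_addr(s.least_value(lost));  // remember least grey address
  s.reset();                                  // discard stack contents
  // A later marking pass notices restart_addr != 0 and rescans from there.
}
#endif
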
6751void PushOrMarkClosure::do_oop(oop obj) {
6752  // Ignore mark word because we are running concurrent with mutators.
6753  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6754  HeapWord* addr = (HeapWord*)obj;
6755  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6756    // Oop lies in _span and isn't yet grey or black
6757    _bitMap->mark(addr);            // now grey
6758    if (addr < _finger) {
6759      // the bit map iteration has already either passed, or
6760      // sampled, this bit in the bit map; we'll need to
6761      // use the marking stack to scan this oop's oops.
6762      bool simulate_overflow = false;
6763      NOT_PRODUCT(
6764        if (CMSMarkStackOverflowALot &&
6765            _collector->simulate_overflow()) {
6766          // simulate a stack overflow
6767          simulate_overflow = true;
6768        }
6769      )
6770      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6771        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6772        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6773        handle_stack_overflow(addr);
6774      }
6775    }
6776    // anything including and to the right of _finger
6777    // will be scanned as we iterate over the remainder of the
6778    // bit map
6779    do_yield_check();
6780  }
6781}
6782
6783void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
6784void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6785
6786void ParPushOrMarkClosure::do_oop(oop obj) {
6787  // Ignore mark word because we are running concurrent with mutators.
6788  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6789  HeapWord* addr = (HeapWord*)obj;
6790  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6791    // Oop lies in _span and isn't yet grey or black
6792    // We read the global_finger (volatile read) strictly after marking oop
6793    bool res = _bit_map->par_mark(addr);    // now grey
6794    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6795    // Should we push this marked oop on our stack?
6796    // -- if someone else marked it, nothing to do
6797    // -- if target oop is above global finger nothing to do
6798    // -- if target oop is in chunk and above local finger
6799    //      then nothing to do
6800    // -- else push on work queue
6801    if (   !res       // someone else marked it, they will deal with it
6802        || (addr >= *gfa)  // will be scanned in a later task
6803        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6804      return;
6805    }
6806    // the bit map iteration has already either passed, or
6807    // sampled, this bit in the bit map; we'll need to
6808    // use the marking stack to scan this oop's oops.
6809    bool simulate_overflow = false;
6810    NOT_PRODUCT(
6811      if (CMSMarkStackOverflowALot &&
6812          _collector->simulate_overflow()) {
6813        // simulate a stack overflow
6814        simulate_overflow = true;
6815      }
6816    )
6817    if (simulate_overflow ||
6818        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6819      // stack overflow
6820      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6821      // We cannot assert that the overflow stack is full because
6822      // it may have been emptied since.
6823      assert(simulate_overflow ||
6824             _work_queue->size() == _work_queue->max_elems(),
6825            "Else push should have succeeded");
6826      handle_stack_overflow(addr);
6827    }
6828    do_yield_check();
6829  }
6830}
6831
6832void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
6833void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
6834
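// ---------------------------------------------------------------------------
// Illustrative, standalone restatement of the push filter used in
// ParPushOrMarkClosure::do_oop() above.  A newly grey object is pushed only
// if this thread claimed the mark, the object lies below the global finger,
// and it is not in our own chunk at or beyond our local finger.  The names
// below are made up for the example; 'in_my_chunk' stands in for
// _span.contains(addr).
// ---------------------------------------------------------------------------
#if 0  // compiled out on purpose: documentation-only example
#include <cstdint>

typedef uintptr_t Addr;

// Objects at or beyond the global finger will be reached by the bit-map
// iteration of some later task; objects in our own chunk at or beyond the
// local finger will be reached by our own iteration, so neither needs a push.
static bool must_push(bool we_marked_it,
                      Addr addr, Addr global_finger,
                      bool in_my_chunk, Addr local_finger) {
  if (!we_marked_it)                        return false; // someone else owns it
  if (addr >= global_finger)                return false; // a later task scans it
  if (in_my_chunk && addr >= local_finger)  return false; // we scan it ourselves
  return true;                                            // otherwise: push now
}
#endif
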
6835PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6836                                       MemRegion span,
6837                                       ReferenceProcessor* rp,
6838                                       CMSBitMap* bit_map,
6839                                       CMSBitMap* mod_union_table,
6840                                       CMSMarkStack*  mark_stack,
6841                                       bool           concurrent_precleaning):
6842  MetadataAwareOopClosure(rp),
6843  _collector(collector),
6844  _span(span),
6845  _bit_map(bit_map),
6846  _mod_union_table(mod_union_table),
6847  _mark_stack(mark_stack),
6848  _concurrent_precleaning(concurrent_precleaning)
6849{
6850  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6851}
6852
6853// Grey object rescan during pre-cleaning and second checkpoint phases --
6854// the non-parallel version (the parallel version appears further below.)
6855void PushAndMarkClosure::do_oop(oop obj) {
6856  // Ignore mark word verification. If during concurrent precleaning,
6857  // the object monitor may be locked. If during the checkpoint
6858  // phases, the object may already have been reached by a  different
6859  // path and may be at the end of the global overflow list (so
6860  // the mark word may be NULL).
6861  assert(obj->is_oop_or_null(true /* ignore mark word */),
6862         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6863  HeapWord* addr = (HeapWord*)obj;
6864  // Check if oop points into the CMS generation
6865  // and is not marked
6866  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6867    // a white object ...
6868    _bit_map->mark(addr);         // ... now grey
6869    // push on the marking stack (grey set)
6870    bool simulate_overflow = false;
6871    NOT_PRODUCT(
6872      if (CMSMarkStackOverflowALot &&
6873          _collector->simulate_overflow()) {
6874        // simulate a stack overflow
6875        simulate_overflow = true;
6876      }
6877    )
6878    if (simulate_overflow || !_mark_stack->push(obj)) {
6879      if (_concurrent_precleaning) {
6880         // During precleaning we can just dirty the appropriate card(s)
6881         // in the mod union table, thus ensuring that the object remains
6882         // in the grey set  and continue. In the case of object arrays
6883         // we need to dirty all of the cards that the object spans,
6884         // since the rescan of object arrays will be limited to the
6885         // dirty cards.
6886         // Note that no one can be interfering with us in this action
6887         // of dirtying the mod union table, so no locking or atomics
6888         // are required.
6889         if (obj->is_objArray()) {
6890           size_t sz = obj->size();
6891           HeapWord* end_card_addr = (HeapWord*)round_to(
6892                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6893           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6894           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6895           _mod_union_table->mark_range(redirty_range);
6896         } else {
6897           _mod_union_table->mark(addr);
6898         }
6899         _collector->_ser_pmc_preclean_ovflw++;
6900      } else {
6901         // During the remark phase, we need to remember this oop
6902         // in the overflow list.
6903         _collector->push_on_overflow_list(obj);
6904         _collector->_ser_pmc_remark_ovflw++;
6905      }
6906    }
6907  }
6908}
6909
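// ---------------------------------------------------------------------------
// Illustrative, standalone sketch of the object-array redirtying done above
// when the mark stack overflows during precleaning: every card the array
// spans must be dirtied, because the later rescan looks only at dirty cards.
// Not HotSpot code; the card size and the object layout below are assumed
// values for the example.
// ---------------------------------------------------------------------------
#if 0  // compiled out on purpose: documentation-only example
#include <cstdint>
#include <cstdio>

static const uintptr_t CARD_SIZE_BYTES = 512;   // assumed card size

static uintptr_t round_up_to_card(uintptr_t addr) {
  return (addr + CARD_SIZE_BYTES - 1) & ~(CARD_SIZE_BYTES - 1);
}

int main() {
  uintptr_t obj_start = 0x20100;   // start of an overflowing objArray (assumed)
  uintptr_t obj_bytes = 5000;      // its size in bytes (assumed)
  // Dirty the card-bounded range covering the whole array, i.e.
  // [obj_start, round_up(obj_start + obj_bytes, card_size)).
  uintptr_t redirty_end = round_up_to_card(obj_start + obj_bytes);
  for (uintptr_t card = obj_start & ~(CARD_SIZE_BYTES - 1);
       card < redirty_end; card += CARD_SIZE_BYTES) {
    printf("dirty card at %#lx\n", (unsigned long)card);
  }
  return 0;
}
#endif
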
6910ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6911                                             MemRegion span,
6912                                             ReferenceProcessor* rp,
6913                                             CMSBitMap* bit_map,
6914                                             OopTaskQueue* work_queue):
6915  MetadataAwareOopClosure(rp),
6916  _collector(collector),
6917  _span(span),
6918  _bit_map(bit_map),
6919  _work_queue(work_queue)
6920{
6921  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6922}
6923
6924void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
6925void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
6926
6927// Grey object rescan during second checkpoint phase --
6928// the parallel version.
6929void ParPushAndMarkClosure::do_oop(oop obj) {
6930  // In the assert below, we ignore the mark word because
6931  // this oop may point to an already visited object that is
6932  // on the overflow stack (in which case the mark word has
6933  // been hijacked for chaining into the overflow stack --
6934  // if this is the last object in the overflow stack then
6935  // its mark word will be NULL). Because this object may
6936  // have been subsequently popped off the global overflow
6937  // stack, and the mark word possibly restored to the prototypical
6938  // value, by the time we get to examined this failing assert in
6939  // the debugger, is_oop_or_null(false) may subsequently start
6940  // to hold.
6941  assert(obj->is_oop_or_null(true),
6942         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6943  HeapWord* addr = (HeapWord*)obj;
6944  // Check if oop points into the CMS generation
6945  // and is not marked
6946  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6947    // a white object ...
6948    // If we manage to "claim" the object, by being the
6949    // first thread to mark it, then we push it on our
6950    // marking stack
6951    if (_bit_map->par_mark(addr)) {     // ... now grey
6952      // push on work queue (grey set)
6953      bool simulate_overflow = false;
6954      NOT_PRODUCT(
6955        if (CMSMarkStackOverflowALot &&
6956            _collector->par_simulate_overflow()) {
6957          // simulate a stack overflow
6958          simulate_overflow = true;
6959        }
6960      )
6961      if (simulate_overflow || !_work_queue->push(obj)) {
6962        _collector->par_push_on_overflow_list(obj);
6963        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
6964      }
6965    } // Else, some other thread got there first
6966  }
6967}
6968
6969void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
6970void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
6971
6972void CMSPrecleanRefsYieldClosure::do_yield_work() {
6973  Mutex* bml = _collector->bitMapLock();
6974  assert_lock_strong(bml);
6975  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6976         "CMS thread should hold CMS token");
6977
6978  bml->unlock();
6979  ConcurrentMarkSweepThread::desynchronize(true);
6980
6981  _collector->stopTimer();
6982  _collector->incrementYields();
6983
6984  // See the comment in coordinator_yield()
6985  for (unsigned i = 0; i < CMSYieldSleepCount &&
6986                       ConcurrentMarkSweepThread::should_yield() &&
6987                       !CMSCollector::foregroundGCIsActive(); ++i) {
6988    os::sleep(Thread::current(), 1, false);
6989  }
6990
6991  ConcurrentMarkSweepThread::synchronize(true);
6992  bml->lock();
6993
6994  _collector->startTimer();
6995}
6996
6997bool CMSPrecleanRefsYieldClosure::should_return() {
6998  if (ConcurrentMarkSweepThread::should_yield()) {
6999    do_yield_work();
7000  }
7001  return _collector->foregroundGCIsActive();
7002}
7003
7004void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7005  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7006         "mr should be aligned to start at a card boundary");
7007  // We'd like to assert:
7008  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7009  //        "mr should be a range of cards");
7010  // However, that would be too strong in one case -- the last
7011  // partition ends at _unallocated_block which, in general, can be
7012  // an arbitrary boundary, not necessarily card aligned.
7013  _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
7014  _space->object_iterate_mem(mr, &_scan_cl);
7015}
7016
7017SweepClosure::SweepClosure(CMSCollector* collector,
7018                           ConcurrentMarkSweepGeneration* g,
7019                           CMSBitMap* bitMap, bool should_yield) :
7020  _collector(collector),
7021  _g(g),
7022  _sp(g->cmsSpace()),
7023  _limit(_sp->sweep_limit()),
7024  _freelistLock(_sp->freelistLock()),
7025  _bitMap(bitMap),
7026  _yield(should_yield),
7027  _inFreeRange(false),           // No free range at beginning of sweep
7028  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7029  _lastFreeRangeCoalesced(false),
7030  _freeFinger(g->used_region().start())
7031{
7032  NOT_PRODUCT(
7033    _numObjectsFreed = 0;
7034    _numWordsFreed   = 0;
7035    _numObjectsLive = 0;
7036    _numWordsLive = 0;
7037    _numObjectsAlreadyFree = 0;
7038    _numWordsAlreadyFree = 0;
7039    _last_fc = NULL;
7040
7041    _sp->initializeIndexedFreeListArrayReturnedBytes();
7042    _sp->dictionary()->initialize_dict_returned_bytes();
7043  )
7044  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7045         "sweep _limit out of bounds");
7046  log_develop_trace(gc, sweep)("====================");
7047  log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
7048}
7049
7050void SweepClosure::print_on(outputStream* st) const {
7051  st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7052               p2i(_sp->bottom()), p2i(_sp->end()));
7053  st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7054  st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7055  NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7056  st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7057               _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7058}
7059
7060#ifndef PRODUCT
7061// Assertion checking only:  no useful work in product mode --
7062// however, if any of the flags below become product flags,
7063// you may need to review this code to see if it needs to be
7064// enabled in product mode.
7065SweepClosure::~SweepClosure() {
7066  assert_lock_strong(_freelistLock);
7067  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7068         "sweep _limit out of bounds");
7069  if (inFreeRange()) {
7070    Log(gc, sweep) log;
7071    log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7072    ResourceMark rm;
7073    print_on(log.error_stream());
7074    ShouldNotReachHere();
7075  }
7076
7077  if (log_is_enabled(Debug, gc, sweep)) {
7078    log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7079                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7080    log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7081                         _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7082    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7083    log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7084  }
7085
7086  if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7087    size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7088    size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7089    size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7090    log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
7091                         returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7092  }
7093  log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7094  log_develop_trace(gc, sweep)("================");
7095}
7096#endif  // PRODUCT
7097
7098void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7099    bool freeRangeInFreeLists) {
7100  log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
7101                               p2i(freeFinger), freeRangeInFreeLists);
7102  assert(!inFreeRange(), "Trampling existing free range");
7103  set_inFreeRange(true);
7104  set_lastFreeRangeCoalesced(false);
7105
7106  set_freeFinger(freeFinger);
7107  set_freeRangeInFreeLists(freeRangeInFreeLists);
7108  if (CMSTestInFreeList) {
7109    if (freeRangeInFreeLists) {
7110      FreeChunk* fc = (FreeChunk*) freeFinger;
7111      assert(fc->is_free(), "A chunk on the free list should be free.");
7112      assert(fc->size() > 0, "Free range should have a size");
7113      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7114    }
7115  }
7116}
7117
7118// Note that the sweeper runs concurrently with mutators. Thus,
7119// it is possible for direct allocation in this generation to happen
7120// in the middle of the sweep. Note that the sweeper also coalesces
7121// contiguous free blocks. Thus, unless the sweeper and the allocator
7122// synchronize appropriately, freshly allocated blocks may get swept up.
7123// This is accomplished by the sweeper locking the free lists while
7124// it is sweeping. Thus blocks that are determined to be free are
7125// indeed free. There is however one additional complication:
7126// blocks that have been allocated since the final checkpoint and
7127// mark will not have been marked and so would be treated as
7128// unreachable and swept up. To prevent this, the allocator marks
7129// the bit map when allocating during the sweep phase. This leads,
7130// however, to a further complication -- objects may have been allocated
7131// but not yet initialized -- in the sense that the header isn't yet
7132// installed. The sweeper cannot then determine the size of the block
7133// in order to skip over it. To deal with this case, we use a technique
7134// (due to Printezis) to encode such uninitialized block sizes in the
7135// bit map. Since the bit map uses one bit per HeapWord, while the
7136// CMS generation has a minimum object size of 3 HeapWords, it follows
7137// that "normal marks" won't be adjacent in the bit map (there will
7138// always be at least two 0 bits between successive 1 bits). We make use
7139// of these "unused" bits to represent uninitialized blocks -- the bit
7140// corresponding to the start of the uninitialized object and the next
7141// bit are both set. Finally, a third 1 bit, at the object's last word,
7142// marks the end of the object that started with the two consecutive
7143// 1 bits, indicating its potentially uninitialized state.
7144
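// ---------------------------------------------------------------------------
// Illustrative, standalone sketch of the Printezis encoding described above:
// for a block whose header is not yet installed, the bits for its first and
// second words are both set, and so is the bit for its last word; the sweeper
// recovers the size from those three bits.  ToyBitMap is a made-up stand-in
// for CMSBitMap, with one bit per (heap) word.
// ---------------------------------------------------------------------------
#if 0  // compiled out on purpose: documentation-only example
#include <cassert>
#include <cstddef>
#include <vector>

struct ToyBitMap {
  std::vector<bool> bits;                  // one bit per word
  explicit ToyBitMap(size_t words) : bits(words, false) {}
  void mark(size_t w)            { bits[w] = true; }
  bool is_marked(size_t w) const { return bits[w]; }
  size_t next_marked_at_or_after(size_t w) const {
    while (w < bits.size() && !bits[w]) w++;
    return w;
  }
};

// Mark an uninitialized block occupying [start, start + size_in_words).
static void printezis_mark(ToyBitMap& bm, size_t start, size_t size_in_words) {
  assert(size_in_words >= 3 && "minimum object size keeps the encoding unambiguous");
  bm.mark(start);                      // normal "object starts here" bit
  bm.mark(start + 1);                  // second bit: size not yet readable
  bm.mark(start + size_in_words - 1);  // last-word bit terminates the block
}

// Recover the size: two adjacent set bits at 'start' mean "find the next set
// bit at or after start + 2; the block ends just after it" -- the same
// computation do_live_chunk() performs via getNextMarkedWordAddress() below.
static size_t printezis_size(const ToyBitMap& bm, size_t start) {
  assert(bm.is_marked(start) && bm.is_marked(start + 1));
  size_t last = bm.next_marked_at_or_after(start + 2);
  return last + 1 - start;
}

int main() {
  ToyBitMap bm(64);
  printezis_mark(bm, 10, 7);
  assert(printezis_size(bm, 10) == 7);
  return 0;
}
#endif
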
7145size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7146  FreeChunk* fc = (FreeChunk*)addr;
7147  size_t res;
7148
7149  // Check if we are done sweeping. Below we check "addr >= _limit" rather
7150  // than "addr == _limit" because although _limit was a block boundary when
7151  // we started the sweep, it may no longer be one because heap expansion
7152  // may have caused us to coalesce the block ending at the address _limit
7153  // with a newly expanded chunk (this happens when _limit was set to the
7154  // previous _end of the space), so we may have stepped past _limit:
7155  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7156  if (addr >= _limit) { // we have swept up to or past the limit: finish up
7157    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7158           "sweep _limit out of bounds");
7159    assert(addr < _sp->end(), "addr out of bounds");
7160    // Flush any free range we might be holding as a single
7161    // coalesced chunk to the appropriate free list.
7162    if (inFreeRange()) {
7163      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7164             "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7165      flush_cur_free_chunk(freeFinger(),
7166                           pointer_delta(addr, freeFinger()));
7167      log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
7168                                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7169                                   lastFreeRangeCoalesced() ? 1 : 0);
7170    }
7171
7172    // help the iterator loop finish
7173    return pointer_delta(_sp->end(), addr);
7174  }
7175
7176  assert(addr < _limit, "sweep invariant");
7177  // check if we should yield
7178  do_yield_check(addr);
7179  if (fc->is_free()) {
7180    // Chunk that is already free
7181    res = fc->size();
7182    do_already_free_chunk(fc);
7183    debug_only(_sp->verifyFreeLists());
7184    // If we flush the chunk at hand in lookahead_and_flush()
7185    // and it's coalesced with a preceding chunk, then the
7186    // process of "mangling" the payload of the coalesced block
7187    // will cause erasure of the size information from the
7188    // (erstwhile) header of all the coalesced blocks but the
7189    // first, so the first disjunct in the assert will not hold
7190    // in that specific case (in which case the second disjunct
7191    // will hold).
7192    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7193           "Otherwise the size info doesn't change at this step");
7194    NOT_PRODUCT(
7195      _numObjectsAlreadyFree++;
7196      _numWordsAlreadyFree += res;
7197    )
7198    NOT_PRODUCT(_last_fc = fc;)
7199  } else if (!_bitMap->isMarked(addr)) {
7200    // Chunk is fresh garbage
7201    res = do_garbage_chunk(fc);
7202    debug_only(_sp->verifyFreeLists());
7203    NOT_PRODUCT(
7204      _numObjectsFreed++;
7205      _numWordsFreed += res;
7206    )
7207  } else {
7208    // Chunk that is alive.
7209    res = do_live_chunk(fc);
7210    debug_only(_sp->verifyFreeLists());
7211    NOT_PRODUCT(
7212        _numObjectsLive++;
7213        _numWordsLive += res;
7214    )
7215  }
7216  return res;
7217}
7218
7219// For the smart allocation, record following
7220//  split deaths - a free chunk is removed from its free list because
7221//      it is being split into two or more chunks.
7222//  split birth - a free chunk is being added to its free list because
7223//      a larger free chunk has been split and resulted in this free chunk.
7224//  coal death - a free chunk is being removed from its free list because
7225//      it is being coalesced into a large free chunk.
7226//  coal birth - a free chunk is being added to its free list because
7227//      it was created when two or more free chunks were coalesced into
7228//      this free chunk.
7229//
7230// These statistics are used to determine the desired number of free
7231// chunks of a given size.  The desired number is chosen to be relative
7232// to the end of a CMS sweep.  The desired number at the end of a sweep
7233// is the
7234//      count-at-end-of-previous-sweep (an amount that was enough)
7235//              - count-at-beginning-of-current-sweep  (the excess)
7236//              + split-births  (gains in this size during interval)
7237//              - split-deaths  (demands on this size during interval)
7238// where the interval is from the end of one sweep to the end of the
7239// next.
7240//
7241// When sweeping the sweeper maintains an accumulated chunk which is
7242// the chunk that is made up of chunks that have been coalesced.  That
7243// will be termed the left-hand chunk.  A new chunk of garbage that
7244// is being considered for coalescing will be referred to as the
7245// right-hand chunk.
7246//
7247// When making a decision on whether to coalesce a right-hand chunk with
7248// the current left-hand chunk, the current count vs. the desired count
7249// of the left-hand chunk is considered.  Also if the right-hand chunk
7250// is near the large chunk at the end of the heap (see
7251// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7252// left-hand chunk is coalesced.
7253//
7254// When making a decision about whether to split a chunk, the desired count
7255// vs. the current count of the candidate to be split is also considered.
7256// If the candidate is underpopulated (currently fewer chunks than desired),
7257// a chunk of an overpopulated (currently more chunks than desired) size may
7258// be chosen.  The "hint" associated with a free list, if non-null, points
7259// to a free list which may be overpopulated.
7260//
7261
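// ---------------------------------------------------------------------------
// The desired-count formula in the block comment above, restated as a small
// standalone function.  This is only an illustration; the struct and field
// names are made up for the example and are not free-list accessors.
// ---------------------------------------------------------------------------
#if 0  // compiled out on purpose: documentation-only example
#include <cstddef>

struct SweepIntervalStats {
  size_t count_at_end_of_previous_sweep;    // an amount that was enough
  size_t count_at_beginning_of_this_sweep;  // the excess still on hand
  size_t split_births;                      // gains of this size during interval
  size_t split_deaths;                      // demands on this size during interval
};

// Desired number of free chunks of a given size at the end of this sweep.
static long desired_count(const SweepIntervalStats& s) {
  return (long)s.count_at_end_of_previous_sweep
       - (long)s.count_at_beginning_of_this_sweep
       + (long)s.split_births
       - (long)s.split_deaths;
}
#endif
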
7262void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7263  const size_t size = fc->size();
7264  // Chunks that cannot be coalesced are not in the
7265  // free lists.
7266  if (CMSTestInFreeList && !fc->cantCoalesce()) {
7267    assert(_sp->verify_chunk_in_free_list(fc),
7268           "free chunk should be in free lists");
7269  }
7270  // a chunk that is already free, should not have been
7271  // marked in the bit map
7272  HeapWord* const addr = (HeapWord*) fc;
7273  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7274  // Verify that the bit map has no bits marked between
7275  // addr and purported end of this block.
7276  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7277
7278  // Some chunks cannot be coalesced under any circumstances.
7279  // See the definition of cantCoalesce().
7280  if (!fc->cantCoalesce()) {
7281    // This chunk can potentially be coalesced.
7282    // All the work is done in do_post_free_or_garbage_chunk() below.
7283    do_post_free_or_garbage_chunk(fc, size);
7284    // Note that if the chunk is not coalescable (the else arm
7285    // below), we unconditionally flush, without needing to do
7286    // a "lookahead," as we do below.
7287    if (inFreeRange()) lookahead_and_flush(fc, size);
7288  } else {
7289    // Code path common to both original and adaptive free lists.
7290
7291    // can't coalesce with the previous block; this should be treated
7292    // as the end of a free run, if any
7293    if (inFreeRange()) {
7294      // we kicked some butt; time to pick up the garbage
7295      assert(freeFinger() < addr, "freeFinger points too high");
7296      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7297    }
7298    // else, nothing to do, just continue
7299  }
7300}
7301
7302size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7303  // This is a chunk of garbage.  It is not in any free list.
7304  // Add it to a free list or let it possibly be coalesced into
7305  // a larger chunk.
7306  HeapWord* const addr = (HeapWord*) fc;
7307  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7308
7309  // Verify that the bit map has no bits marked between
7310  // addr and purported end of just dead object.
7311  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7312  do_post_free_or_garbage_chunk(fc, size);
7313
7314  assert(_limit >= addr + size,
7315         "A freshly garbage chunk can't possibly straddle over _limit");
7316  if (inFreeRange()) lookahead_and_flush(fc, size);
7317  return size;
7318}
7319
7320size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7321  HeapWord* addr = (HeapWord*) fc;
7322  // The sweeper has just found a live object. Return any accumulated
7323  // left hand chunk to the free lists.
7324  if (inFreeRange()) {
7325    assert(freeFinger() < addr, "freeFinger points too high");
7326    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7327  }
7328
7329  // This object is live: we'd normally expect this to be
7330  // an oop, and like to assert the following:
7331  // assert(oop(addr)->is_oop(), "live block should be an oop");
7332  // However, as we commented above, this may be an object whose
7333  // header hasn't yet been initialized.
7334  size_t size;
7335  assert(_bitMap->isMarked(addr), "Tautology for this control point");
7336  if (_bitMap->isMarked(addr + 1)) {
7337    // Determine the size from the bit map, rather than trying to
7338    // compute it from the object header.
7339    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7340    size = pointer_delta(nextOneAddr + 1, addr);
7341    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7342           "alignment problem");
7343
7344#ifdef ASSERT
7345      if (oop(addr)->klass_or_null_acquire() != NULL) {
7346        // Ignore mark word because we are running concurrent with mutators
7347        assert(oop(addr)->is_oop(true), "live block should be an oop");
7348        assert(size ==
7349               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7350               "P-mark and computed size do not agree");
7351      }
7352#endif
7353
7354  } else {
7355    // This should be an initialized object that's alive.
7356    assert(oop(addr)->klass_or_null_acquire() != NULL,
7357           "Should be an initialized object");
7358    // Ignore mark word because we are running concurrent with mutators
7359    assert(oop(addr)->is_oop(true), "live block should be an oop");
7360    // Verify that the bit map has no bits marked between
7361    // addr and purported end of this block.
7362    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7363    assert(size >= 3, "Necessary for Printezis marks to work");
7364    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7365    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7366  }
7367  return size;
7368}
7369
7370void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7371                                                 size_t chunkSize) {
7372  // do_post_free_or_garbage_chunk() should only be called in the case
7373  // of the adaptive free list allocator.
7374  const bool fcInFreeLists = fc->is_free();
7375  assert((HeapWord*)fc <= _limit, "sweep invariant");
7376  if (CMSTestInFreeList && fcInFreeLists) {
7377    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7378  }
7379
7380  log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7381
7382  HeapWord* const fc_addr = (HeapWord*) fc;
7383
7384  bool coalesce = false;
7385  const size_t left  = pointer_delta(fc_addr, freeFinger());
7386  const size_t right = chunkSize;
7387  switch (FLSCoalescePolicy) {
7388    // numeric value forms a coalescing aggressiveness metric
7389    case 0:  { // never coalesce
7390      coalesce = false;
7391      break;
7392    }
7393    case 1: { // coalesce if left & right chunks on overpopulated lists
7394      coalesce = _sp->coalOverPopulated(left) &&
7395                 _sp->coalOverPopulated(right);
7396      break;
7397    }
7398    case 2: { // coalesce if left chunk on overpopulated list (default)
7399      coalesce = _sp->coalOverPopulated(left);
7400      break;
7401    }
7402    case 3: { // coalesce if left OR right chunk on overpopulated list
7403      coalesce = _sp->coalOverPopulated(left) ||
7404                 _sp->coalOverPopulated(right);
7405      break;
7406    }
7407    case 4: { // always coalesce
7408      coalesce = true;
7409      break;
7410    }
7411    default:
7412      ShouldNotReachHere();
7413  }
7414
7415  // Should the current free range be coalesced?
7416  // If the chunk is in a free range and either we decided to coalesce above
7417  // or the chunk is near the large block at the end of the heap
7418  // (isNearLargestChunk() returns true), then coalesce this chunk.
7419  const bool doCoalesce = inFreeRange()
7420                          && (coalesce || _g->isNearLargestChunk(fc_addr));
7421  if (doCoalesce) {
7422    // Coalesce the current free range on the left with the new
7423    // chunk on the right.  If either is on a free list,
7424    // it must be removed from the list and stashed in the closure.
7425    if (freeRangeInFreeLists()) {
7426      FreeChunk* const ffc = (FreeChunk*)freeFinger();
7427      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7428             "Size of free range is inconsistent with chunk size.");
7429      if (CMSTestInFreeList) {
7430        assert(_sp->verify_chunk_in_free_list(ffc),
7431               "Chunk is not in free lists");
7432      }
7433      _sp->coalDeath(ffc->size());
7434      _sp->removeFreeChunkFromFreeLists(ffc);
7435      set_freeRangeInFreeLists(false);
7436    }
7437    if (fcInFreeLists) {
7438      _sp->coalDeath(chunkSize);
7439      assert(fc->size() == chunkSize,
7440        "The chunk has the wrong size or is not in the free lists");
7441      _sp->removeFreeChunkFromFreeLists(fc);
7442    }
7443    set_lastFreeRangeCoalesced(true);
7444    print_free_block_coalesced(fc);
7445  } else {  // not in a free range and/or should not coalesce
7446    // Return the current free range and start a new one.
7447    if (inFreeRange()) {
7448      // In a free range but cannot coalesce with the right hand chunk.
7449      // Put the current free range into the free lists.
7450      flush_cur_free_chunk(freeFinger(),
7451                           pointer_delta(fc_addr, freeFinger()));
7452    }
7453    // Set up for new free range.  Pass along whether the right hand
7454    // chunk is in the free lists.
7455    initialize_free_range((HeapWord*)fc, fcInFreeLists);
7456  }
7457}
7458
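// ---------------------------------------------------------------------------
// Illustrative, standalone restatement of the FLSCoalescePolicy switch above:
// policy values 0..4 map to a decision based on whether the left-hand and/or
// right-hand chunk sizes sit on overpopulated free lists.  The boolean
// parameters stand in for CompactibleFreeListSpace::coalOverPopulated().
// ---------------------------------------------------------------------------
#if 0  // compiled out on purpose: documentation-only example
static bool should_coalesce(int policy,
                            bool left_over_populated,
                            bool right_over_populated) {
  switch (policy) {
    case 0: return false;                                       // never coalesce
    case 1: return left_over_populated && right_over_populated; // both lists over
    case 2: return left_over_populated;                         // default policy
    case 3: return left_over_populated || right_over_populated; // either list over
    case 4: return true;                                        // always coalesce
    default: return false;  // the real switch treats this as unreachable
  }
}
#endif
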
7459// Lookahead flush:
7460// If we are tracking a free range, and this is the last chunk that
7461// we'll look at because its end crosses past _limit, we'll preemptively
7462// flush it along with any free range we may be holding on to. Note that
7463// this can be the case only for an already free or freshly garbage
7464// chunk. If this block is an object, it can never straddle
7465// over _limit. The "straddling" occurs when _limit is set at
7466// the previous end of the space when this cycle started, and
7467// a subsequent heap expansion caused the previously co-terminal
7468// free block to be coalesced with the newly expanded portion,
7469// thus rendering _limit a non-block-boundary making it dangerous
7470// for the sweeper to step over and examine.
7471void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7472  assert(inFreeRange(), "Should only be called if currently in a free range.");
7473  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7474  assert(_sp->used_region().contains(eob - 1),
7475         "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7476         " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7477         " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7478         p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7479  if (eob >= _limit) {
7480    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7481    log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
7482                                 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7483                                 "[" PTR_FORMAT "," PTR_FORMAT ")",
7484                                 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7485    // Return the storage we are tracking back into the free lists.
7486    log_develop_trace(gc, sweep)("Flushing ... ");
7487    assert(freeFinger() < eob, "Error");
7488    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7489  }
7490}
7491
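// ---------------------------------------------------------------------------
// Illustrative, standalone restatement of the straddle check performed by
// lookahead_and_flush() above: if the end of the chunk reaches or crosses the
// sweep limit, the whole free range [freeFinger, end-of-block) is flushed
// early.  The parameter names below are made up for the example.
// ---------------------------------------------------------------------------
#if 0  // compiled out on purpose: documentation-only example
#include <cstddef>
#include <cstdint>

typedef uintptr_t Addr;

// Returns the number of words to flush starting at free_finger, or 0 if the
// chunk ends strictly below the sweep limit and no early flush is needed.
static size_t lookahead_flush_words(Addr free_finger, Addr chunk_start,
                                    size_t chunk_words, Addr limit,
                                    size_t bytes_per_word) {
  Addr eob = chunk_start + chunk_words * bytes_per_word;   // end of block
  if (eob < limit) return 0;                  // still inside the swept region
  return (size_t)(eob - free_finger) / bytes_per_word;     // flush [finger, eob)
}
#endif
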
7492void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7493  assert(inFreeRange(), "Should only be called if currently in a free range.");
7494  assert(size > 0,
7495    "A zero sized chunk cannot be added to the free lists.");
7496  if (!freeRangeInFreeLists()) {
7497    if (CMSTestInFreeList) {
7498      FreeChunk* fc = (FreeChunk*) chunk;
7499      fc->set_size(size);
7500      assert(!_sp->verify_chunk_in_free_list(fc),
7501             "chunk should not be in free lists yet");
7502    }
7503    log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
7504    // A new free range is going to be starting.  The current
7505    // free range has not been added to the free lists yet or
7506    // was removed so add it back.
7507    // If the current free range was coalesced, then the death
7508    // of the free range was recorded.  Record a birth now.
7509    if (lastFreeRangeCoalesced()) {
7510      _sp->coalBirth(size);
7511    }
7512    _sp->addChunkAndRepairOffsetTable(chunk, size,
7513            lastFreeRangeCoalesced());
7514  } else {
7515    log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
7516  }
7517  set_inFreeRange(false);
7518  set_freeRangeInFreeLists(false);
7519}
7520
7521// We take a break if we've been at this for a while,
7522// so as to avoid monopolizing the locks involved.
7523void SweepClosure::do_yield_work(HeapWord* addr) {
7524  // Return current free chunk being used for coalescing (if any)
7525  // to the appropriate freelist.  After yielding, the next
7526  // free block encountered will start a coalescing range of
7527  // free blocks.  If the next free block is adjacent to the
7528  // chunk just flushed, they will need to wait for the next
7529  // sweep to be coalesced.
7530  if (inFreeRange()) {
7531    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7532  }
7533
7534  // First give up the locks, then yield, then re-lock.
7535  // We should probably use a constructor/destructor idiom to
7536  // do this unlock/lock or modify the MutexUnlocker class to
7537  // serve our purpose. XXX
7538  assert_lock_strong(_bitMap->lock());
7539  assert_lock_strong(_freelistLock);
7540  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7541         "CMS thread should hold CMS token");
7542  _bitMap->lock()->unlock();
7543  _freelistLock->unlock();
7544  ConcurrentMarkSweepThread::desynchronize(true);
7545  _collector->stopTimer();
7546  _collector->incrementYields();
7547
7548  // See the comment in coordinator_yield()
7549  for (unsigned i = 0; i < CMSYieldSleepCount &&
7550                       ConcurrentMarkSweepThread::should_yield() &&
7551                       !CMSCollector::foregroundGCIsActive(); ++i) {
7552    os::sleep(Thread::current(), 1, false);
7553  }
7554
7555  ConcurrentMarkSweepThread::synchronize(true);
7556  _freelistLock->lock();
7557  _bitMap->lock()->lock_without_safepoint_check();
7558  _collector->startTimer();
7559}
7560
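// ---------------------------------------------------------------------------
// The XXX comment in do_yield_work() above suggests a constructor/destructor
// idiom for the unlock / yield / re-lock sequence.  A generic sketch of that
// idiom follows, using std::mutex rather than the VM's Mutex class; it
// assumes both locks are held by the caller on entry.
// ---------------------------------------------------------------------------
#if 0  // compiled out on purpose: documentation-only example
#include <mutex>

// Releases a held lock on construction and re-acquires it on destruction, so
// the yield/sleep code in between runs without holding the lock.
class ScopedUnlocker {
  std::mutex& _m;
 public:
  explicit ScopedUnlocker(std::mutex& m) : _m(m) { _m.unlock(); }
  ~ScopedUnlocker()                              { _m.lock();   }
};

static void yield_point(std::mutex& bitmap_lock, std::mutex& freelist_lock) {
  ScopedUnlocker u1(bitmap_lock);    // unlocked first ...
  ScopedUnlocker u2(freelist_lock);  // ... then this one
  // ... sleep / let the foreground collector or mutators make progress ...
}  // destructors re-lock in reverse order: freelist_lock, then bitmap_lock
#endif
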
7561#ifndef PRODUCT
7562// This is actually very useful in a product build if it can
7563// be called from the debugger.  Compile it into the product
7564// as needed.
7565bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7566  return debug_cms_space->verify_chunk_in_free_list(fc);
7567}
7568#endif
7569
7570void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7571  log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7572                               p2i(fc), fc->size());
7573}
7574
7575// CMSIsAliveClosure
7576bool CMSIsAliveClosure::do_object_b(oop obj) {
7577  HeapWord* addr = (HeapWord*)obj;
7578  return addr != NULL &&
7579         (!_span.contains(addr) || _bit_map->isMarked(addr));
7580}
7581
7582
7583CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7584                      MemRegion span,
7585                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7586                      bool cpc):
7587  _collector(collector),
7588  _span(span),
7589  _bit_map(bit_map),
7590  _mark_stack(mark_stack),
7591  _concurrent_precleaning(cpc) {
7592  assert(!_span.is_empty(), "Empty span could spell trouble");
7593}
7594
7595
7596// CMSKeepAliveClosure: the serial version
7597void CMSKeepAliveClosure::do_oop(oop obj) {
7598  HeapWord* addr = (HeapWord*)obj;
7599  if (_span.contains(addr) &&
7600      !_bit_map->isMarked(addr)) {
7601    _bit_map->mark(addr);
7602    bool simulate_overflow = false;
7603    NOT_PRODUCT(
7604      if (CMSMarkStackOverflowALot &&
7605          _collector->simulate_overflow()) {
7606        // simulate a stack overflow
7607        simulate_overflow = true;
7608      }
7609    )
7610    if (simulate_overflow || !_mark_stack->push(obj)) {
7611      if (_concurrent_precleaning) {
7612        // We dirty the overflown object and let the remark
7613        // phase deal with it.
7614        assert(_collector->overflow_list_is_empty(), "Error");
7615        // In the case of object arrays, we need to dirty all of
7616        // the cards that the object spans. No locking or atomics
7617        // are needed since no one else can be mutating the mod union
7618        // table.
7619        if (obj->is_objArray()) {
7620          size_t sz = obj->size();
7621          HeapWord* end_card_addr =
7622            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7623          MemRegion redirty_range = MemRegion(addr, end_card_addr);
7624          assert(!redirty_range.is_empty(), "Arithmetical tautology");
7625          _collector->_modUnionTable.mark_range(redirty_range);
7626        } else {
7627          _collector->_modUnionTable.mark(addr);
7628        }
7629        _collector->_ser_kac_preclean_ovflw++;
7630      } else {
7631        _collector->push_on_overflow_list(obj);
7632        _collector->_ser_kac_ovflw++;
7633      }
7634    }
7635  }
7636}
7637
7638void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
7639void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
7640
7641// CMSParKeepAliveClosure: a parallel version of the above.
7642// The work queues are private to each closure (thread),
7643// but (may be) available for stealing by other threads.
7644void CMSParKeepAliveClosure::do_oop(oop obj) {
7645  HeapWord* addr = (HeapWord*)obj;
7646  if (_span.contains(addr) &&
7647      !_bit_map->isMarked(addr)) {
7648    // In general, during recursive tracing, several threads
7649    // may be concurrently getting here; the first one to
7650    // "tag" it, claims it.
7651    if (_bit_map->par_mark(addr)) {
7652      bool res = _work_queue->push(obj);
7653      assert(res, "Low water mark should be much less than capacity");
7654      // Do a recursive trim in the hope that this will keep
7655      // stack usage lower, but leave some oops for potential stealers
7656      trim_queue(_low_water_mark);
7657    } // Else, another thread got there first
7658  }
7659}
7660
7661void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
7662void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7663
7664void CMSParKeepAliveClosure::trim_queue(uint max) {
7665  while (_work_queue->size() > max) {
7666    oop new_oop;
7667    if (_work_queue->pop_local(new_oop)) {
7668      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7669      assert(_bit_map->isMarked((HeapWord*)new_oop),
7670             "no white objects on this stack!");
7671      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7672      // iterate over the oops in this oop, marking and pushing
7673      // the ones in CMS heap (i.e. in _span).
7674      new_oop->oop_iterate(&_mark_and_push);
7675    }
7676  }
7677}
7678
7679CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7680                                CMSCollector* collector,
7681                                MemRegion span, CMSBitMap* bit_map,
7682                                OopTaskQueue* work_queue):
7683  _collector(collector),
7684  _span(span),
7685  _bit_map(bit_map),
7686  _work_queue(work_queue) { }
7687
7688void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7689  HeapWord* addr = (HeapWord*)obj;
7690  if (_span.contains(addr) &&
7691      !_bit_map->isMarked(addr)) {
7692    if (_bit_map->par_mark(addr)) {
7693      bool simulate_overflow = false;
7694      NOT_PRODUCT(
7695        if (CMSMarkStackOverflowALot &&
7696            _collector->par_simulate_overflow()) {
7697          // simulate a stack overflow
7698          simulate_overflow = true;
7699        }
7700      )
7701      if (simulate_overflow || !_work_queue->push(obj)) {
7702        _collector->par_push_on_overflow_list(obj);
7703        _collector->_par_kac_ovflw++;
7704      }
7705    } // Else another thread got there already
7706  }
7707}
7708
7709void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7710void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7711
7712//////////////////////////////////////////////////////////////////
7713//  CMSExpansionCause                /////////////////////////////
7714//////////////////////////////////////////////////////////////////
7715const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7716  switch (cause) {
7717    case _no_expansion:
7718      return "No expansion";
7719    case _satisfy_free_ratio:
7720      return "Free ratio";
7721    case _satisfy_promotion:
7722      return "Satisfy promotion";
7723    case _satisfy_allocation:
7724      return "allocation";
7725    case _allocate_par_lab:
7726      return "Par LAB";
7727    case _allocate_par_spooling_space:
7728      return "Par Spooling Space";
7729    case _adaptive_size_policy:
7730      return "Ergonomics";
7731    default:
7732      return "unknown";
7733  }
7734}
7735
7736void CMSDrainMarkingStackClosure::do_void() {
7737  // the max number to take from overflow list at a time
7738  const size_t num = _mark_stack->capacity()/4;
7739  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7740         "Overflow list should be NULL during concurrent phases");
7741  while (!_mark_stack->isEmpty() ||
7742         // if stack is empty, check the overflow list
7743         _collector->take_from_overflow_list(num, _mark_stack)) {
7744    oop obj = _mark_stack->pop();
7745    HeapWord* addr = (HeapWord*)obj;
7746    assert(_span.contains(addr), "Should be within span");
7747    assert(_bit_map->isMarked(addr), "Should be marked");
7748    assert(obj->is_oop(), "Should be an oop");
7749    obj->oop_iterate(_keep_alive);
7750  }
7751}
7752
7753void CMSParDrainMarkingStackClosure::do_void() {
7754  // drain queue
7755  trim_queue(0);
7756}
7757
7758// Trim our work_queue so its length is below max at return
7759void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7760  while (_work_queue->size() > max) {
7761    oop new_oop;
7762    if (_work_queue->pop_local(new_oop)) {
7763      assert(new_oop->is_oop(), "Expected an oop");
7764      assert(_bit_map->isMarked((HeapWord*)new_oop),
7765             "no white objects on this stack!");
7766      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7767      // iterate over the oops in this oop, marking and pushing
7768      // the ones in CMS heap (i.e. in _span).
7769      new_oop->oop_iterate(&_mark_and_push);
7770    }
7771  }
7772}
7773
7774////////////////////////////////////////////////////////////////////
7775// Support for Marking Stack Overflow list handling and related code
7776////////////////////////////////////////////////////////////////////
7777// Much of the following code is similar in shape and spirit to the
7778// code used in ParNewGC. We should try and share that code
7779// as much as possible in the future.
7780
7781#ifndef PRODUCT
7782// Debugging support for CMSMarkStackOverflowALot
7783
7784// It's OK to call this multi-threaded; the worst thing
7785// that can happen is that we'll get a bunch of closely
7786// spaced simulated overflows, but that's OK, in fact
7787// probably good as it would exercise the overflow code
7788// under contention.
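// As a usage sketch (these are develop-mode flags, so they are only
// settable in non-product builds), something like
//   -XX:+CMSMarkStackOverflowALot -XX:CMSMarkStackOverflowInterval=1000
// sends roughly every 1000th push down the overflow path exercised here.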
7789bool CMSCollector::simulate_overflow() {
7790  if (_overflow_counter-- <= 0) { // just being defensive
7791    _overflow_counter = CMSMarkStackOverflowInterval;
7792    return true;
7793  } else {
7794    return false;
7795  }
7796}
7797
7798bool CMSCollector::par_simulate_overflow() {
7799  return simulate_overflow();
7800}
7801#endif
7802
7803// Single-threaded
7804bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7805  assert(stack->isEmpty(), "Expected precondition");
7806  assert(stack->capacity() > num, "Shouldn't bite off more than we can chew");
7807  size_t i = num;
7808  oop  cur = _overflow_list;
7809  const markOop proto = markOopDesc::prototype();
7810  NOT_PRODUCT(ssize_t n = 0;)
7811  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7812    next = oop(cur->mark());
7813    cur->set_mark(proto);   // until proven otherwise
7814    assert(cur->is_oop(), "Should be an oop");
7815    bool res = stack->push(cur);
7816    assert(res, "Bit off more than we can chew?");
7817    NOT_PRODUCT(n++;)
7818  }
7819  _overflow_list = cur;
7820#ifndef PRODUCT
7821  assert(_num_par_pushes >= n, "Too many pops?");
7822  _num_par_pushes -= n;
7823#endif
7824  return !stack->isEmpty();
7825}
7826
7827#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
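// BUSY is a sentinel rather than a real oop: 0x1aff1aff is not an aligned
// heap address, so it can never collide with a genuine list head. Readers
// thus see three distinguishable states: NULL (empty), BUSY (claimed by
// some thread), or a pointer to the first object on the list.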
7828// (MT-safe) Get a prefix of at most "num" from the list.
7829// The overflow list is chained through the mark word of
7830// each object in the list. We fetch the entire list,
7831// break off a prefix of the right size and return the
7832// remainder. If other threads try to take objects from
7833// the overflow list at that time, they will wait for
7834// some time to see if data becomes available. If (and
7835// only if) another thread places one or more object(s)
7836// on the global list before we have returned the suffix
7837// to the global list, we will walk down our local list
7838// to find its end and append the global list to
7839// our suffix before returning it. This suffix walk can
7840// prove to be expensive (quadratic in the amount of traffic)
7841// when there are many objects in the overflow list and
7842// there is much producer-consumer contention on the list.
7843// *NOTE*: The overflow list manipulation code here and
7844// in ParNewGeneration:: is very similar in shape,
7845// except that in the ParNew case we use the old (from/eden)
7846// copy of the object to thread the list via its klass word.
7847// Because of the common code, if you make any changes in
7848// the code below, please check the ParNew version to see if
7849// similar changes might be needed.
7850// CR 6797058 has been filed to consolidate the common code.
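// A rough sketch of the states involved (illustrative only; the links live
// in the objects' mark words):
//
//   _overflow_list -> objA -> objB -> objC -> NULL    (populated)
//   _overflow_list == NULL                            (empty)
//   _overflow_list == BUSY                            (claimed, retry later)
//
// A taker atomically exchanges BUSY into _overflow_list, keeps a prefix of
// up to "num" objects for its own work queue, and then tries to CAS the
// remaining suffix back; if other threads have pushed in the meantime, the
// suffix is spliced ahead of the newly pushed objects rather than stored.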
7851bool CMSCollector::par_take_from_overflow_list(size_t num,
7852                                               OopTaskQueue* work_q,
7853                                               int no_of_gc_threads) {
7854  assert(work_q->size() == 0, "First empty local work queue");
7855  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7856  if (_overflow_list == NULL) {
7857    return false;
7858  }
7859  // Grab the entire list; we'll put back a suffix
7860  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7861  Thread* tid = Thread::current();
7862  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
7863  // set to ParallelGCThreads.
7864  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
7865  size_t sleep_time_millis = MAX2((size_t)1, num/100);
7866  // If the list is busy, we spin for a short while,
7867  // sleeping between attempts to get the list.
7868  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7869    os::sleep(tid, sleep_time_millis, false);
7870    if (_overflow_list == NULL) {
7871      // Nothing left to take
7872      return false;
7873    } else if (_overflow_list != BUSY) {
7874      // Try and grab the prefix
7875      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7876    }
7877  }
7878  // If the list was found to be empty, or we spun long
7879  // enough, we give up and return empty-handed. If we leave
7880  // the list in the BUSY state below, it must be the case that
7881  // some other thread holds the overflow list and will set it
7882  // to a non-BUSY state in the future.
7883  if (prefix == NULL || prefix == BUSY) {
7884     // Nothing to take or waited long enough
7885     if (prefix == NULL) {
7886       // Write back the NULL in case we overwrote it with BUSY above
7887       // and it is still the same value.
7888       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7889     }
7890     return false;
7891  }
7892  assert(prefix != NULL && prefix != BUSY, "Error");
7893  size_t i = num;
7894  oop cur = prefix;
7895  // Walk down the first "num" objects, unless we reach the end.
7896  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
7897  if (cur->mark() == NULL) {
7898    // We have "num" or fewer elements in the list, so there
7899    // is nothing to return to the global list.
7900    // Write back the NULL in lieu of the BUSY we wrote
7901    // above, if it is still the same value.
7902    if (_overflow_list == BUSY) {
7903      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7904    }
7905  } else {
7906    // Chop off the suffix and return it to the global list.
7907    assert(cur->mark() != BUSY, "Error");
7908    oop suffix_head = cur->mark(); // suffix will be put back on global list
7909    cur->set_mark(NULL);           // break off suffix
7910    // It's possible that the list is still in the empty (i.e. BUSY) state
7911    // we left it in a short while ago; in that case we may be
7912    // able to place back the suffix without incurring the cost
7913    // of a walk down the list.
7914    oop observed_overflow_list = _overflow_list;
7915    oop cur_overflow_list = observed_overflow_list;
7916    bool attached = false;
7917    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7918      observed_overflow_list =
7919        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7920      if (cur_overflow_list == observed_overflow_list) {
7921        attached = true;
7922        break;
7923      } else cur_overflow_list = observed_overflow_list;
7924    }
7925    if (!attached) {
7926      // Too bad, someone else sneaked in (at least) an element; we'll need
7927      // to do a splice. Find tail of suffix so we can prepend suffix to global
7928      // list.
7929      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
7930      oop suffix_tail = cur;
7931      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
7932             "Tautology");
7933      observed_overflow_list = _overflow_list;
7934      do {
7935        cur_overflow_list = observed_overflow_list;
7936        if (cur_overflow_list != BUSY) {
7937          // Do the splice ...
7938          suffix_tail->set_mark(markOop(cur_overflow_list));
7939        } else { // cur_overflow_list == BUSY
7940          suffix_tail->set_mark(NULL);
7941        }
7942        // ... and try to place spliced list back on overflow_list ...
7943        observed_overflow_list =
7944          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7945      } while (cur_overflow_list != observed_overflow_list);
7946      // ... until we have succeeded in doing so.
7947    }
7948  }
7949
7950  // Push the prefix elements on work_q
7951  assert(prefix != NULL, "control point invariant");
7952  const markOop proto = markOopDesc::prototype();
7953  oop next;
7954  NOT_PRODUCT(ssize_t n = 0;)
7955  for (cur = prefix; cur != NULL; cur = next) {
7956    next = oop(cur->mark());
7957    cur->set_mark(proto);   // until proven otherwise
7958    assert(cur->is_oop(), "Should be an oop");
7959    bool res = work_q->push(cur);
7960    assert(res, "Bit off more than we can chew?");
7961    NOT_PRODUCT(n++;)
7962  }
7963#ifndef PRODUCT
7964  assert(_num_par_pushes >= n, "Too many pops?");
7965  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
7966#endif
7967  return true;
7968}
7969
7970// Single-threaded
7971void CMSCollector::push_on_overflow_list(oop p) {
7972  NOT_PRODUCT(_num_par_pushes++;)
7973  assert(p->is_oop(), "Not an oop");
7974  preserve_mark_if_necessary(p);
7975  p->set_mark((markOop)_overflow_list);
7976  _overflow_list = p;
7977}
7978
7979// Multi-threaded; use CAS to prepend to overflow list
7980void CMSCollector::par_push_on_overflow_list(oop p) {
7981  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
7982  assert(p->is_oop(), "Not an oop");
7983  par_preserve_mark_if_necessary(p);
7984  oop observed_overflow_list = _overflow_list;
7985  oop cur_overflow_list;
7986  do {
7987    cur_overflow_list = observed_overflow_list;
7988    if (cur_overflow_list != BUSY) {
7989      p->set_mark(markOop(cur_overflow_list));
7990    } else {
7991      p->set_mark(NULL);
7992    }
7993    observed_overflow_list =
7994      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
7995  } while (cur_overflow_list != observed_overflow_list);
7996}
7997#undef BUSY
7998
7999// Single threaded
8000// General Note on GrowableArray: pushes may silently fail
8001// because we are (temporarily) out of C-heap for expanding
8002// the stack. The problem is quite ubiquitous and affects
8003// a lot of code in the JVM. The prudent thing for GrowableArray
8004// to do (for now) is to exit with an error. However, that may
8005// be too draconian in some cases because the caller may be
8006// able to recover without much harm. For such cases, we
8007// should probably introduce a "soft_push" method which returns
8008// an indication of success or failure with the assumption that
8009// the caller may be able to recover from a failure; code in
8010// the VM can then be changed, incrementally, to deal with such
8011// failures where possible, thus, incrementally hardening the VM
8012// in such low resource situations.
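// A minimal sketch of the kind of interface meant here (hypothetical; no
// such method exists on GrowableArray today):
//
//   template <class E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     // grow if full; on C-heap exhaustion return false rather than
//     // exiting, leaving the recovery decision to the caller
//   }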
8013void CMSCollector::preserve_mark_work(oop p, markOop m) {
8014  _preserved_oop_stack.push(p);
8015  _preserved_mark_stack.push(m);
8016  assert(m == p->mark(), "Mark word changed");
8017  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8018         "bijection");
8019}
8020
8021// Single threaded
8022void CMSCollector::preserve_mark_if_necessary(oop p) {
8023  markOop m = p->mark();
8024  if (m->must_be_preserved(p)) {
8025    preserve_mark_work(p, m);
8026  }
8027}
8028
8029void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8030  markOop m = p->mark();
8031  if (m->must_be_preserved(p)) {
8032    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8033    // Even though we read the mark word without holding
8034    // the lock, we are assured that it will not change
8035    // because we "own" this oop, so no other thread can
8036    // be trying to push it on the overflow list; see
8037    // the assertion in preserve_mark_work() that checks
8038    // that m == p->mark().
8039    preserve_mark_work(p, m);
8040  }
8041}
8042
8043// We should be able to do this multi-threaded,
8044// a chunk of stack being a task (this is
8045// correct because each oop only ever appears
8046// once in the overflow list). However, it's
8047// not very easy to completely overlap this with
8048// other operations, so it will generally not be done
8049// until all work's been completed. Because we
8050// expect the preserved oop stack (set) to be small,
8051// it's probably fine to do this single-threaded.
8052// We can explore cleverer concurrent/overlapped/parallel
8053// processing of preserved marks if we feel the
8054// need for this in the future. Stack overflow should
8055// be so rare in practice and, when it happens, its
8056// effect on performance so great that this will
8057// likely just be in the noise anyway.
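// (Recall that threading an object onto the overflow list overwrites its
// mark word with the list link, so any mark that must survive, for example
// one carrying a hash code or lock state, is what these paired stacks saved
// earlier and what gets written back below.)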
8058void CMSCollector::restore_preserved_marks_if_any() {
8059  assert(SafepointSynchronize::is_at_safepoint(),
8060         "world should be stopped");
8061  assert(Thread::current()->is_ConcurrentGC_thread() ||
8062         Thread::current()->is_VM_thread(),
8063         "should be single-threaded");
8064  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8065         "bijection");
8066
8067  while (!_preserved_oop_stack.is_empty()) {
8068    oop p = _preserved_oop_stack.pop();
8069    assert(p->is_oop(), "Should be an oop");
8070    assert(_span.contains(p), "oop should be in _span");
8071    assert(p->mark() == markOopDesc::prototype(),
8072           "Set when taken from overflow list");
8073    markOop m = _preserved_mark_stack.pop();
8074    p->set_mark(m);
8075  }
8076  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8077         "stacks were cleared above");
8078}
8079
8080#ifndef PRODUCT
8081bool CMSCollector::no_preserved_marks() const {
8082  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8083}
8084#endif
8085
8086// Transfer some number of overflown objects to the usual marking
8087// stack. Return true if some objects were transferred.
8088bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8089  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8090                    (size_t)ParGCDesiredObjsFromOverflowList);
8091
8092  bool res = _collector->take_from_overflow_list(num, _mark_stack);
8093  assert(_collector->overflow_list_is_empty() || res,
8094         "If list is not empty, we should have taken something");
8095  assert(!res || !_mark_stack->isEmpty(),
8096         "If we took something, it should now be on our stack");
8097  return res;
8098}
8099
8100size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8101  size_t res = _sp->block_size_no_stall(addr, _collector);
8102  if (_sp->block_is_obj(addr)) {
8103    if (_live_bit_map->isMarked(addr)) {
8104      // It can't have been dead in a previous cycle
8105      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8106    } else {
8107      _dead_bit_map->mark(addr);      // mark the dead object
8108    }
8109  }
8110  // Could be 0, if the block size could not be computed without stalling.
8111  return res;
8112}
8113
8114TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8115
8116  switch (phase) {
8117    case CMSCollector::InitialMarking:
8118      initialize(true  /* fullGC */ ,
8119                 cause /* cause of the GC */,
8120                 true  /* recordGCBeginTime */,
8121                 true  /* recordPreGCUsage */,
8122                 false /* recordPeakUsage */,
8123                 false /* recordPostGCusage */,
8124                 true  /* recordAccumulatedGCTime */,
8125                 false /* recordGCEndTime */,
8126                 false /* countCollection */  );
8127      break;
8128
8129    case CMSCollector::FinalMarking:
8130      initialize(true  /* fullGC */ ,
8131                 cause /* cause of the GC */,
8132                 false /* recordGCBeginTime */,
8133                 false /* recordPreGCUsage */,
8134                 false /* recordPeakUsage */,
8135                 false /* recordPostGCusage */,
8136                 true  /* recordAccumulatedGCTime */,
8137                 false /* recordGCEndTime */,
8138                 false /* countCollection */  );
8139      break;
8140
8141    case CMSCollector::Sweeping:
8142      initialize(true  /* fullGC */ ,
8143                 cause /* cause of the GC */,
8144                 false /* recordGCBeginTime */,
8145                 false /* recordPreGCUsage */,
8146                 true  /* recordPeakUsage */,
8147                 true  /* recordPostGCusage */,
8148                 false /* recordAccumulatedGCTime */,
8149                 true  /* recordGCEndTime */,
8150                 true  /* countCollection */  );
8151      break;
8152
8153    default:
8154      ShouldNotReachHere();
8155  }
8156}
8157