/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it was holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by the CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
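
// Purely illustrative usage sketch (not a quote from the code below): a
// CMS-thread phase that needs the CMS token plus one or two mutexes would
// typically be bracketed as
//   CMSTokenSyncWithLocks ts(true /* is_cms_thread */, some_lock);
// where "some_lock" stands for whatever mutex the phase needs. The token is
// acquired first and, because the MutexLockerEx members are destroyed before
// the CMSTokenSync base, released last.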


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CompactibleFreeListSpaceLAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
  CardGeneration(rs, initial_byte_size, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_old_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
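  // Purely illustrative (the actual values are platform and build dependent):
  // if MinChunkSize were 4 HeapWords and CollectedHeap::min_fill_size() were
  // 2 HeapWords, _dilatation_factor would be 2.0, i.e. a minimally sized
  // young-gen object could occupy up to twice as many words once promoted
  // into this generation.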
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio; then
//
//     _initiating_occupancy = (100 - f) + f * (CMSTriggerRatio / 100)
//
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
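// Purely illustrative example: with MinHeapFreeRatio = 40 and
// CMSTriggerRatio = 80 (and CMSInitiatingOccupancyFraction left negative,
// i.e. unset), the computation below gives
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100 = 0.92
// so a concurrent cycle would be considered once this generation is
// about 92% full.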
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                   // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use the padded average size of
// the promotion for each young generation collection as a safety factor.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next young collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrences of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
                  cms_free, expected_promotion);
    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
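    // Purely illustrative numbers: if 100 units of cms_free remain after
    // subtracting the expected promotion, a CMSIncrementalSafetyFactor of 10
    // scales that to 90 units; at a cms_consumption_rate() of 30 units/sec
    // the value returned below is 90 / (30 + 1), i.e. roughly 2.9 seconds
    // until the generation is expected to fill up.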
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}

// Compare the duration of the cms collection to the
// time remaining before the cms generation is full.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) could be used instead. That
// has been tried, and some applications experienced
// promotion failures early in execution, possibly because the
// averages were not accurate enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
                          cms_duration(), gc0_period(), time_until_cms_gen_full());
    return 0.0;
  }
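  // Note: when work <= deadline the value returned below is work - deadline,
  // which is zero or negative; the caller in shouldConcurrentCollect() only
  // treats an exact 0.0 as "start a cycle now", so a negative result simply
  // means there is still slack before a cycle needs to start.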
  return work - deadline;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->cr();
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // Verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      log_warning(gc)("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        log_warning(gc)("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          log_warning(gc)("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // CMSBootstrapOccupancy is a percentage (constrained to [0, 100]);
  // convert it to a fraction.
  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(); it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
  return res;
}

// At a promotion failure, dump information on block layout in the heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  Log(gc, promotion) log;
  if (log.is_trace()) {
    ResourceMark rm;
    cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    Log(gc) log;
    if (log.is_trace()) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      log.trace("From compute_new_size: ");
      log.trace("  Free fraction %f", free_percentage);
      log.trace("  Desired free fraction %f", desired_free_percentage);
      log.trace("  Maximum free fraction %f", maximum_free_percentage);
      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
      size_t young_size = gch->young_gen()->capacity();
      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL. All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states: a free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// or an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);

  // Because card-scanning has been completed, subsequent phases
  // (e.g., reference processing) will not need to recognize which
  // objects have been promoted during this GC. So, we can now disable
  // promotion tracking.
  ps->promo.stopTrackingPromotions();
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  LogTarget(Trace, gc) log;

  if (_full_gc_requested) {
    log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (log.is_enabled() && stats().valid()) {
    log.print("CMSCollector shouldConcurrentCollect: ");

    LogStream out(log);
    stats().print_on(&out);

    log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
    log.print("free=" SIZE_FORMAT, _cmsGen->free());
    log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
    log.print("promotion_rate=%g", stats().promotion_rate());
    log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
    log.print("occupancy=%3.7f", _cmsGen->occupancy());
    log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
                  _cmsGen->occupancy(), _bootstrap_occupancy);
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if the old gen wants
  // a collection cycle started, using an appropriate criterion
  // for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    log.print("CMS old gen initiated");
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
    log.print("CMSCollector: collect because incremental collection will fail ");
    return true;
  }

  if (MetaspaceGC::should_concurrent_collect()) {
    log.print("CMSCollector: collect for metadata allocation ");
    return true;
  }

  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
  if (CMSTriggerInterval >= 0) {
    if (CMSTriggerInterval == 0) {
      // Trigger always
      return true;
    }

    // Check the CMS time since begin (we do not check the stats validity
    // as we want to be able to trigger the first CMS cycle as well)
    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
      if (stats().valid()) {
        log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
                  stats().cms_time_since_begin());
      } else {
        log.print("CMSCollector: collect because of trigger interval (first collection)");
      }
      return true;
    }
  }

  return false;
}
1218
1219void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1220
1221// Clear _expansion_cause fields of constituent generations
1222void CMSCollector::clear_expansion_cause() {
1223  _cmsGen->clear_expansion_cause();
1224}
1225
1226// We should be conservative in starting a collection cycle.  To
1227// start too eagerly runs the risk of collecting too often in the
1228// extreme.  To collect too rarely falls back on full collections,
1229// which works, even if not optimum in terms of concurrent work.
1230// As a work around for too eagerly collecting, use the flag
1231// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1232// giving the user an easily understandable way of controlling the
1233// collections.
1234// We want to start a new collection cycle if any of the following
1235// conditions hold:
1236// . our current occupancy exceeds the configured initiating occupancy
1237//   for this generation, or
1238// . we recently needed to expand this space and have not, since that
1239//   expansion, done a collection of this generation, or
1240// . the underlying space believes that it may be a good idea to initiate
1241//   a concurrent collection (this may be based on criteria such as the
1242//   following: the space uses linear allocation and linear allocation is
1243//   going to fail, or there is believed to be excessive fragmentation in
1244//   the generation, etc... or ...
1245// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1246//   the case of the old generation; see CR 6543076):
1247//   we may be approaching a point at which allocation requests may fail because
1248//   we will be out of sufficient free space given allocation rate estimates.]
1249bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1250
1251  assert_lock_strong(freelistLock());
1252  if (occupancy() > initiating_occupancy()) {
1253    log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
1254                  short_name(), occupancy(), initiating_occupancy());
1255    return true;
1256  }
1257  if (UseCMSInitiatingOccupancyOnly) {
1258    return false;
1259  }
1260  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1261    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
1262    return true;
1263  }
1264  return false;
1265}
1266
1267void ConcurrentMarkSweepGeneration::collect(bool   full,
1268                                            bool   clear_all_soft_refs,
1269                                            size_t size,
1270                                            bool   tlab)
1271{
1272  collector()->collect(full, clear_all_soft_refs, size, tlab);
1273}
1274
1275void CMSCollector::collect(bool   full,
1276                           bool   clear_all_soft_refs,
1277                           size_t size,
1278                           bool   tlab)
1279{
1280  // The following "if" branch is present for defensive reasons.
1281  // In the current uses of this interface, it can be replaced with:
1282  // assert(!GCLocker.is_active(), "Can't be called otherwise");
1283  // But I am not placing that assert here to allow future
1284  // generality in invoking this interface.
1285  if (GCLocker::is_active()) {
1286    // A consistency test for GCLocker
1287    assert(GCLocker::needs_gc(), "Should have been set already");
1288    // Skip this foreground collection, instead
1289    // expanding the heap if necessary.
1290    // Need the free list locks for the call to free() in compute_new_size()
1291    compute_new_size();
1292    return;
1293  }
1294  acquire_control_and_collect(full, clear_all_soft_refs);
1295}
1296
1297void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1298  GenCollectedHeap* gch = GenCollectedHeap::heap();
1299  unsigned int gc_count = gch->total_full_collections();
1300  if (gc_count == full_gc_count) {
1301    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1302    _full_gc_requested = true;
1303    _full_gc_cause = cause;
1304    CGC_lock->notify();   // nudge CMS thread
1305  } else {
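    // One or more full collections have already occurred since the caller
    // sampled full_gc_count, so the request has effectively been satisfied.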
1306    assert(gc_count > full_gc_count, "Error: causal loop");
1307  }
1308}
1309
1310bool CMSCollector::is_external_interruption() {
1311  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1312  return GCCause::is_user_requested_gc(cause) ||
1313         GCCause::is_serviceability_requested_gc(cause);
1314}
1315
1316void CMSCollector::report_concurrent_mode_interruption() {
1317  if (is_external_interruption()) {
1318    log_debug(gc)("Concurrent mode interrupted");
1319  } else {
1320    log_debug(gc)("Concurrent mode failure");
1321    _gc_tracer_cm->report_concurrent_mode_failure();
1322  }
1323}
1324
1325
1326// The foreground and background collectors need to coordinate in order
1327// to make sure that they do not mutually interfere with CMS collections.
1328// When a background collection is active,
1329// the foreground collector may need to take over (preempt) and
1330// synchronously complete an ongoing collection. Depending on the
1331// frequency of the background collections and the heap usage
1332// of the application, this preemption can be seldom or frequent.
// There are only certain
// points in the background collection at which the "collection-baton"
// can be passed to the foreground collector.
1336//
1337// The foreground collector will wait for the baton before
1338// starting any part of the collection.  The foreground collector
1339// will only wait at one location.
1340//
1341// The background collector will yield the baton before starting a new
1342// phase of the collection (e.g., before initial marking, marking from roots,
1343// precleaning, final re-mark, sweep etc.)  This is normally done at the head
1344// of the loop which switches the phases. The background collector does some
1345// of the phases (initial mark, final re-mark) with the world stopped.
1346// Because of locking involved in stopping the world,
1347// the foreground collector should not block waiting for the background
1348// collector when it is doing a stop-the-world phase.  The background
1349// collector will yield the baton at an additional point just before
1350// it enters a stop-the-world phase.  Once the world is stopped, the
1351// background collector checks the phase of the collection.  If the
1352// phase has not changed, it proceeds with the collection.  If the
1353// phase has changed, it skips that phase of the collection.  See
1354// the comments on the use of the Heap_lock in collect_in_background().
1355//
// Variables used in baton passing.
1357//   _foregroundGCIsActive - Set to true by the foreground collector when
1358//      it wants the baton.  The foreground clears it when it has finished
1359//      the collection.
1360//   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
1362//      _foregroundGCShouldWait is true.
1363//  CGC_lock - monitor used to protect access to the above variables
1364//      and to notify the foreground and background collectors.
1365//  _collectorState - current state of the CMS collection.
1366//
1367// The foreground collector
1368//   acquires the CGC_lock
1369//   sets _foregroundGCIsActive
1370//   waits on the CGC_lock for _foregroundGCShouldWait to be false
1371//     various locks acquired in preparation for the collection
1372//     are released so as not to block the background collector
1373//     that is in the midst of a collection
1374//   proceeds with the collection
1375//   clears _foregroundGCIsActive
1376//   returns
1377//
1378// The background collector in a loop iterating on the phases of the
1379//      collection
1380//   acquires the CGC_lock
1381//   sets _foregroundGCShouldWait
1382//   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
1385//     and exits the loop.
1386//   otherwise
1387//     proceed with that phase of the collection
1388//     if the phase is a stop-the-world phase,
1389//       yield the baton once more just before enqueueing
1390//       the stop-world CMS operation (executed by the VM thread).
1391//   returns after all phases of the collection are done
1392//
1393
1394void CMSCollector::acquire_control_and_collect(bool full,
1395        bool clear_all_soft_refs) {
1396  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1397  assert(!Thread::current()->is_ConcurrentGC_thread(),
1398         "shouldn't try to acquire control from self!");
1399
1400  // Start the protocol for acquiring control of the
1401  // collection from the background collector (aka CMS thread).
1402  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1403         "VM thread should have CMS token");
1404  // Remember the possibly interrupted state of an ongoing
1405  // concurrent collection
1406  CollectorState first_state = _collectorState;
1407
1408  // Signal to a possibly ongoing concurrent collection that
1409  // we want to do a foreground collection.
1410  _foregroundGCIsActive = true;
1411
1412  // release locks and wait for a notify from the background collector
  // releasing the locks is only necessary for phases that
  // yield, to improve the granularity of the collection.
1415  assert_lock_strong(bitMapLock());
1416  // We need to lock the Free list lock for the space that we are
1417  // currently collecting.
1418  assert(haveFreelistLocks(), "Must be holding free list locks");
1419  bitMapLock()->unlock();
1420  releaseFreelistLocks();
1421  {
1422    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1423    if (_foregroundGCShouldWait) {
      // We are going to be waiting for action from the CMS thread;
1425      // it had better not be gone (for instance at shutdown)!
1426      assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
1427             "CMS thread must be running");
1428      // Wait here until the background collector gives us the go-ahead
1429      ConcurrentMarkSweepThread::clear_CMS_flag(
1430        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1431      // Get a possibly blocked CMS thread going:
1432      //   Note that we set _foregroundGCIsActive true above,
1433      //   without protection of the CGC_lock.
1434      CGC_lock->notify();
1435      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1436             "Possible deadlock");
1437      while (_foregroundGCShouldWait) {
1438        // wait for notification
1439        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1440        // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to the VM thread? Actually, I think
1442        // there wouldn't be any delay/starvation, but the proof of
1443        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1444      }
1445      ConcurrentMarkSweepThread::set_CMS_flag(
1446        ConcurrentMarkSweepThread::CMS_vm_has_token);
1447    }
1448  }
1449  // The CMS_token is already held.  Get back the other locks.
1450  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1451         "VM thread should have CMS token");
1452  getFreelistLocks();
1453  bitMapLock()->lock_without_safepoint_check();
1454  log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1455                       p2i(Thread::current()), first_state);
1456  log_debug(gc, state)("    gets control with state %d", _collectorState);
1457
1458  // Inform cms gen if this was due to partial collection failing.
1459  // The CMS gen may use this fact to determine its expansion policy.
1460  GenCollectedHeap* gch = GenCollectedHeap::heap();
1461  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1462    assert(!_cmsGen->incremental_collection_failed(),
1463           "Should have been noticed, reacted to and cleared");
1464    _cmsGen->set_incremental_collection_failed();
1465  }
1466
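  // A first_state beyond Idling means a concurrent cycle was already in
  // progress when the foreground collector took over.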
1467  if (first_state > Idling) {
1468    report_concurrent_mode_interruption();
1469  }
1470
1471  set_did_compact(true);
1472
1473  // If the collection is being acquired from the background
1474  // collector, there may be references on the discovered
1475  // references lists.  Abandon those references, since some
1476  // of them may have become unreachable after concurrent
1477  // discovery; the STW compacting collector will redo discovery
1478  // more precisely, without being subject to floating garbage.
1479  // Leaving otherwise unreachable references in the discovered
1480  // lists would require special handling.
1481  ref_processor()->disable_discovery();
1482  ref_processor()->abandon_partial_discovery();
1483  ref_processor()->verify_no_references_recorded();
1484
1485  if (first_state > Idling) {
1486    save_heap_summary();
1487  }
1488
1489  do_compaction_work(clear_all_soft_refs);
1490
1491  // Has the GC time limit been exceeded?
1492  size_t max_eden_size = _young_gen->max_eden_size();
1493  GCCause::Cause gc_cause = gch->gc_cause();
1494  size_policy()->check_gc_overhead_limit(_young_gen->used(),
1495                                         _young_gen->eden()->used(),
1496                                         _cmsGen->max_capacity(),
1497                                         max_eden_size,
1498                                         full,
1499                                         gc_cause,
1500                                         gch->collector_policy());
1501
1502  // Reset the expansion cause, now that we just completed
1503  // a collection cycle.
1504  clear_expansion_cause();
1505  _foregroundGCIsActive = false;
1506  return;
1507}
1508
1509// Resize the tenured generation
1510// after obtaining the free list locks for the
1511// two generations.
1512void CMSCollector::compute_new_size() {
1513  assert_locked_or_safepoint(Heap_lock);
1514  FreelistLocker z(this);
1515  MetaspaceGC::compute_new_size();
1516  _cmsGen->compute_new_size_free_list();
1517}
1518
1519// A work method used by the foreground collector to do
1520// a mark-sweep-compact.
1521void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1522  GenCollectedHeap* gch = GenCollectedHeap::heap();
1523
1524  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1525  gc_timer->register_gc_start();
1526
1527  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1528  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1529
1530  gch->pre_full_gc_dump(gc_timer);
1531
1532  GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1533
1534  // Temporarily widen the span of the weak reference processing to
1535  // the entire heap.
1536  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1537  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1538  // Temporarily, clear the "is_alive_non_header" field of the
1539  // reference processor.
1540  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1541  // Temporarily make reference _processing_ single threaded (non-MT).
1542  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1543  // Temporarily make refs discovery atomic
1544  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1545  // Temporarily make reference _discovery_ single threaded (non-MT)
1546  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1547
1548  ref_processor()->set_enqueuing_is_done(false);
1549  ref_processor()->enable_discovery();
1550  ref_processor()->setup_policy(clear_all_soft_refs);
1551  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over the collection from an asynchronous
1553  // collection, clear the _modUnionTable.
1554  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1555    "_modUnionTable should be clear if the baton was not passed");
1556  _modUnionTable.clear_all();
1557  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1558    "mod union for klasses should be clear if the baton was passed");
1559  _ct->klass_rem_set()->clear_mod_union();
1560
1561  // We must adjust the allocation statistics being maintained
1562  // in the free list space. We do so by reading and clearing
1563  // the sweep timer and updating the block flux rate estimates below.
1564  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1565  if (_inter_sweep_timer.is_active()) {
1566    _inter_sweep_timer.stop();
1567    // Note that we do not use this sample to update the _inter_sweep_estimate.
1568    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1569                                            _inter_sweep_estimate.padded_average(),
1570                                            _intra_sweep_estimate.padded_average());
1571  }
1572
1573  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1574  #ifdef ASSERT
1575    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1576    size_t free_size = cms_space->free();
1577    assert(free_size ==
1578           pointer_delta(cms_space->end(), cms_space->compaction_top())
1579           * HeapWordSize,
1580      "All the free space should be compacted into one chunk at top");
1581    assert(cms_space->dictionary()->total_chunk_size(
1582                                      debug_only(cms_space->freelistLock())) == 0 ||
1583           cms_space->totalSizeInIndexedFreeLists() == 0,
1584      "All the free space should be in a single chunk");
1585    size_t num = cms_space->totalCount();
1586    assert((free_size == 0 && num == 0) ||
1587           (free_size > 0  && (num == 1 || num == 2)),
1588         "There should be at most 2 free chunks after compaction");
1589  #endif // ASSERT
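  // Note that we skip the Resizing phase here: for a mark-sweep-compact the
  // resizing is done via compute_new_size() in the heap's do_collection()
  // (see the comment at the end of this method).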
1590  _collectorState = Resetting;
1591  assert(_restart_addr == NULL,
1592         "Should have been NULL'd before baton was passed");
1593  reset_stw();
1594  _cmsGen->reset_after_compaction();
1595  _concurrent_cycles_since_last_unload = 0;
1596
1597  // Clear any data recorded in the PLAB chunk arrays.
1598  if (_survivor_plab_array != NULL) {
1599    reset_survivor_plab_arrays();
1600  }
1601
1602  // Adjust the per-size allocation stats for the next epoch.
1603  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1604  // Restart the "inter sweep timer" for the next epoch.
1605  _inter_sweep_timer.reset();
1606  _inter_sweep_timer.start();
1607
1608  // No longer a need to do a concurrent collection for Metaspace.
1609  MetaspaceGC::set_should_concurrent_collect(false);
1610
1611  gch->post_full_gc_dump(gc_timer);
1612
1613  gc_timer->register_gc_end();
1614
1615  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1616
1617  // For a mark-sweep-compact, compute_new_size() will be called
1618  // in the heap's do_collection() method.
1619}
1620
1621void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1622  Log(gc, heap) log;
1623  if (!log.is_trace()) {
1624    return;
1625  }
1626
1627  ContiguousSpace* eden_space = _young_gen->eden();
1628  ContiguousSpace* from_space = _young_gen->from();
1629  ContiguousSpace* to_space   = _young_gen->to();
1630  // Eden
1631  if (_eden_chunk_array != NULL) {
1632    log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1633              p2i(eden_space->bottom()), p2i(eden_space->top()),
1634              p2i(eden_space->end()), eden_space->capacity());
1635    log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
1636              _eden_chunk_index, _eden_chunk_capacity);
1637    for (size_t i = 0; i < _eden_chunk_index; i++) {
1638      log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
1639    }
1640  }
1641  // Survivor
1642  if (_survivor_chunk_array != NULL) {
1643    log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1644              p2i(from_space->bottom()), p2i(from_space->top()),
1645              p2i(from_space->end()), from_space->capacity());
1646    log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
1647              _survivor_chunk_index, _survivor_chunk_capacity);
1648    for (size_t i = 0; i < _survivor_chunk_index; i++) {
1649      log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
1650    }
1651  }
1652}
1653
1654void CMSCollector::getFreelistLocks() const {
1655  // Get locks for all free lists in all generations that this
1656  // collector is responsible for
1657  _cmsGen->freelistLock()->lock_without_safepoint_check();
1658}
1659
1660void CMSCollector::releaseFreelistLocks() const {
1661  // Release locks for all free lists in all generations that this
1662  // collector is responsible for
1663  _cmsGen->freelistLock()->unlock();
1664}
1665
1666bool CMSCollector::haveFreelistLocks() const {
1667  // Check locks for all free lists in all generations that this
1668  // collector is responsible for
1669  assert_lock_strong(_cmsGen->freelistLock());
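  // assert_lock_strong() compiles to nothing in product builds, so this
  // check-only helper should never be reached there.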
1670  PRODUCT_ONLY(ShouldNotReachHere());
1671  return true;
1672}
1673
1674// A utility class that is used by the CMS collector to
1675// temporarily "release" the foreground collector from its
1676// usual obligation to wait for the background collector to
1677// complete an ongoing phase before proceeding.
1678class ReleaseForegroundGC: public StackObj {
1679 private:
1680  CMSCollector* _c;
1681 public:
1682  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1683    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1684    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1685    // allow a potentially blocked foreground collector to proceed
1686    _c->_foregroundGCShouldWait = false;
1687    if (_c->_foregroundGCIsActive) {
1688      CGC_lock->notify();
1689    }
1690    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1691           "Possible deadlock");
1692  }
1693
1694  ~ReleaseForegroundGC() {
1695    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1696    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1697    _c->_foregroundGCShouldWait = true;
1698  }
1699};
1700
1701void CMSCollector::collect_in_background(GCCause::Cause cause) {
1702  assert(Thread::current()->is_ConcurrentGC_thread(),
1703    "A CMS asynchronous collection is only allowed on a CMS thread.");
1704
1705  GenCollectedHeap* gch = GenCollectedHeap::heap();
1706  {
1707    bool safepoint_check = Mutex::_no_safepoint_check_flag;
1708    MutexLockerEx hl(Heap_lock, safepoint_check);
1709    FreelistLocker fll(this);
1710    MutexLockerEx x(CGC_lock, safepoint_check);
1711    if (_foregroundGCIsActive) {
      // The foreground collector is active. Skip this
1713      // background collection.
1714      assert(!_foregroundGCShouldWait, "Should be clear");
1715      return;
1716    } else {
1717      assert(_collectorState == Idling, "Should be idling before start.");
1718      _collectorState = InitialMarking;
1719      register_gc_start(cause);
1720      // Reset the expansion cause, now that we are about to begin
1721      // a new cycle.
1722      clear_expansion_cause();
1723
1724      // Clear the MetaspaceGC flag since a concurrent collection
1725      // is starting but also clear it after the collection.
1726      MetaspaceGC::set_should_concurrent_collect(false);
1727    }
1728    // Decide if we want to enable class unloading as part of the
1729    // ensuing concurrent GC cycle.
1730    update_should_unload_classes();
1731    _full_gc_requested = false;           // acks all outstanding full gc requests
1732    _full_gc_cause = GCCause::_no_gc;
1733    // Signal that we are about to start a collection
1734    gch->increment_total_full_collections();  // ... starting a collection cycle
1735    _collection_count_start = gch->total_full_collections();
1736  }
1737
1738  size_t prev_used = _cmsGen->used();
1739
1740  // The change of the collection state is normally done at this level;
1741  // the exceptions are phases that are executed while the world is
1742  // stopped.  For those phases the change of state is done while the
1743  // world is stopped.  For baton passing purposes this allows the
1744  // background collector to finish the phase and change state atomically.
1745  // The foreground collector cannot wait on a phase that is done
1746  // while the world is stopped because the foreground collector already
1747  // has the world stopped and would deadlock.
1748  while (_collectorState != Idling) {
1749    log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1750                         p2i(Thread::current()), _collectorState);
1751    // The foreground collector
1752    //   holds the Heap_lock throughout its collection.
1753    //   holds the CMS token (but not the lock)
1754    //     except while it is waiting for the background collector to yield.
1755    //
1756    // The foreground collector should be blocked (not for long)
1757    //   if the background collector is about to start a phase
1758    //   executed with world stopped.  If the background
1759    //   collector has already started such a phase, the
1760    //   foreground collector is blocked waiting for the
1761    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1762    //   are executed in the VM thread.
1763    //
1764    // The locking order is
1765    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1766    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1767    //   CMS token  (claimed in
1768    //                stop_world_and_do() -->
1769    //                  safepoint_synchronize() -->
1770    //                    CMSThread::synchronize())
1771
1772    {
1773      // Check if the FG collector wants us to yield.
1774      CMSTokenSync x(true); // is cms thread
1775      if (waitForForegroundGC()) {
1776        // We yielded to a foreground GC, nothing more to be
1777        // done this round.
1778        assert(_foregroundGCShouldWait == false, "We set it to false in "
1779               "waitForForegroundGC()");
1780        log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1781                             p2i(Thread::current()), _collectorState);
1782        return;
1783      } else {
1784        // The background collector can run but check to see if the
1785        // foreground collector has done a collection while the
1786        // background collector was waiting to get the CGC_lock
1787        // above.  If yes, break so that _foregroundGCShouldWait
1788        // is cleared before returning.
1789        if (_collectorState == Idling) {
1790          break;
1791        }
1792      }
1793    }
1794
1795    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1796      "should be waiting");
1797
1798    switch (_collectorState) {
1799      case InitialMarking:
1800        {
1801          ReleaseForegroundGC x(this);
1802          stats().record_cms_begin();
1803          VM_CMS_Initial_Mark initial_mark_op(this);
1804          VMThread::execute(&initial_mark_op);
1805        }
1806        // The collector state may be any legal state at this point
1807        // since the background collector may have yielded to the
1808        // foreground collector.
1809        break;
1810      case Marking:
1811        // initial marking in checkpointRootsInitialWork has been completed
1812        if (markFromRoots()) { // we were successful
1813          assert(_collectorState == Precleaning, "Collector state should "
1814            "have changed");
1815        } else {
1816          assert(_foregroundGCIsActive, "Internal state inconsistency");
1817        }
1818        break;
1819      case Precleaning:
1820        // marking from roots in markFromRoots has been completed
1821        preclean();
1822        assert(_collectorState == AbortablePreclean ||
1823               _collectorState == FinalMarking,
1824               "Collector state should have changed");
1825        break;
1826      case AbortablePreclean:
1827        abortable_preclean();
1828        assert(_collectorState == FinalMarking, "Collector state should "
1829          "have changed");
1830        break;
1831      case FinalMarking:
1832        {
1833          ReleaseForegroundGC x(this);
1834
1835          VM_CMS_Final_Remark final_remark_op(this);
1836          VMThread::execute(&final_remark_op);
1837        }
1838        assert(_foregroundGCShouldWait, "block post-condition");
1839        break;
1840      case Sweeping:
1841        // final marking in checkpointRootsFinal has been completed
1842        sweep();
1843        assert(_collectorState == Resizing, "Collector state change "
1844          "to Resizing must be done under the free_list_lock");
1845
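        // No break: sweeping is complete, fall through into the Resizing case.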
1846      case Resizing: {
1847        // Sweeping has been completed...
1848        // At this point the background collection has completed.
1849        // Don't move the call to compute_new_size() down
1850        // into code that might be executed if the background
1851        // collection was preempted.
1852        {
1853          ReleaseForegroundGC x(this);   // unblock FG collection
1854          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1855          CMSTokenSync        z(true);   // not strictly needed.
1856          if (_collectorState == Resizing) {
1857            compute_new_size();
1858            save_heap_summary();
1859            _collectorState = Resetting;
1860          } else {
1861            assert(_collectorState == Idling, "The state should only change"
1862                   " because the foreground collector has finished the collection");
1863          }
1864        }
1865        break;
1866      }
1867      case Resetting:
1868        // CMS heap resizing has been completed
1869        reset_concurrent();
1870        assert(_collectorState == Idling, "Collector state should "
1871          "have changed");
1872
1873        MetaspaceGC::set_should_concurrent_collect(false);
1874
1875        stats().record_cms_end();
1876        // Don't move the concurrent_phases_end() and compute_new_size()
1877        // calls to here because a preempted background collection
        // has its state set to "Resetting".
1879        break;
1880      case Idling:
1881      default:
1882        ShouldNotReachHere();
1883        break;
1884    }
1885    log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1886                         p2i(Thread::current()), _collectorState);
1887    assert(_foregroundGCShouldWait, "block post-condition");
1888  }
1889
1890  // Should this be in gc_epilogue?
1891  collector_policy()->counters()->update_counters();
1892
1893  {
1894    // Clear _foregroundGCShouldWait and, in the event that the
1895    // foreground collector is waiting, notify it, before
1896    // returning.
1897    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1898    _foregroundGCShouldWait = false;
1899    if (_foregroundGCIsActive) {
1900      CGC_lock->notify();
1901    }
1902    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1903           "Possible deadlock");
1904  }
1905  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1906                       p2i(Thread::current()), _collectorState);
1907  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1908                     prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);
1909}
1910
1911void CMSCollector::register_gc_start(GCCause::Cause cause) {
1912  _cms_start_registered = true;
1913  _gc_timer_cm->register_gc_start();
1914  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1915}
1916
1917void CMSCollector::register_gc_end() {
1918  if (_cms_start_registered) {
1919    report_heap_summary(GCWhen::AfterGC);
1920
1921    _gc_timer_cm->register_gc_end();
1922    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1923    _cms_start_registered = false;
1924  }
1925}
1926
1927void CMSCollector::save_heap_summary() {
1928  GenCollectedHeap* gch = GenCollectedHeap::heap();
1929  _last_heap_summary = gch->create_heap_summary();
1930  _last_metaspace_summary = gch->create_metaspace_summary();
1931}
1932
1933void CMSCollector::report_heap_summary(GCWhen::Type when) {
1934  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1935  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1936}
1937
1938bool CMSCollector::waitForForegroundGC() {
1939  bool res = false;
1940  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1941         "CMS thread should have CMS token");
1942  // Block the foreground collector until the
  // background collector decides whether to
1944  // yield.
1945  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1946  _foregroundGCShouldWait = true;
1947  if (_foregroundGCIsActive) {
1948    // The background collector yields to the
1949    // foreground collector and returns a value
1950    // indicating that it has yielded.  The foreground
1951    // collector can proceed.
1952    res = true;
1953    _foregroundGCShouldWait = false;
1954    ConcurrentMarkSweepThread::clear_CMS_flag(
1955      ConcurrentMarkSweepThread::CMS_cms_has_token);
1956    ConcurrentMarkSweepThread::set_CMS_flag(
1957      ConcurrentMarkSweepThread::CMS_cms_wants_token);
1958    // Get a possibly blocked foreground thread going
1959    CGC_lock->notify();
1960    log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
1961                         p2i(Thread::current()), _collectorState);
1962    while (_foregroundGCIsActive) {
1963      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1964    }
1965    ConcurrentMarkSweepThread::set_CMS_flag(
1966      ConcurrentMarkSweepThread::CMS_cms_has_token);
1967    ConcurrentMarkSweepThread::clear_CMS_flag(
1968      ConcurrentMarkSweepThread::CMS_cms_wants_token);
1969  }
1970  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
1971                       p2i(Thread::current()), _collectorState);
1972  return res;
1973}
1974
1975// Because of the need to lock the free lists and other structures in
1976// the collector, common to all the generations that the collector is
1977// collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector. It may have been simpler had the
1979// current infrastructure allowed one to call a prologue on a
1980// collector. In the absence of that we have the generation's
1981// prologue delegate to the collector, which delegates back
1982// some "local" work to a worker method in the individual generations
1983// that it's responsible for collecting, while itself doing any
1984// work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue()s.
1986// The role of the variable _between_prologue_and_epilogue is to
1987// enforce the invocation protocol.
1988void CMSCollector::gc_prologue(bool full) {
1989  // Call gc_prologue_work() for the CMSGen
1990  // we are responsible for.
1991
1992  // The following locking discipline assumes that we are only called
1993  // when the world is stopped.
1994  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
1995
  // The CMSCollector prologue must call the gc_prologues for the
  // "generations" that it's responsible for.
1999
2000  assert(   Thread::current()->is_VM_thread()
2001         || (   CMSScavengeBeforeRemark
2002             && Thread::current()->is_ConcurrentGC_thread()),
2003         "Incorrect thread type for prologue execution");
2004
2005  if (_between_prologue_and_epilogue) {
2006    // We have already been invoked; this is a gc_prologue delegation
2007    // from yet another CMS generation that we are responsible for, just
2008    // ignore it since all relevant work has already been done.
2009    return;
2010  }
2011
2012  // set a bit saying prologue has been called; cleared in epilogue
2013  _between_prologue_and_epilogue = true;
2014  // Claim locks for common data structures, then call gc_prologue_work()
2015  // for each CMSGen.
2016
2017  getFreelistLocks();   // gets free list locks on constituent spaces
2018  bitMapLock()->lock_without_safepoint_check();
2019
2020  // Should call gc_prologue_work() for all cms gens we are responsible for
2021  bool duringMarking =    _collectorState >= Marking
2022                         && _collectorState < Sweeping;
2023
2024  // The young collections clear the modified oops state, which tells if
2025  // there are any modified oops in the class. The remark phase also needs
2026  // that information. Tell the young collection to save the union of all
2027  // modified klasses.
2028  if (duringMarking) {
2029    _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2030  }
2031
2032  bool registerClosure = duringMarking;
2033
2034  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2035
2036  if (!full) {
2037    stats().record_gc0_begin();
2038  }
2039}
2040
2041void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2042
2043  _capacity_at_prologue = capacity();
2044  _used_at_prologue = used();
2045
2046  // We enable promotion tracking so that card-scanning can recognize
2047  // which objects have been promoted during this GC and skip them.
2048  for (uint i = 0; i < ParallelGCThreads; i++) {
2049    _par_gc_thread_states[i]->promo.startTrackingPromotions();
2050  }
2051
  // Delegate to the CMSCollector, which knows how to coordinate between
2053  // this and any other CMS generations that it is responsible for
2054  // collecting.
2055  collector()->gc_prologue(full);
2056}
2057
2058// This is a "private" interface for use by this generation's CMSCollector.
2059// Not to be called directly by any other entity (for instance,
2060// GenCollectedHeap, which calls the "public" gc_prologue method above).
2061void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2062  bool registerClosure, ModUnionClosure* modUnionClosure) {
2063  assert(!incremental_collection_failed(), "Shouldn't be set yet");
2064  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2065    "Should be NULL");
2066  if (registerClosure) {
2067    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2068  }
2069  cmsSpace()->gc_prologue();
2070  // Clear stat counters
2071  NOT_PRODUCT(
2072    assert(_numObjectsPromoted == 0, "check");
2073    assert(_numWordsPromoted   == 0, "check");
2074    log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
2075                                 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2076    _numObjectsAllocated = 0;
2077    _numWordsAllocated   = 0;
2078  )
2079}
2080
2081void CMSCollector::gc_epilogue(bool full) {
2082  // The following locking discipline assumes that we are only called
2083  // when the world is stopped.
2084  assert(SafepointSynchronize::is_at_safepoint(),
2085         "world is stopped assumption");
2086
2087  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  // if linear allocation blocks need to be appropriately marked to allow
2089  // the blocks to be parsable. We also check here whether we need to nudge the
2090  // CMS collector thread to start a new cycle (if it's not already active).
2091  assert(   Thread::current()->is_VM_thread()
2092         || (   CMSScavengeBeforeRemark
2093             && Thread::current()->is_ConcurrentGC_thread()),
2094         "Incorrect thread type for epilogue execution");
2095
2096  if (!_between_prologue_and_epilogue) {
2097    // We have already been invoked; this is a gc_epilogue delegation
2098    // from yet another CMS generation that we are responsible for, just
2099    // ignore it since all relevant work has already been done.
2100    return;
2101  }
2102  assert(haveFreelistLocks(), "must have freelist locks");
2103  assert_lock_strong(bitMapLock());
2104
2105  _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2106
2107  _cmsGen->gc_epilogue_work(full);
2108
2109  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2110    // in case sampling was not already enabled, enable it
2111    _start_sampling = true;
2112  }
2113  // reset _eden_chunk_array so sampling starts afresh
2114  _eden_chunk_index = 0;
2115
2116  size_t cms_used   = _cmsGen->cmsSpace()->used();
2117
2118  // update performance counters - this uses a special version of
2119  // update_counters() that allows the utilization to be passed as a
2120  // parameter, avoiding multiple calls to used().
2121  //
2122  _cmsGen->update_counters(cms_used);
2123
2124  bitMapLock()->unlock();
2125  releaseFreelistLocks();
2126
2127  if (!CleanChunkPoolAsync) {
2128    Chunk::clean_chunk_pool();
2129  }
2130
2131  set_did_compact(false);
2132  _between_prologue_and_epilogue = false;  // ready for next cycle
2133}
2134
2135void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2136  collector()->gc_epilogue(full);
2137
2138  // When using ParNew, promotion tracking should have already been
2139  // disabled. However, the prologue (which enables promotion
2140  // tracking) and epilogue are called irrespective of the type of
2141  // GC. So they will also be called before and after Full GCs, during
2142  // which promotion tracking will not be explicitly disabled. So,
  // it's safer to also disable it here (to be symmetric with
2144  // enabling it in the prologue).
2145  for (uint i = 0; i < ParallelGCThreads; i++) {
2146    _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2147  }
2148}
2149
2150void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2151  assert(!incremental_collection_failed(), "Should have been cleared");
2152  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2153  cmsSpace()->gc_epilogue();
  // Print stat counters
2155  NOT_PRODUCT(
2156    assert(_numObjectsAllocated == 0, "check");
2157    assert(_numWordsAllocated == 0, "check");
2158    log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
2159                                     _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2160    _numObjectsPromoted = 0;
2161    _numWordsPromoted   = 0;
2162  )
2163
  // The call down the chain in contiguous_available() needs the freelistLock,
  // so print this out before releasing the freelistLock.
2166  log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
2167}
2168
2169#ifndef PRODUCT
2170bool CMSCollector::have_cms_token() {
2171  Thread* thr = Thread::current();
2172  if (thr->is_VM_thread()) {
2173    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2174  } else if (thr->is_ConcurrentGC_thread()) {
2175    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2176  } else if (thr->is_GC_task_thread()) {
2177    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2178           ParGCRareEvent_lock->owned_by_self();
2179  }
2180  return false;
2181}
2182
2183// Check reachability of the given heap address in CMS generation,
2184// treating all other generations as roots.
2185bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2186  // We could "guarantee" below, rather than assert, but I'll
2187  // leave these as "asserts" so that an adventurous debugger
2188  // could try this in the product build provided some subset of
2189  // the conditions were met, provided they were interested in the
2190  // results and knew that the computation below wouldn't interfere
2191  // with other concurrent computations mutating the structures
2192  // being read or written.
2193  assert(SafepointSynchronize::is_at_safepoint(),
2194         "Else mutations in object graph will make answer suspect");
2195  assert(have_cms_token(), "Should hold cms token");
2196  assert(haveFreelistLocks(), "must hold free list locks");
2197  assert_lock_strong(bitMapLock());
2198
2199  // Clear the marking bit map array before starting, but, just
2200  // for kicks, first report if the given address is already marked
2201  tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2202                _markBitMap.isMarked(addr) ? "" : " not");
2203
2204  if (verify_after_remark()) {
2205    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2206    bool result = verification_mark_bm()->isMarked(addr);
2207    tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2208                  result ? "IS" : "is NOT");
2209    return result;
2210  } else {
2211    tty->print_cr("Could not compute result");
2212    return false;
2213  }
2214}
2215#endif
2216
2217void
2218CMSCollector::print_on_error(outputStream* st) {
2219  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2220  if (collector != NULL) {
2221    CMSBitMap* bitmap = &collector->_markBitMap;
2222    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2223    bitmap->print_on_error(st, " Bits: ");
2224
2225    st->cr();
2226
2227    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2228    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2229    mut_bitmap->print_on_error(st, " Bits: ");
2230  }
2231}
2232
2233////////////////////////////////////////////////////////
2234// CMS Verification Support
2235////////////////////////////////////////////////////////
2236// Following the remark phase, the following invariant
2237// should hold -- each object in the CMS heap which is
2238// marked in markBitMap() should be marked in the verification_mark_bm().
2239
2240class VerifyMarkedClosure: public BitMapClosure {
2241  CMSBitMap* _marks;
2242  bool       _failed;
2243
2244 public:
2245  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2246
2247  bool do_bit(size_t offset) {
2248    HeapWord* addr = _marks->offsetToHeapWord(offset);
2249    if (!_marks->isMarked(addr)) {
2250      Log(gc, verify) log;
2251      ResourceMark rm;
2252      oop(addr)->print_on(log.error_stream());
2253      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2254      _failed = true;
2255    }
2256    return true;
2257  }
2258
2259  bool failed() { return _failed; }
2260};
2261
2262bool CMSCollector::verify_after_remark() {
2263  GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
2264  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2265  static bool init = false;
2266
2267  assert(SafepointSynchronize::is_at_safepoint(),
2268         "Else mutations in object graph will make answer suspect");
2269  assert(have_cms_token(),
2270         "Else there may be mutual interference in use of "
2271         " verification data structures");
2272  assert(_collectorState > Marking && _collectorState <= Sweeping,
2273         "Else marking info checked here may be obsolete");
2274  assert(haveFreelistLocks(), "must hold free list locks");
2275  assert_lock_strong(bitMapLock());
2276
2277
2278  // Allocate marking bit map if not already allocated
2279  if (!init) { // first time
2280    if (!verification_mark_bm()->allocate(_span)) {
2281      return false;
2282    }
2283    init = true;
2284  }
2285
2286  assert(verification_mark_stack()->isEmpty(), "Should be empty");
2287
2288  // Turn off refs discovery -- so we will be tracing through refs.
2289  // This is as intended, because by this time
2290  // GC must already have cleared any refs that need to be cleared,
2291  // and traced those that need to be marked; moreover,
2292  // the marking done here is not going to interfere in any
2293  // way with the marking information used by GC.
2294  NoRefDiscovery no_discovery(ref_processor());
2295
2296#if defined(COMPILER2) || INCLUDE_JVMCI
2297  DerivedPointerTableDeactivate dpt_deact;
2298#endif
2299
2300  // Clear any marks from a previous round
2301  verification_mark_bm()->clear_all();
2302  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2303  verify_work_stacks_empty();
2304
2305  GenCollectedHeap* gch = GenCollectedHeap::heap();
2306  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2307  // Update the saved marks which may affect the root scans.
2308  gch->save_marks();
2309
2310  if (CMSRemarkVerifyVariant == 1) {
2311    // In this first variant of verification, we complete
2312    // all marking, then check if the new marks-vector is
2313    // a subset of the CMS marks-vector.
2314    verify_after_remark_work_1();
2315  } else {
2316    guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
2317    // In this second variant of verification, we flag an error
2318    // (i.e. an object reachable in the new marks-vector not reachable
2319    // in the CMS marks-vector) immediately, also indicating the
    // identity of an object (A) that references the unmarked object (B) --
2321    // presumably, a mutation to A failed to be picked up by preclean/remark?
2322    verify_after_remark_work_2();
2323  }
2324
2325  return true;
2326}
2327
2328void CMSCollector::verify_after_remark_work_1() {
2329  ResourceMark rm;
2330  HandleMark  hm;
2331  GenCollectedHeap* gch = GenCollectedHeap::heap();
2332
2333  // Get a clear set of claim bits for the roots processing to work with.
2334  ClassLoaderDataGraph::clear_claimed_marks();
2335
2336  // Mark from roots one level into CMS
2337  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2338  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2339
2340  {
2341    StrongRootsScope srs(1);
2342
2343    gch->cms_process_roots(&srs,
2344                           true,   // young gen as roots
2345                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2346                           should_unload_classes(),
2347                           &notOlder,
2348                           NULL);
2349  }
2350
2351  // Now mark from the roots
2352  MarkFromRootsClosure markFromRootsClosure(this, _span,
2353    verification_mark_bm(), verification_mark_stack(),
2354    false /* don't yield */, true /* verifying */);
2355  assert(_restart_addr == NULL, "Expected pre-condition");
2356  verification_mark_bm()->iterate(&markFromRootsClosure);
2357  while (_restart_addr != NULL) {
2358    // Deal with stack overflow: by restarting at the indicated
2359    // address.
2360    HeapWord* ra = _restart_addr;
2361    markFromRootsClosure.reset(ra);
2362    _restart_addr = NULL;
2363    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2364  }
2365  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2366  verify_work_stacks_empty();
2367
2368  // Marking completed -- now verify that each bit marked in
2369  // verification_mark_bm() is also marked in markBitMap(); flag all
2370  // errors by printing corresponding objects.
2371  VerifyMarkedClosure vcl(markBitMap());
2372  verification_mark_bm()->iterate(&vcl);
2373  if (vcl.failed()) {
2374    Log(gc, verify) log;
2375    log.error("Failed marking verification after remark");
2376    ResourceMark rm;
2377    gch->print_on(log.error_stream());
2378    fatal("CMS: failed marking verification after remark");
2379  }
2380}
2381
2382class VerifyKlassOopsKlassClosure : public KlassClosure {
2383  class VerifyKlassOopsClosure : public OopClosure {
2384    CMSBitMap* _bitmap;
2385   public:
2386    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2387    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2388    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2389  } _oop_closure;
2390 public:
2391  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2392  void do_klass(Klass* k) {
2393    k->oops_do(&_oop_closure);
2394  }
2395};
2396
2397void CMSCollector::verify_after_remark_work_2() {
2398  ResourceMark rm;
2399  HandleMark  hm;
2400  GenCollectedHeap* gch = GenCollectedHeap::heap();
2401
2402  // Get a clear set of claim bits for the roots processing to work with.
2403  ClassLoaderDataGraph::clear_claimed_marks();
2404
2405  // Mark from roots one level into CMS
2406  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2407                                     markBitMap());
2408  CLDToOopClosure cld_closure(&notOlder, true);
2409
2410  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2411
2412  {
2413    StrongRootsScope srs(1);
2414
2415    gch->cms_process_roots(&srs,
2416                           true,   // young gen as roots
2417                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2418                           should_unload_classes(),
2419                           &notOlder,
2420                           &cld_closure);
2421  }
2422
2423  // Now mark from the roots
2424  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2425    verification_mark_bm(), markBitMap(), verification_mark_stack());
2426  assert(_restart_addr == NULL, "Expected pre-condition");
2427  verification_mark_bm()->iterate(&markFromRootsClosure);
2428  while (_restart_addr != NULL) {
2429    // Deal with stack overflow: by restarting at the indicated
2430    // address.
2431    HeapWord* ra = _restart_addr;
2432    markFromRootsClosure.reset(ra);
2433    _restart_addr = NULL;
2434    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2435  }
2436  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2437  verify_work_stacks_empty();
2438
2439  VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2440  ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2441
2442  // Marking completed -- now verify that each bit marked in
2443  // verification_mark_bm() is also marked in markBitMap(); flag all
2444  // errors by printing corresponding objects.
2445  VerifyMarkedClosure vcl(markBitMap());
2446  verification_mark_bm()->iterate(&vcl);
2447  assert(!vcl.failed(), "Else verification above should not have succeeded");
2448}
2449
2450void ConcurrentMarkSweepGeneration::save_marks() {
2451  // delegate to CMS space
2452  cmsSpace()->save_marks();
2453}
2454
2455bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2456  return cmsSpace()->no_allocs_since_save_marks();
2457}
2458
2459#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2460                                                                \
2461void ConcurrentMarkSweepGeneration::                            \
2462oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2463  cl->set_generation(this);                                     \
2464  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2465  cl->reset_generation();                                       \
2466  save_marks();                                                 \
2467}
2468
2469ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2470
2471void
2472ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2473  if (freelistLock()->owned_by_self()) {
2474    Generation::oop_iterate(cl);
2475  } else {
2476    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2477    Generation::oop_iterate(cl);
2478  }
2479}
2480
2481void
2482ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2483  if (freelistLock()->owned_by_self()) {
2484    Generation::object_iterate(cl);
2485  } else {
2486    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2487    Generation::object_iterate(cl);
2488  }
2489}
2490
2491void
2492ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2493  if (freelistLock()->owned_by_self()) {
2494    Generation::safe_object_iterate(cl);
2495  } else {
2496    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2497    Generation::safe_object_iterate(cl);
2498  }
2499}
2500
2501void
2502ConcurrentMarkSweepGeneration::post_compact() {
2503}
2504
2505void
2506ConcurrentMarkSweepGeneration::prepare_for_verify() {
2507  // Fix the linear allocation blocks to look like free blocks.
2508
2509  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2510  // are not called when the heap is verified during universe initialization and
2511  // at vm shutdown.
2512  if (freelistLock()->owned_by_self()) {
2513    cmsSpace()->prepare_for_verify();
2514  } else {
2515    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2516    cmsSpace()->prepare_for_verify();
2517  }
2518}
2519
2520void
2521ConcurrentMarkSweepGeneration::verify() {
2522  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2523  // are not called when the heap is verified during universe initialization and
2524  // at vm shutdown.
2525  if (freelistLock()->owned_by_self()) {
2526    cmsSpace()->verify();
2527  } else {
2528    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2529    cmsSpace()->verify();
2530  }
2531}
2532
2533void CMSCollector::verify() {
2534  _cmsGen->verify();
2535}
2536
2537#ifndef PRODUCT
2538bool CMSCollector::overflow_list_is_empty() const {
2539  assert(_num_par_pushes >= 0, "Inconsistency");
2540  if (_overflow_list == NULL) {
2541    assert(_num_par_pushes == 0, "Inconsistency");
2542  }
2543  return _overflow_list == NULL;
2544}
2545
2546// The methods verify_work_stacks_empty() and verify_overflow_empty()
2547// merely consolidate assertion checks that appear to occur together frequently.
2548void CMSCollector::verify_work_stacks_empty() const {
2549  assert(_markStack.isEmpty(), "Marking stack should be empty");
2550  assert(overflow_list_is_empty(), "Overflow list should be empty");
2551}
2552
2553void CMSCollector::verify_overflow_empty() const {
2554  assert(overflow_list_is_empty(), "Overflow list should be empty");
2555  assert(no_preserved_marks(), "No preserved marks");
2556}
2557#endif // PRODUCT
2558
2559// Decide if we want to enable class unloading as part of the
2560// ensuing concurrent GC cycle. We will collect and
2561// unload classes if it's the case that:
2562// (1) an explicit gc request has been made and the flag
2563//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2564// (2) (a) class unloading is enabled at the command line, and
2565//     (b) old gen is getting really full
2566// NOTE: Provided there is no change in the state of the heap between
2567// calls to this method, it should have idempotent results. Moreover,
2568// its results should be monotonically increasing (i.e. going from 0 to 1,
2569// but not 1 to 0) between successive calls between which the heap was
2570// not collected. For the implementation below, it must thus rely on
2571// the property that concurrent_cycles_since_last_unload()
2572// will not decrease unless a collection cycle happened and that
// _cmsGen->is_too_full() is itself also monotonic in that sense.
// See check_monotonicity() below.
2576void CMSCollector::update_should_unload_classes() {
2577  _should_unload_classes = false;
2578  // Condition 1 above
2579  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2580    _should_unload_classes = true;
2581  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
    // Disjuncts 2.b above
2583    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2584                              CMSClassUnloadingMaxInterval)
2585                           || _cmsGen->is_too_full();
2586  }
2587}
2588
2589bool ConcurrentMarkSweepGeneration::is_too_full() const {
2590  bool res = should_concurrent_collect();
2591  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2592  return res;
2593}
2594
2595void CMSCollector::setup_cms_unloading_and_verification_state() {
2596  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2597                             || VerifyBeforeExit;
2598  const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2599
2600  // We set the proper root for this CMS cycle here.
2601  if (should_unload_classes()) {   // Should unload classes this cycle
2602    remove_root_scanning_option(rso);  // Shrink the root set appropriately
2603    set_verifying(should_verify);    // Set verification state for this cycle
2604    return;                            // Nothing else needs to be done at this time
2605  }
2606
2607  // Not unloading classes this cycle
2608  assert(!should_unload_classes(), "Inconsistency!");
2609
2610  // If we are not unloading classes then add SO_AllCodeCache to root
2611  // scanning options.
2612  add_root_scanning_option(rso);
2613
2614  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2615    set_verifying(true);
2616  } else if (verifying() && !should_verify) {
2617    // We were verifying, but some verification flags got disabled.
2618    set_verifying(false);
2619    // Exclude symbols, strings and code cache elements from root scanning to
2620    // reduce initial mark (IM) and remark (RM) pauses.
2621    remove_root_scanning_option(rso);
2622  }
2623}
2624
2625
2626#ifndef PRODUCT
2627HeapWord* CMSCollector::block_start(const void* p) const {
2628  const HeapWord* addr = (HeapWord*)p;
2629  if (_span.contains(p)) {
2630    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2631      return _cmsGen->cmsSpace()->block_start(p);
2632    }
2633  }
2634  return NULL;
2635}
2636#endif
2637
2638HeapWord*
2639ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2640                                                   bool   tlab,
2641                                                   bool   parallel) {
2642  CMSSynchronousYieldRequest yr;
2643  assert(!tlab, "Can't deal with TLAB allocation");
2644  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2645  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
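  // Optionally pause between expanding and allocating; GCExpandToAllocateDelayMillis
  // is a develop-time knob that widens the window in which a competing thread
  // can claim the newly expanded space.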
2646  if (GCExpandToAllocateDelayMillis > 0) {
2647    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2648  }
2649  return have_lock_and_allocate(word_size, tlab);
2650}
2651
2652void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2653    size_t bytes,
2654    size_t expand_bytes,
2655    CMSExpansionCause::Cause cause)
2656{
2657
2658  bool success = expand(bytes, expand_bytes);
2659
2660  // remember why we expanded; this information is used
2661  // by shouldConcurrentCollect() when making decisions on whether to start
2662  // a new CMS cycle.
2663  if (success) {
2664    set_expansion_cause(cause);
2665    log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
2666  }
2667}
2668
2669HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2670  HeapWord* res = NULL;
2671  MutexLocker x(ParGCRareEvent_lock);
2672  while (true) {
2673    // Expansion by some other thread might make alloc OK now:
2674    res = ps->lab.alloc(word_sz);
2675    if (res != NULL) return res;
2676    // If there's not enough expansion space available, give up.
2677    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2678      return NULL;
2679    }
2680    // Otherwise, we try expansion.
2681    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2682    // Now go around the loop and try alloc again;
2683    // A competing par_promote might beat us to the expansion space,
2684    // so we may go around the loop again if promotion fails again.
2685    if (GCExpandToAllocateDelayMillis > 0) {
2686      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2687    }
2688  }
2689}
2690
2691
2692bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2693  PromotionInfo* promo) {
2694  MutexLocker x(ParGCRareEvent_lock);
2695  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2696  while (true) {
2697    // Expansion by some other thread might make alloc OK now:
2698    if (promo->ensure_spooling_space()) {
2699      assert(promo->has_spooling_space(),
2700             "Post-condition of successful ensure_spooling_space()");
2701      return true;
2702    }
2703    // If there's not enough expansion space available, give up.
2704    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2705      return false;
2706    }
2707    // Otherwise, we try expansion.
2708    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2709    // Now go around the loop and try alloc again;
2710    // A competing allocation might beat us to the expansion space,
2711    // so we may go around the loop again if allocation fails again.
2712    if (GCExpandToAllocateDelayMillis > 0) {
2713      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2714    }
2715  }
2716}
2717
2718void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2719  // Only shrink if a compaction was done so that all the free space
2720  // in the generation is in a contiguous block at the end.
2721  if (did_compact()) {
2722    CardGeneration::shrink(bytes);
2723  }
2724}
2725
2726void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2727  assert_locked_or_safepoint(Heap_lock);
2728}
2729
2730void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2731  assert_locked_or_safepoint(Heap_lock);
2732  assert_lock_strong(freelistLock());
2733  log_trace(gc)("Shrinking of CMS not yet implemented");
2734  return;
2735}
2736
2737
2738// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2739// phases.
2740class CMSPhaseAccounting: public StackObj {
2741 public:
2742  CMSPhaseAccounting(CMSCollector *collector,
2743                     const char *title);
2744  ~CMSPhaseAccounting();
2745
2746 private:
2747  CMSCollector *_collector;
2748  const char *_title;
2749  GCTraceConcTime(Info, gc) _trace_time;
2750
2751 public:
2752  // Not MT-safe; so do not pass around these StackObj's
2753  // where they may be accessed by other threads.
2754  double wallclock_millis() {
2755    return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2756  }
2757};
2758
2759CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2760                                       const char *title) :
2761  _collector(collector), _title(title), _trace_time(title) {
2762
2763  _collector->resetYields();
2764  _collector->resetTimer();
2765  _collector->startTimer();
2766  _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2767}
2768
2769CMSPhaseAccounting::~CMSPhaseAccounting() {
2770  _collector->gc_timer_cm()->register_gc_concurrent_end();
2771  _collector->stopTimer();
2772  log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2773  log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2774}
2775
2776// CMS work
2777
2778// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2779class CMSParMarkTask : public AbstractGangTask {
2780 protected:
2781  CMSCollector*     _collector;
2782  uint              _n_workers;
2783  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2784      AbstractGangTask(name),
2785      _collector(collector),
2786      _n_workers(n_workers) {}
2787  // Work method in support of parallel rescan ... of young gen spaces
2788  void do_young_space_rescan(OopsInGenClosure* cl,
2789                             ContiguousSpace* space,
2790                             HeapWord** chunk_array, size_t chunk_top);
2791  void work_on_young_gen_roots(OopsInGenClosure* cl);
2792};
2793
2794// Parallel initial mark task
2795class CMSParInitialMarkTask: public CMSParMarkTask {
2796  StrongRootsScope* _strong_roots_scope;
2797 public:
2798  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2799      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2800      _strong_roots_scope(strong_roots_scope) {}
2801  void work(uint worker_id);
2802};
2803
2804// Checkpoint the roots into this generation from outside
2805// this generation. [Note this initial checkpoint need only
2806// be approximate -- we'll do a catch up phase subsequently.]
2807void CMSCollector::checkpointRootsInitial() {
2808  assert(_collectorState == InitialMarking, "Wrong collector state");
2809  check_correct_thread_executing();
2810  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2811
2812  save_heap_summary();
2813  report_heap_summary(GCWhen::BeforeGC);
2814
2815  ReferenceProcessor* rp = ref_processor();
2816  assert(_restart_addr == NULL, "Control point invariant");
2817  {
2818    // acquire locks for subsequent manipulations
2819    MutexLockerEx x(bitMapLock(),
2820                    Mutex::_no_safepoint_check_flag);
2821    checkpointRootsInitialWork();
2822    // enable ("weak") refs discovery
2823    rp->enable_discovery();
2824    _collectorState = Marking;
2825  }
2826}
2827
2828void CMSCollector::checkpointRootsInitialWork() {
2829  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2830  assert(_collectorState == InitialMarking, "just checking");
2831
2832  // Already have locks.
2833  assert_lock_strong(bitMapLock());
2834  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2835
2836  // Setup the verification and class unloading state for this
2837  // CMS collection cycle.
2838  setup_cms_unloading_and_verification_state();
2839
2840  GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2841
2842  // Reset all the PLAB chunk arrays if necessary.
2843  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2844    reset_survivor_plab_arrays();
2845  }
2846
2847  ResourceMark rm;
2848  HandleMark  hm;
2849
2850  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2851  GenCollectedHeap* gch = GenCollectedHeap::heap();
2852
2853  verify_work_stacks_empty();
2854  verify_overflow_empty();
2855
2856  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2857  // Update the saved marks which may affect the root scans.
2858  gch->save_marks();
2859
2860  // weak reference processing has not started yet.
2861  ref_processor()->set_enqueuing_is_done(false);
2862
2863  // Need to remember all newly created CLDs,
2864  // so that we can guarantee that the remark finds them.
2865  ClassLoaderDataGraph::remember_new_clds(true);
2866
2867  // Whenever a CLD is found, it will be claimed before proceeding to mark
2868  // the klasses. The claimed marks need to be cleared before marking starts.
2869  ClassLoaderDataGraph::clear_claimed_marks();
2870
2871  print_eden_and_survivor_chunk_arrays();
2872
2873  {
2874#if defined(COMPILER2) || INCLUDE_JVMCI
2875    DerivedPointerTableDeactivate dpt_deact;
2876#endif
2877    if (CMSParallelInitialMarkEnabled) {
2878      // The parallel version.
2879      WorkGang* workers = gch->workers();
2880      assert(workers != NULL, "Need parallel worker threads.");
2881      uint n_workers = workers->active_workers();
2882
2883      StrongRootsScope srs(n_workers);
2884
2885      CMSParInitialMarkTask tsk(this, &srs, n_workers);
2886      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2887      // If the total number of workers is greater than 1, then multiple
2888      // workers may be used at some point and the initialization has been
2889      // set up such that the single-threaded path cannot be used.
2890      if (workers->total_workers() > 1) {
2891        workers->run_task(&tsk);
2892      } else {
2893        tsk.work(0);
2894      }
2895    } else {
2896      // The serial version.
2897      CLDToOopClosure cld_closure(&notOlder, true);
2898      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2899
2900      StrongRootsScope srs(1);
2901
2902      gch->cms_process_roots(&srs,
2903                             true,   // young gen as roots
2904                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
2905                             should_unload_classes(),
2906                             &notOlder,
2907                             &cld_closure);
2908    }
2909  }
2910
2911  // Clear mod-union table; it will be dirtied in the prologue of
2912  // CMS generation per each young generation collection.
2913
2914  assert(_modUnionTable.isAllClear(),
2915       "Was cleared in most recent final checkpoint phase"
2916       " or no bits are set in the gc_prologue before the start of the next "
2917       "subsequent marking phase.");
2918
2919  assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2920
2921  // Save the end of the used_region of the constituent generations
2922  // to be used to limit the extent of sweep in each generation.
2923  save_sweep_limits();
2924  verify_overflow_empty();
2925}
2926
2927bool CMSCollector::markFromRoots() {
2928  // we might be tempted to assert that:
2929  // assert(!SafepointSynchronize::is_at_safepoint(),
2930  //        "inconsistent argument?");
2931  // However that wouldn't be right, because it's possible that
2932  // a safepoint is indeed in progress as a young generation
2933  // stop-the-world GC happens even as we mark in this generation.
2934  assert(_collectorState == Marking, "inconsistent state?");
2935  check_correct_thread_executing();
2936  verify_overflow_empty();
2937
2938  // Weak ref discovery note: We may be discovering weak
2939  // refs in this generation concurrent (but interleaved) with
2940  // weak ref discovery by the young generation collector.
2941
2942  CMSTokenSyncWithLocks ts(true, bitMapLock());
2943  GCTraceCPUTime tcpu;
2944  CMSPhaseAccounting pa(this, "Concurrent Mark");
2945  bool res = markFromRootsWork();
2946  if (res) {
2947    _collectorState = Precleaning;
2948  } else { // We failed and a foreground collection wants to take over
2949    assert(_foregroundGCIsActive, "internal state inconsistency");
2950    assert(_restart_addr == NULL,  "foreground will restart from scratch");
2951    log_debug(gc)("bailing out to foreground collection");
2952  }
2953  verify_overflow_empty();
2954  return res;
2955}
2956
2957bool CMSCollector::markFromRootsWork() {
2958  // iterate over marked bits in bit map, doing a full scan and mark
2959  // from these roots using the following algorithm:
2960  // . if oop is to the right of the current scan pointer,
2961  //   mark corresponding bit (we'll process it later)
2962  // . else (oop is to left of current scan pointer)
2963  //   push oop on marking stack
2964  // . drain the marking stack
2965
2966  // Note that when we do a marking step we need to hold the
2967  // bit map lock -- recall that direct allocation (by mutators)
2968  // and promotion (by the young generation collector) is also
2969  // marking the bit map. [the so-called allocate live policy.]
2970  // Because the implementation of bit map marking is not
2971  // robust wrt simultaneous marking of bits in the same word,
2972  // we need to make sure that there is no such interference
2973  // between concurrent such updates.
2974
2975  // already have locks
2976  assert_lock_strong(bitMapLock());
2977
2978  verify_work_stacks_empty();
2979  verify_overflow_empty();
2980  bool result = false;
2981  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
2982    result = do_marking_mt();
2983  } else {
2984    result = do_marking_st();
2985  }
2986  return result;
2987}
2988
2989// Forward decl
2990class CMSConcMarkingTask;
2991
2992class CMSConcMarkingTerminator: public ParallelTaskTerminator {
2993  CMSCollector*       _collector;
2994  CMSConcMarkingTask* _task;
2995 public:
2996  virtual void yield();
2997
2998  // "n_threads" is the number of threads to be terminated.
2999  // "queue_set" is a set of work queues of other threads.
3000  // "collector" is the CMS collector associated with this task terminator.
3001  // "yield" indicates whether we need the gang as a whole to yield.
3002  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3003    ParallelTaskTerminator(n_threads, queue_set),
3004    _collector(collector) { }
3005
3006  void set_task(CMSConcMarkingTask* task) {
3007    _task = task;
3008  }
3009};
3010
3011class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3012  CMSConcMarkingTask* _task;
3013 public:
3014  bool should_exit_termination();
3015  void set_task(CMSConcMarkingTask* task) {
3016    _task = task;
3017  }
3018};
3019
3020// MT Concurrent Marking Task
3021class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3022  CMSCollector*             _collector;
3023  uint                      _n_workers;      // requested/desired # workers
3024  bool                      _result;
3025  CompactibleFreeListSpace* _cms_space;
3026  char                      _pad_front[64];   // padding to ...
3027  HeapWord* volatile        _global_finger;   // ... avoid sharing cache line
3028  char                      _pad_back[64];
3029  HeapWord*                 _restart_addr;
3030
3031  //  Exposed here for yielding support
3032  Mutex* const _bit_map_lock;
3033
3034  // The per thread work queues, available here for stealing
3035  OopTaskQueueSet*  _task_queues;
3036
3037  // Termination (and yielding) support
3038  CMSConcMarkingTerminator _term;
3039  CMSConcMarkingTerminatorTerminator _term_term;
3040
3041 public:
3042  CMSConcMarkingTask(CMSCollector* collector,
3043                 CompactibleFreeListSpace* cms_space,
3044                 YieldingFlexibleWorkGang* workers,
3045                 OopTaskQueueSet* task_queues):
3046    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3047    _collector(collector),
3048    _cms_space(cms_space),
3049    _n_workers(0), _result(true),
3050    _task_queues(task_queues),
3051    _term(_n_workers, task_queues, _collector),
3052    _bit_map_lock(collector->bitMapLock())
3053  {
3054    _requested_size = _n_workers;
3055    _term.set_task(this);
3056    _term_term.set_task(this);
3057    _restart_addr = _global_finger = _cms_space->bottom();
3058  }
3059
3060
3061  OopTaskQueueSet* task_queues()  { return _task_queues; }
3062
3063  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3064
3065  HeapWord* volatile* global_finger_addr() { return &_global_finger; }
3066
3067  CMSConcMarkingTerminator* terminator() { return &_term; }
3068
3069  virtual void set_for_termination(uint active_workers) {
3070    terminator()->reset_for_reuse(active_workers);
3071  }
3072
3073  void work(uint worker_id);
3074  bool should_yield() {
3075    return    ConcurrentMarkSweepThread::should_yield()
3076           && !_collector->foregroundGCIsActive();
3077  }
3078
3079  virtual void coordinator_yield();  // stuff done by coordinator
3080  bool result() { return _result; }
3081
3082  void reset(HeapWord* ra) {
3083    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3084    _restart_addr = _global_finger = ra;
3085    _term.reset_for_reuse();
3086  }
3087
3088  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3089                                           OopTaskQueue* work_q);
3090
3091 private:
3092  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3093  void do_work_steal(int i);
3094  void bump_global_finger(HeapWord* f);
3095};
3096
3097bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3098  assert(_task != NULL, "Error");
3099  return _task->yielding();
3100  // Note that we do not need the disjunct || _task->should_yield() above
3101  // because we want terminating threads to yield only if the task
3102  // is already in the midst of yielding, which happens only after at least one
3103  // thread has yielded.
3104}
3105
3106void CMSConcMarkingTerminator::yield() {
3107  if (_task->should_yield()) {
3108    _task->yield();
3109  } else {
3110    ParallelTaskTerminator::yield();
3111  }
3112}
3113
3114////////////////////////////////////////////////////////////////
3115// Concurrent Marking Algorithm Sketch
3116////////////////////////////////////////////////////////////////
3117// Until all tasks exhausted (both spaces):
3118// -- claim next available chunk
3119// -- bump global finger via CAS
3120// -- find first object that starts in this chunk
3121//    and start scanning bitmap from that position
3122// -- scan marked objects for oops
3123// -- CAS-mark target, and if successful:
3124//    . if target oop is above global finger (volatile read)
3125//      nothing to do
3126//    . if target oop is in chunk and above local finger
3127//        then nothing to do
3128//    . else push on work-queue
3129// -- Deal with possible overflow issues:
3130//    . local work-queue overflow causes stuff to be pushed on
3131//      global (common) overflow queue
3132//    . always first empty local work queue
3133//    . then get a batch of oops from global work queue if any
3134//    . then do work stealing
3135// -- When all tasks claimed (both spaces)
3136//    and local work queue empty,
3137//    then in a loop do:
3138//    . check global overflow stack; steal a batch of oops and trace
3139//    . try to steal from other threads if the global overflow stack (GOS) is empty
3140//    . if neither is available, offer termination
3141// -- Terminate and return result
3142//
3143void CMSConcMarkingTask::work(uint worker_id) {
3144  elapsedTimer _timer;
3145  ResourceMark rm;
3146  HandleMark hm;
3147
3148  DEBUG_ONLY(_collector->verify_overflow_empty();)
3149
3150  // Before we begin work, our work queue should be empty
3151  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3152  // Scan the bitmap covering _cms_space, tracing through grey objects.
3153  _timer.start();
3154  do_scan_and_mark(worker_id, _cms_space);
3155  _timer.stop();
3156  log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3157
3158  // ... do work stealing
3159  _timer.reset();
3160  _timer.start();
3161  do_work_steal(worker_id);
3162  _timer.stop();
3163  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3164  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3165  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3166  // Note that under the current task protocol, the
3167  // following assertion is true even if the spaces
3168  // expanded since the completion of the concurrent
3169  // marking. XXX This will likely change under a strict
3170  // ABORT semantics.
3171  // After perm removal the comparison was changed to
3172  // greater than or equal to from strictly greater than.
3173  // Before perm removal the highest address sweep would
3174  // have been at the end of perm gen but now is at the
3175  // end of the tenured gen.
3176  assert(_global_finger >=  _cms_space->end(),
3177         "All tasks have been completed");
3178  DEBUG_ONLY(_collector->verify_overflow_empty();)
3179}
3180
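// Atomically advance _global_finger to at least f: retry the CAS until either
// it succeeds or some other thread has already pushed the finger to f or beyond.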
3181void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3182  HeapWord* read = _global_finger;
3183  HeapWord* cur  = read;
3184  while (f > read) {
3185    cur = read;
3186    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3187    if (cur == read) {
3188      // our cas succeeded
3189      assert(_global_finger >= f, "protocol consistency");
3190      break;
3191    }
3192  }
3193}
3194
3195// This is really inefficient, and should be redone by
3196// using (not yet available) block-read and -write interfaces to the
3197// stack and the work_queue. XXX FIX ME !!!
3198bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3199                                                      OopTaskQueue* work_q) {
3200  // Fast lock-free check
3201  if (ovflw_stk->length() == 0) {
3202    return false;
3203  }
3204  assert(work_q->size() == 0, "Shouldn't steal");
3205  MutexLockerEx ml(ovflw_stk->par_lock(),
3206                   Mutex::_no_safepoint_check_flag);
3207  // Grab up to 1/4 of the free space in the work queue, but no more than
3208  // ParGCDesiredObjsFromOverflowList or the overflow stack's length.
3209  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3208  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3209                    (size_t)ParGCDesiredObjsFromOverflowList);
3210  num = MIN2(num, ovflw_stk->length());
3211  for (int i = (int) num; i > 0; i--) {
3212    oop cur = ovflw_stk->pop();
3213    assert(cur != NULL, "Counted wrong?");
3214    work_q->push(cur);
3215  }
3216  return num > 0;
3217}
3218
3219void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3220  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3221  int n_tasks = pst->n_tasks();
3222  // We allow that there may be no tasks to do here because
3223  // we are restarting after a stack overflow.
3224  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3225  uint nth_task = 0;
3226
3227  HeapWord* aligned_start = sp->bottom();
3228  if (sp->used_region().contains(_restart_addr)) {
3229    // Align down to a card boundary for the start of 0th task
3230    // for this space.
3231    aligned_start =
3232      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3233                                 CardTableModRefBS::card_size);
3234  }
3235
3236  size_t chunk_size = sp->marking_task_size();
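  // The space is carved into fixed-size chunks of marking_task_size() words;
  // claiming task n gives us the address range
  // [aligned_start + n*chunk_size, aligned_start + (n+1)*chunk_size).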
3237  while (!pst->is_task_claimed(/* reference */ nth_task)) {
3238    // Having claimed the nth task in this space,
3239    // compute the chunk that it corresponds to:
3240    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3241                               aligned_start + (nth_task+1)*chunk_size);
3242    // Try and bump the global finger via a CAS;
3243    // note that we need to do the global finger bump
3244    // _before_ taking the intersection below, because
3245    // the task corresponding to that region will be
3246    // deemed done even if the used_region() expands
3247    // because of allocation -- as it almost certainly will
3248    // during start-up while the threads yield in the
3249    // closure below.
3250    HeapWord* finger = span.end();
3251    bump_global_finger(finger);   // atomically
3252    // There are null tasks here corresponding to chunks
3253    // beyond the "top" address of the space.
3254    span = span.intersection(sp->used_region());
3255    if (!span.is_empty()) {  // Non-null task
3256      HeapWord* prev_obj;
3257      assert(!span.contains(_restart_addr) || nth_task == 0,
3258             "Inconsistency");
3259      if (nth_task == 0) {
3260        // For the 0th task, we'll not need to compute a block_start.
3261        if (span.contains(_restart_addr)) {
3262          // In the case of a restart because of stack overflow,
3263          // we might additionally skip a chunk prefix.
3264          prev_obj = _restart_addr;
3265        } else {
3266          prev_obj = span.start();
3267        }
3268      } else {
3269        // We want to skip the first object because
3270        // the protocol is to scan any object in its entirety
3271        // that _starts_ in this span; a fortiori, any
3272        // object starting in an earlier span is scanned
3273        // as part of an earlier claimed task.
3274        // Below we use the "careful" version of block_start
3275        // so we do not try to navigate uninitialized objects.
3276        prev_obj = sp->block_start_careful(span.start());
3277        // Below we use a variant of block_size that uses the
3278        // Printezis bits to avoid waiting for allocated
3279        // objects to become initialized/parsable.
3280        while (prev_obj < span.start()) {
3281          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3282          if (sz > 0) {
3283            prev_obj += sz;
3284          } else {
3285            // In this case we may end up doing a bit of redundant
3286            // scanning, but that appears unavoidable, short of
3287            // locking the free list locks; see bug 6324141.
3288            break;
3289          }
3290        }
3291      }
3292      if (prev_obj < span.end()) {
3293        MemRegion my_span = MemRegion(prev_obj, span.end());
3294        // Do the marking work within a non-empty span --
3295        // the last argument to the constructor indicates whether the
3296        // iteration should be incremental with periodic yields.
3297        ParMarkFromRootsClosure cl(this, _collector, my_span,
3298                                   &_collector->_markBitMap,
3299                                   work_queue(i),
3300                                   &_collector->_markStack);
3301        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3302      } // else nothing to do for this task
3303    }   // else nothing to do for this task
3304  }
3305  // We'd be tempted to assert here that since there are no
3306  // more tasks left to claim in this space, the global_finger
3307  // must exceed space->top() and a fortiori space->end(). However,
3308  // that would not quite be correct because the bumping of
3309  // global_finger occurs strictly after the claiming of a task,
3310  // so by the time we reach here the global finger may not yet
3311  // have been bumped up by the thread that claimed the last
3312  // task.
3313  pst->all_tasks_completed();
3314}
3315
3316class ParConcMarkingClosure: public MetadataAwareOopClosure {
3317 private:
3318  CMSCollector* _collector;
3319  CMSConcMarkingTask* _task;
3320  MemRegion     _span;
3321  CMSBitMap*    _bit_map;
3322  CMSMarkStack* _overflow_stack;
3323  OopTaskQueue* _work_queue;
3324 protected:
3325  DO_OOP_WORK_DEFN
3326 public:
3327  ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3328                        CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3329    MetadataAwareOopClosure(collector->ref_processor()),
3330    _collector(collector),
3331    _task(task),
3332    _span(collector->_span),
3333    _work_queue(work_queue),
3334    _bit_map(bit_map),
3335    _overflow_stack(overflow_stack)
3336  { }
3337  virtual void do_oop(oop* p);
3338  virtual void do_oop(narrowOop* p);
3339
3340  void trim_queue(size_t max);
3341  void handle_stack_overflow(HeapWord* lost);
3342  void do_yield_check() {
3343    if (_task->should_yield()) {
3344      _task->yield();
3345    }
3346  }
3347};
3348
3349DO_OOP_WORK_IMPL(ParConcMarkingClosure)
3350
3351// Grey object scanning during work stealing phase --
3352// the salient assumption here is that any references
3353// that are in these stolen objects being scanned must
3354// already have been initialized (else they would not have
3355// been published), so we do not need to check for
3356// uninitialized objects before pushing here.
3357void ParConcMarkingClosure::do_oop(oop obj) {
3358  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3359  HeapWord* addr = (HeapWord*)obj;
3360  // Check if oop points into the CMS generation
3361  // and is not marked
3362  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3363    // a white object ...
3364    // If we manage to "claim" the object, by being the
3365    // first thread to mark it, then we push it on our
3366    // marking stack
3367    if (_bit_map->par_mark(addr)) {     // ... now grey
3368      // push on work queue (grey set)
3369      bool simulate_overflow = false;
3370      NOT_PRODUCT(
3371        if (CMSMarkStackOverflowALot &&
3372            _collector->simulate_overflow()) {
3373          // simulate a stack overflow
3374          simulate_overflow = true;
3375        }
3376      )
3377      if (simulate_overflow ||
3378          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3379        // stack overflow
3380        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3381        // We cannot assert that the overflow stack is full because
3382        // it may have been emptied since.
3383        assert(simulate_overflow ||
3384               _work_queue->size() == _work_queue->max_elems(),
3385              "Else push should have succeeded");
3386        handle_stack_overflow(addr);
3387      }
3388    } // Else, some other thread got there first
3389    do_yield_check();
3390  }
3391}
3392
3393void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
3394void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
3395
3396void ParConcMarkingClosure::trim_queue(size_t max) {
3397  while (_work_queue->size() > max) {
3398    oop new_oop;
3399    if (_work_queue->pop_local(new_oop)) {
3400      assert(new_oop->is_oop(), "Should be an oop");
3401      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3402      assert(_span.contains((HeapWord*)new_oop), "Not in span");
3403      new_oop->oop_iterate(this);  // do_oop() above
3404      do_yield_check();
3405    }
3406  }
3407}
3408
3409// Upon stack overflow, we discard (part of) the stack,
3410// remembering the least address amongst those discarded
3411// in CMSCollector's _restart_address.
3412void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3413  // We need to do this under a mutex to prevent other
3414  // workers from interfering with the work done below.
3415  MutexLockerEx ml(_overflow_stack->par_lock(),
3416                   Mutex::_no_safepoint_check_flag);
3417  // Remember the least grey address discarded
3418  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3419  _collector->lower_restart_addr(ra);
3420  _overflow_stack->reset();  // discard stack contents
3421  _overflow_stack->expand(); // expand the stack if possible
3422}
3423
3424
3425void CMSConcMarkingTask::do_work_steal(int i) {
3426  OopTaskQueue* work_q = work_queue(i);
3427  oop obj_to_scan;
3428  CMSBitMap* bm = &(_collector->_markBitMap);
3429  CMSMarkStack* ovflw = &(_collector->_markStack);
3430  int* seed = _collector->hash_seed(i);
3431  ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3432  while (true) {
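    // Completely drain our own work queue (trim it to size 0) before looking
    // for more work on the overflow stack or in other threads' queues.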
3433    cl.trim_queue(0);
3434    assert(work_q->size() == 0, "Should have been emptied above");
3435    if (get_work_from_overflow_stack(ovflw, work_q)) {
3436      // Can't assert below because the work obtained from the
3437      // overflow stack may already have been stolen from us.
3438      // assert(work_q->size() > 0, "Work from overflow stack");
3439      continue;
3440    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3441      assert(obj_to_scan->is_oop(), "Should be an oop");
3442      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3443      obj_to_scan->oop_iterate(&cl);
3444    } else if (terminator()->offer_termination(&_term_term)) {
3445      assert(work_q->size() == 0, "Impossible!");
3446      break;
3447    } else if (yielding() || should_yield()) {
3448      yield();
3449    }
3450  }
3451}
3452
3453// This is run by the CMS (coordinator) thread.
3454void CMSConcMarkingTask::coordinator_yield() {
3455  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3456         "CMS thread should hold CMS token");
3457  // First give up the locks, then yield, then re-lock
3458  // We should probably use a constructor/destructor idiom to
3459  // do this unlock/lock or modify the MutexUnlocker class to
3460  // serve our purpose. XXX
3461  assert_lock_strong(_bit_map_lock);
3462  _bit_map_lock->unlock();
3463  ConcurrentMarkSweepThread::desynchronize(true);
3464  _collector->stopTimer();
3465  _collector->incrementYields();
3466
3467  // It is possible for whichever thread initiated the yield request
3468  // not to get a chance to wake up and take the bitmap lock between
3469  // this thread releasing it and reacquiring it. So, while the
3470  // should_yield() flag is on, let's sleep for a bit to give the
3471  // other thread a chance to wake up. The limit imposed on the number
3472  // of iterations is defensive, to avoid any unforeseen circumstances
3473  // putting us into an infinite loop. Since it's always been this
3474  // (coordinator_yield()) method that was observed to cause the
3475  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3476  // which is by default non-zero. For the other seven methods that
3477  // also perform the yield operation, we are using a different
3478  // parameter (CMSYieldSleepCount) which is by default zero. This way we
3479  // can enable the sleeping for those methods too, if necessary.
3480  // See 6442774.
3481  //
3482  // We really need to reconsider the synchronization between the GC
3483  // thread and the yield-requesting threads in the future and we
3484  // should really use wait/notify, which is the recommended
3485  // way of doing this type of interaction. Additionally, we should
3486  // consolidate the eight methods that do the yield operation, which
3487  // are almost identical, into one for better maintainability and
3488  // readability. See 6445193.
3489  //
3490  // Tony 2006.06.29
3491  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3492                   ConcurrentMarkSweepThread::should_yield() &&
3493                   !CMSCollector::foregroundGCIsActive(); ++i) {
3494    os::sleep(Thread::current(), 1, false);
3495  }
3496
3497  ConcurrentMarkSweepThread::synchronize(true);
3498  _bit_map_lock->lock_without_safepoint_check();
3499  _collector->startTimer();
3500}
3501
3502bool CMSCollector::do_marking_mt() {
3503  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3504  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3505                                                                  conc_workers()->active_workers(),
3506                                                                  Threads::number_of_non_daemon_threads());
3507  num_workers = conc_workers()->update_active_workers(num_workers);
3508  log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
3509
3510  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3511
3512  CMSConcMarkingTask tsk(this,
3513                         cms_space,
3514                         conc_workers(),
3515                         task_queues());
3516
3517  // Since the actual number of workers we get may be different
3518  // from the number we requested above, do we need to do anything different
3519  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3520  // class?? XXX
3521  cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3522
3523  // Refs discovery is already non-atomic.
3524  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3525  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
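  // Non-atomic (concurrent) discovery is required because mutators keep
  // running while we mark, and MT discovery lets each of the marking
  // workers discover references in parallel.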
3526  conc_workers()->start_task(&tsk);
3527  while (tsk.yielded()) {
3528    tsk.coordinator_yield();
3529    conc_workers()->continue_task(&tsk);
3530  }
3531  // If the task was aborted, _restart_addr will be non-NULL
3532  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3533  while (_restart_addr != NULL) {
3534    // XXX For now we do not make use of ABORTED state and have not
3535    // yet implemented the right abort semantics (even in the original
3536    // single-threaded CMS case). That needs some more investigation
3537    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3538    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3539    // If _restart_addr is non-NULL, a marking stack overflow
3540    // occurred; we need to do a fresh marking iteration from the
3541    // indicated restart address.
3542    if (_foregroundGCIsActive) {
3543      // We may be running into repeated stack overflows, having
3544      // reached the limit of the stack size, while making very
3545      // slow forward progress. It may be best to bail out and
3546      // let the foreground collector do its job.
3547      // Clear _restart_addr, so that foreground GC
3548      // works from scratch. This avoids the headache of
3549      // a "rescan" which would otherwise be needed because
3550      // of the dirty mod union table & card table.
3551      _restart_addr = NULL;
3552      return false;
3553    }
3554    // Adjust the task to restart from _restart_addr
3555    tsk.reset(_restart_addr);
3556    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3557                  _restart_addr);
3558    _restart_addr = NULL;
3559    // Get the workers going again
3560    conc_workers()->start_task(&tsk);
3561    while (tsk.yielded()) {
3562      tsk.coordinator_yield();
3563      conc_workers()->continue_task(&tsk);
3564    }
3565  }
3566  assert(tsk.completed(), "Inconsistency");
3567  assert(tsk.result() == true, "Inconsistency");
3568  return true;
3569}
3570
3571bool CMSCollector::do_marking_st() {
3572  ResourceMark rm;
3573  HandleMark   hm;
3574
3575  // Temporarily make refs discovery single threaded (non-MT)
3576  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3577  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3578    &_markStack, CMSYield);
3579  // The last (CMSYield) argument to the closure constructor above indicates
3580  // whether the iteration should be incremental with periodic yields.
3581  _markBitMap.iterate(&markFromRootsClosure);
3582  // If _restart_addr is non-NULL, a marking stack overflow
3583  // occurred; we need to do a fresh iteration from the
3584  // indicated restart address.
3585  while (_restart_addr != NULL) {
3586    if (_foregroundGCIsActive) {
3587      // We may be running into repeated stack overflows, having
3588      // reached the limit of the stack size, while making very
3589      // slow forward progress. It may be best to bail out and
3590      // let the foreground collector do its job.
3591      // Clear _restart_addr, so that foreground GC
3592      // works from scratch. This avoids the headache of
3593      // a "rescan" which would otherwise be needed because
3594      // of the dirty mod union table & card table.
3595      _restart_addr = NULL;
3596      return false;  // indicating failure to complete marking
3597    }
3598    // Deal with stack overflow:
3599    // we restart marking from _restart_addr
3600    HeapWord* ra = _restart_addr;
3601    markFromRootsClosure.reset(ra);
3602    _restart_addr = NULL;
3603    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3604  }
3605  return true;
3606}
3607
3608void CMSCollector::preclean() {
3609  check_correct_thread_executing();
3610  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3611  verify_work_stacks_empty();
3612  verify_overflow_empty();
3613  _abort_preclean = false;
3614  if (CMSPrecleaningEnabled) {
3615    if (!CMSEdenChunksRecordAlways) {
3616      _eden_chunk_index = 0;
3617    }
3618    size_t used = get_eden_used();
3619    size_t capacity = get_eden_capacity();
3620    // Don't start sampling unless we will get sufficiently
3621    // many samples.
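    // The test below is (modulo integer rounding) equivalent to:
    //   used < (CMSScheduleRemarkEdenPenetration percent of capacity) / CMSScheduleRemarkSamplingRatio
    // i.e. eden must still be well below the occupancy at which we intend to
    // schedule the remark (see sample_eden() and abortable_preclean() below).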
3622    if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
3623                * CMSScheduleRemarkEdenPenetration)) {
3624      _start_sampling = true;
3625    } else {
3626      _start_sampling = false;
3627    }
3628    GCTraceCPUTime tcpu;
3629    CMSPhaseAccounting pa(this, "Concurrent Preclean");
3630    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3631  }
3632  CMSTokenSync x(true); // is cms thread
3633  if (CMSPrecleaningEnabled) {
3634    sample_eden();
3635    _collectorState = AbortablePreclean;
3636  } else {
3637    _collectorState = FinalMarking;
3638  }
3639  verify_work_stacks_empty();
3640  verify_overflow_empty();
3641}
3642
3643// Try and schedule the remark such that young gen
3644// occupancy is CMSScheduleRemarkEdenPenetration %.
3645void CMSCollector::abortable_preclean() {
3646  check_correct_thread_executing();
3647  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3648  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3649
3650  // If Eden's current occupancy is below this threshold,
3651  // immediately schedule the remark; else preclean
3652  // past the next scavenge in an effort to
3653  // schedule the pause as described above. By choosing
3654  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3655  // we will never do an actual abortable preclean cycle.
3656  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3657    GCTraceCPUTime tcpu;
3658    CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3659    // We need more smarts in the abortable preclean
3660    // loop below to deal with cases where allocation
3661    // in young gen is very very slow, and our precleaning
3662    // is running a losing race against a horde of
3663    // mutators intent on flooding us with CMS updates
3664    // (dirty cards).
3665    // One, admittedly dumb, strategy is to give up
3666    // after a certain number of abortable precleaning loops
3667    // or after a certain maximum time. We want to make
3668    // this smarter in the next iteration.
3669    // XXX FIX ME!!! YSR
3670    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3671    while (!(should_abort_preclean() ||
3672             ConcurrentMarkSweepThread::cmst()->should_terminate())) {
3673      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3674      cumworkdone += workdone;
3675      loops++;
3676      // Voluntarily terminate abortable preclean phase if we have
3677      // been at it for too long.
3678      if ((CMSMaxAbortablePrecleanLoops != 0) &&
3679          loops >= CMSMaxAbortablePrecleanLoops) {
3680        log_debug(gc)(" CMS: abort preclean due to loops ");
3681        break;
3682      }
3683      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3684        log_debug(gc)(" CMS: abort preclean due to time ");
3685        break;
3686      }
3687      // If we are doing little work each iteration, we should
3688      // take a short break.
3689      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3690        // Sleep for some time, waiting for work to accumulate
3691        stopTimer();
3692        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3693        startTimer();
3694        waited++;
3695      }
3696    }
3697    log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3698                               loops, waited, cumworkdone);
3699  }
3700  CMSTokenSync x(true); // is cms thread
3701  if (_collectorState != Idling) {
3702    assert(_collectorState == AbortablePreclean,
3703           "Spontaneous state transition?");
3704    _collectorState = FinalMarking;
3705  } // Else, a foreground collection completed this CMS cycle.
3706  return;
3707}
3708
3709// Respond to an Eden sampling opportunity
3710void CMSCollector::sample_eden() {
3711  // Make sure a young gc cannot sneak in between our
3712  // reading and recording of a sample.
3713  assert(Thread::current()->is_ConcurrentGC_thread(),
3714         "Only the cms thread may collect Eden samples");
3715  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3716         "Should collect samples while holding CMS token");
3717  if (!_start_sampling) {
3718    return;
3719  }
3720  // When CMSEdenChunksRecordAlways is true, the eden chunk array
3721  // is populated by the young generation.
3722  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3723    if (_eden_chunk_index < _eden_chunk_capacity) {
3724      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3725      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3726             "Unexpected state of Eden");
3727      // We'd like to check that what we just sampled is an oop-start address;
3728      // however, we cannot do that here since the object may not yet have been
3729      // initialized. So we'll instead do the check when we _use_ this sample
3730      // later.
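      // Commit the sample only if it is the very first one or lies at least
      // CMSSamplingGrain words beyond the previously recorded boundary, so the
      // chunk array is not filled with closely spaced samples.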
3731      if (_eden_chunk_index == 0 ||
3732          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3733                         _eden_chunk_array[_eden_chunk_index-1])
3734           >= CMSSamplingGrain)) {
3735        _eden_chunk_index++;  // commit sample
3736      }
3737    }
3738  }
3739  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3740    size_t used = get_eden_used();
3741    size_t capacity = get_eden_capacity();
3742    assert(used <= capacity, "Unexpected state of Eden");
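    // Eden has reached CMSScheduleRemarkEdenPenetration percent of its
    // capacity: signal that the abortable preclean phase should finish so
    // the remark pause can be scheduled now.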
3743    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3744      _abort_preclean = true;
3745    }
3746  }
3747}
3748
3749
3750size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3751  assert(_collectorState == Precleaning ||
3752         _collectorState == AbortablePreclean, "incorrect state");
3753  ResourceMark rm;
3754  HandleMark   hm;
3755
3756  // Precleaning is currently not MT but the reference processor
3757  // may be set for MT.  Disable it temporarily here.
3758  ReferenceProcessor* rp = ref_processor();
3759  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3760
3761  // Do one pass of scrubbing the discovered reference lists
3762  // to remove any reference objects with strongly-reachable
3763  // referents.
3764  if (clean_refs) {
3765    CMSPrecleanRefsYieldClosure yield_cl(this);
3766    assert(rp->span().equals(_span), "Spans should be equal");
3767    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3768                                   &_markStack, true /* preclean */);
3769    CMSDrainMarkingStackClosure complete_trace(this,
3770                                   _span, &_markBitMap, &_markStack,
3771                                   &keep_alive, true /* preclean */);
3772
3773    // We don't want this step to interfere with a young
3774    // collection because we don't want to take CPU
3775    // or memory bandwidth away from the young GC threads
3776    // (which may be as many as there are CPUs).
3777    // Note that we don't need to protect ourselves from
3778    // interference with mutators because they can't
3779    // manipulate the discovered reference lists nor affect
3780    // the computed reachability of the referents, the
3781    // only properties manipulated by the precleaning
3782    // of these reference lists.
3783    stopTimer();
3784    CMSTokenSyncWithLocks x(true /* is cms thread */,
3785                            bitMapLock());
3786    startTimer();
3787    sample_eden();
3788
3789    // The following will yield to allow foreground
3790    // collection to proceed promptly. XXX YSR:
3791    // The code in this method may need further
3792    // tweaking for better performance and some restructuring
3793    // for cleaner interfaces.
3794    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3795    rp->preclean_discovered_references(
3796          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3797          gc_timer);
3798  }
3799
3800  if (clean_survivor) {  // preclean the active survivor space(s)
3801    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3802                             &_markBitMap, &_modUnionTable,
3803                             &_markStack, true /* precleaning phase */);
3804    stopTimer();
3805    CMSTokenSyncWithLocks ts(true /* is cms thread */,
3806                             bitMapLock());
3807    startTimer();
3808    unsigned int before_count =
3809      GenCollectedHeap::heap()->total_collections();
3810    SurvivorSpacePrecleanClosure
3811      sss_cl(this, _span, &_markBitMap, &_markStack,
3812             &pam_cl, before_count, CMSYield);
3813    _young_gen->from()->object_iterate_careful(&sss_cl);
3814    _young_gen->to()->object_iterate_careful(&sss_cl);
3815  }
3816  MarkRefsIntoAndScanClosure
3817    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3818             &_markStack, this, CMSYield,
3819             true /* precleaning phase */);
3820  // CAUTION: The following closure has persistent state that may need to
3821  // be reset upon a decrease in the sequence of addresses it
3822  // processes.
3823  ScanMarkedObjectsAgainCarefullyClosure
3824    smoac_cl(this, _span,
3825      &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3826
3827  // Preclean dirty cards in ModUnionTable and CardTable using
3828  // appropriate convergence criterion;
3829  // repeat CMSPrecleanIter times unless we find that
3830  // we are losing.
3831  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3832  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3833         "Bad convergence multiplier");
3834  assert(CMSPrecleanThreshold >= 100,
3835         "Unreasonably low CMSPrecleanThreshold");
3836
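  // Worked example: assuming the default CMSPrecleanNumerator/CMSPrecleanDenominator
  // of 2/3, after the first pass we keep iterating only while each pass finds at
  // most 2/3 as many dirty cards as the previous pass did (and still finds more
  // than CMSPrecleanThreshold of them).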
3837  size_t numIter, cumNumCards, lastNumCards, curNumCards;
3838  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3839       numIter < CMSPrecleanIter;
3840       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3841    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3842    log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3843    // Either there are very few dirty cards, so re-mark
3844    // pause will be small anyway, or our pre-cleaning isn't
3845    // that much faster than the rate at which cards are being
3846    // dirtied, so we might as well stop and re-mark since
3847    // precleaning won't improve our re-mark time by much.
3848    if (curNumCards <= CMSPrecleanThreshold ||
3849        (numIter > 0 &&
3850         (curNumCards * CMSPrecleanDenominator >
3851         lastNumCards * CMSPrecleanNumerator))) {
3852      numIter++;
3853      cumNumCards += curNumCards;
3854      break;
3855    }
3856  }
3857
3858  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3859
3860  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3861  cumNumCards += curNumCards;
3862  log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3863                             curNumCards, cumNumCards, numIter);
3864  return cumNumCards;   // as a measure of useful work done
3865}
3866
3867// PRECLEANING NOTES:
3868// Precleaning involves:
3869// . reading the bits of the modUnionTable and clearing the set bits.
3870// . For the cards corresponding to the set bits, we scan the
3871//   objects on those cards. This means we need the free_list_lock
3872//   so that we can safely iterate over the CMS space when scanning
3873//   for oops.
3874// . When we scan the objects, we'll be both reading and setting
3875//   marks in the marking bit map, so we'll need the marking bit map.
3876// . For protecting _collector_state transitions, we take the CGC_lock.
3877//   Note that any races in the reading of card table entries by the
3878//   CMS thread on the one hand and the clearing of those entries by the
3879//   VM thread or the setting of those entries by the mutator threads on the
3880//   other are quite benign. However, for efficiency it makes sense to keep
3881//   the VM thread from racing with the CMS thread while the latter is
3882//   scanning and clearing dirty card info in the modUnionTable. We
3883//   therefore also use the CGC_lock to protect the reading of the card
3884//   table and the mod union table by the CMS thread.
3885// . We run concurrently with mutator updates, so scanning
3886//   needs to be done carefully  -- we should not try to scan
3887//   potentially uninitialized objects.
3888//
3889// Locking strategy: While holding the CGC_lock, we scan over and
3890// reset a maximal dirty range of the mod union / card tables, then lock
3891// the free_list_lock and bitmap lock to do a full marking, then
3892// release these locks; and repeat the cycle. This allows for a
3893// certain amount of fairness in the sharing of these locks between
3894// the CMS collector on the one hand, and the VM thread and the
3895// mutators on the other.
3896
3897// NOTE: preclean_mod_union_table() and preclean_card_table()
3898// further below are largely identical; if you need to modify
3899// one of these methods, please check the other method too.
3900
3901size_t CMSCollector::preclean_mod_union_table(
3902  ConcurrentMarkSweepGeneration* old_gen,
3903  ScanMarkedObjectsAgainCarefullyClosure* cl) {
3904  verify_work_stacks_empty();
3905  verify_overflow_empty();
3906
3907  // strategy: starting with the first card, accumulate contiguous
3908  // ranges of dirty cards; clear these cards, then scan the region
3909  // covered by these cards.
3910
3911  // Since all of the MUT is committed ahead, we can just use
3912  // that, in case the generations expand while we are precleaning.
3913  // It might also be fine to just use the committed part of the
3914  // generation, but we might potentially miss cards when the
3915  // generation is rapidly expanding while we are in the midst
3916  // of precleaning.
3917  HeapWord* startAddr = old_gen->reserved().start();
3918  HeapWord* endAddr   = old_gen->reserved().end();
3919
3920  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3921
3922  size_t numDirtyCards, cumNumDirtyCards;
3923  HeapWord *nextAddr, *lastAddr;
3924  for (cumNumDirtyCards = numDirtyCards = 0,
3925       nextAddr = lastAddr = startAddr;
3926       nextAddr < endAddr;
3927       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3928
3929    ResourceMark rm;
3930    HandleMark   hm;
3931
3932    MemRegion dirtyRegion;
3933    {
3934      stopTimer();
3935      // Potential yield point
3936      CMSTokenSync ts(true);
3937      startTimer();
3938      sample_eden();
3939      // Get dirty region starting at nextAddr (inclusive),
3940      // simultaneously clearing it.
3941      dirtyRegion =
3942        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
3943      assert(dirtyRegion.start() >= nextAddr,
3944             "returned region inconsistent?");
3945    }
3946    // Remember where the next search should begin.
3947    // The returned region (if non-empty) is a right open interval,
3948    // so lastAddr is obtained from the right end of that
3949    // interval.
3950    lastAddr = dirtyRegion.end();
3951    // Should do something more transparent and less hacky XXX
3952    numDirtyCards =
3953      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
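    // (heapWordDiffToOffsetDiff() just expresses the region's size in
    // card-sized units, since the mod union table keeps one bit per card.)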
3954
3955    // We'll scan the cards in the dirty region (with periodic
3956    // yields for foreground GC as needed).
3957    if (!dirtyRegion.is_empty()) {
3958      assert(numDirtyCards > 0, "consistency check");
3959      HeapWord* stop_point = NULL;
3960      stopTimer();
3961      // Potential yield point
3962      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
3963                               bitMapLock());
3964      startTimer();
3965      {
3966        verify_work_stacks_empty();
3967        verify_overflow_empty();
3968        sample_eden();
3969        stop_point =
3970          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
3971      }
3972      if (stop_point != NULL) {
3973        // The careful iteration stopped early either because it found an
3974        // uninitialized object, or because we were in the midst of an
3975        // "abortable preclean", which should now be aborted. Redirty
3976        // the bits corresponding to the partially-scanned or unscanned
3977        // cards. We'll either restart at the next block boundary or
3978        // abort the preclean.
3979        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
3980               "Should only be AbortablePreclean.");
3981        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
3982        if (should_abort_preclean()) {
3983          break; // out of preclean loop
3984        } else {
3985          // Compute the next address at which preclean should pick up;
3986          // might need bitMapLock in order to read P-bits.
3987          lastAddr = next_card_start_after_block(stop_point);
3988        }
3989      }
3990    } else {
3991      assert(lastAddr == endAddr, "consistency check");
3992      assert(numDirtyCards == 0, "consistency check");
3993      break;
3994    }
3995  }
3996  verify_work_stacks_empty();
3997  verify_overflow_empty();
3998  return cumNumDirtyCards;
3999}
4000
4001// NOTE: preclean_mod_union_table() above and preclean_card_table()
4002// below are largely identical; if you need to modify
4003// one of these methods, please check the other method too.
4004
4005size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
4006  ScanMarkedObjectsAgainCarefullyClosure* cl) {
4007  // strategy: similar to preclean_mod_union_table() above, in that
4008  // we accumulate contiguous ranges of dirty cards, mark these cards
4009  // precleaned, then scan the region covered by these cards.
4010  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
4011  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
4012
4013  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4014
4015  size_t numDirtyCards, cumNumDirtyCards;
4016  HeapWord *lastAddr, *nextAddr;
4017
4018  for (cumNumDirtyCards = numDirtyCards = 0,
4019       nextAddr = lastAddr = startAddr;
4020       nextAddr < endAddr;
4021       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4022
4023    ResourceMark rm;
4024    HandleMark   hm;
4025
4026    MemRegion dirtyRegion;
4027    {
4028      // See comments in "Precleaning notes" above on why we
4029      // do this locking. XXX Could the locking overheads be
4030      // too high when dirty cards are sparse? [I don't think so.]
4031      stopTimer();
4032      CMSTokenSync x(true); // is cms thread
4033      startTimer();
4034      sample_eden();
4035      // Get and clear dirty region from card table
4036      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4037                                    MemRegion(nextAddr, endAddr),
4038                                    true,
4039                                    CardTableModRefBS::precleaned_card_val());
4040
4041      assert(dirtyRegion.start() >= nextAddr,
4042             "returned region inconsistent?");
4043    }
4044    lastAddr = dirtyRegion.end();
4045    numDirtyCards =
4046      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4047
4048    if (!dirtyRegion.is_empty()) {
4049      stopTimer();
4050      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4051      startTimer();
4052      sample_eden();
4053      verify_work_stacks_empty();
4054      verify_overflow_empty();
4055      HeapWord* stop_point =
4056        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4057      if (stop_point != NULL) {
4058        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4059               "Should only be AbortablePreclean.");
4060        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4061        if (should_abort_preclean()) {
4062          break; // out of preclean loop
4063        } else {
4064          // Compute the next address at which preclean should pick up.
4065          lastAddr = next_card_start_after_block(stop_point);
4066        }
4067      }
4068    } else {
4069      break;
4070    }
4071  }
4072  verify_work_stacks_empty();
4073  verify_overflow_empty();
4074  return cumNumDirtyCards;
4075}
4076
4077class PrecleanKlassClosure : public KlassClosure {
4078  KlassToOopClosure _cm_klass_closure;
4079 public:
4080  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4081  void do_klass(Klass* k) {
4082    if (k->has_accumulated_modified_oops()) {
4083      k->clear_accumulated_modified_oops();
4084
4085      _cm_klass_closure.do_klass(k);
4086    }
4087  }
4088};
4089
4090// The freelist lock is needed to prevent asserts; is it really needed?
4091void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4092
4093  cl->set_freelistLock(freelistLock);
4094
4095  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4096
4097  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4098  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4099  PrecleanKlassClosure preclean_klass_closure(cl);
4100  ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4101
4102  verify_work_stacks_empty();
4103  verify_overflow_empty();
4104}
4105
4106void CMSCollector::checkpointRootsFinal() {
4107  assert(_collectorState == FinalMarking, "incorrect state transition?");
4108  check_correct_thread_executing();
4109  // world is stopped at this checkpoint
4110  assert(SafepointSynchronize::is_at_safepoint(),
4111         "world should be stopped");
4112  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4113
4114  verify_work_stacks_empty();
4115  verify_overflow_empty();
4116
4117  log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4118                _young_gen->used() / K, _young_gen->capacity() / K);
4119  {
4120    if (CMSScavengeBeforeRemark) {
4121      GenCollectedHeap* gch = GenCollectedHeap::heap();
4122      // Temporarily set the flag to false; GCH->do_collection() expects
4123      // it to be false and will set it to true.
4124      FlagSetting fl(gch->_is_gc_active, false);
4125
4126      gch->do_collection(true,                      // full (i.e. force, see below)
4127                         false,                     // !clear_all_soft_refs
4128                         0,                         // size
4129                         false,                     // is_tlab
4130                         GenCollectedHeap::YoungGen // type
4131        );
4132    }
4133    FreelistLocker x(this);
4134    MutexLockerEx y(bitMapLock(),
4135                    Mutex::_no_safepoint_check_flag);
4136    checkpointRootsFinalWork();
4137  }
4138  verify_work_stacks_empty();
4139  verify_overflow_empty();
4140}
4141
4142void CMSCollector::checkpointRootsFinalWork() {
4143  GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4144
4145  assert(haveFreelistLocks(), "must have free list locks");
4146  assert_lock_strong(bitMapLock());
4147
4148  ResourceMark rm;
4149  HandleMark   hm;
4150
4151  GenCollectedHeap* gch = GenCollectedHeap::heap();
4152
4153  if (should_unload_classes()) {
4154    CodeCache::gc_prologue();
4155  }
4156  assert(haveFreelistLocks(), "must have free list locks");
4157  assert_lock_strong(bitMapLock());
4158
4159  // We might assume that we need not fill TLAB's when
4160  // CMSScavengeBeforeRemark is set, because we may have just done
4161  // a scavenge which would have filled all TLAB's -- and besides
4162  // Eden would be empty. This however may not always be the case --
4163  // for instance although we asked for a scavenge, it may not have
4164  // happened because of a JNI critical section. We probably need
4165  // a policy for deciding whether we can in that case wait until
4166  // the critical section releases and then do the remark following
4167  // the scavenge, and skip it here. In the absence of that policy,
4168  // or of an indication of whether the scavenge did indeed occur,
4169  // we cannot rely on TLAB's having been filled and must do
4170  // so here just in case a scavenge did not happen.
4171  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4172  // Update the saved marks which may affect the root scans.
4173  gch->save_marks();
4174
4175  print_eden_and_survivor_chunk_arrays();
4176
4177  {
4178#if defined(COMPILER2) || INCLUDE_JVMCI
4179    DerivedPointerTableDeactivate dpt_deact;
4180#endif
4181
4182    // Note on the role of the mod union table:
4183    // Since the marker in "markFromRoots" marks concurrently with
4184    // mutators, it is possible for some reachable objects not to have been
4185    // scanned. For instance, the only reference to an object A may have been
4186    // placed in object B after the marker scanned B. Unless B is rescanned,
4187    // A would be collected. Such updates to references in marked objects
4188    // are detected via the mod union table which is the set of all cards
4189    // dirtied since the first checkpoint in this GC cycle and prior to
4190    // the most recent young generation GC, minus those cleaned up by the
4191    // concurrent precleaning.
4192    if (CMSParallelRemarkEnabled) {
4193      GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
4194      do_remark_parallel();
4195    } else {
4196      GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
4197      do_remark_non_parallel();
4198    }
4199  }
4200  verify_work_stacks_empty();
4201  verify_overflow_empty();
4202
4203  {
4204    GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
4205    refProcessingWork();
4206  }
4207  verify_work_stacks_empty();
4208  verify_overflow_empty();
4209
4210  if (should_unload_classes()) {
4211    CodeCache::gc_epilogue();
4212  }
4213  JvmtiExport::gc_epilogue();
4214
4215  // If we encountered any (marking stack / work queue) overflow
4216  // events during the current CMS cycle, take appropriate
4217  // remedial measures, where possible, so as to try and avoid
4218  // recurrence of that condition.
4219  assert(_markStack.isEmpty(), "No grey objects");
4220  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4221                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4222  if (ser_ovflw > 0) {
4223    log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4224                         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4225    _markStack.expand();
4226    _ser_pmc_remark_ovflw = 0;
4227    _ser_pmc_preclean_ovflw = 0;
4228    _ser_kac_preclean_ovflw = 0;
4229    _ser_kac_ovflw = 0;
4230  }
4231  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4232    log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4233                  _par_pmc_remark_ovflw, _par_kac_ovflw);
4234    _par_pmc_remark_ovflw = 0;
4235    _par_kac_ovflw = 0;
4236  }
4237  if (_markStack._hit_limit > 0) {
4238    log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4239                  _markStack._hit_limit);
4240  }
4241  if (_markStack._failed_double > 0) {
4242    log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4243                  _markStack._failed_double, _markStack.capacity());
4244  }
4245  _markStack._hit_limit = 0;
4246  _markStack._failed_double = 0;
4247
4248  if ((VerifyAfterGC || VerifyDuringGC) &&
4249      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4250    verify_after_remark();
4251  }
4252
4253  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4254
4255  // Change under the freelistLocks.
4256  _collectorState = Sweeping;
4257  // Call isAllClear() under bitMapLock
4258  assert(_modUnionTable.isAllClear(),
4259      "Should be clear by end of the final marking");
4260  assert(_ct->klass_rem_set()->mod_union_is_clear(),
4261      "Should be clear by end of the final marking");
4262}
4263
4264void CMSParInitialMarkTask::work(uint worker_id) {
4265  elapsedTimer _timer;
4266  ResourceMark rm;
4267  HandleMark   hm;
4268
4269  // ---------- scan from roots --------------
4270  _timer.start();
4271  GenCollectedHeap* gch = GenCollectedHeap::heap();
4272  ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4273
4274  // ---------- young gen roots --------------
4275  {
4276    work_on_young_gen_roots(&par_mri_cl);
4277    _timer.stop();
4278    log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4279  }
4280
4281  // ---------- remaining roots --------------
4282  _timer.reset();
4283  _timer.start();
4284
4285  CLDToOopClosure cld_closure(&par_mri_cl, true);
4286
4287  gch->cms_process_roots(_strong_roots_scope,
4288                         false,     // yg was scanned above
4289                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4290                         _collector->should_unload_classes(),
4291                         &par_mri_cl,
4292                         &cld_closure);
4293  assert(_collector->should_unload_classes()
4294         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4295         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4296  _timer.stop();
4297  log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4298}
4299
4300// Parallel remark task
4301class CMSParRemarkTask: public CMSParMarkTask {
4302  CompactibleFreeListSpace* _cms_space;
4303
4304  // The per-thread work queues, available here for stealing.
4305  OopTaskQueueSet*       _task_queues;
4306  ParallelTaskTerminator _term;
4307  StrongRootsScope*      _strong_roots_scope;
4308
4309 public:
4310  // A value of 0 passed to n_workers will cause the number of
4311  // workers to be taken from the active workers in the work gang.
4312  CMSParRemarkTask(CMSCollector* collector,
4313                   CompactibleFreeListSpace* cms_space,
4314                   uint n_workers, WorkGang* workers,
4315                   OopTaskQueueSet* task_queues,
4316                   StrongRootsScope* strong_roots_scope):
4317    CMSParMarkTask("Rescan roots and grey objects in parallel",
4318                   collector, n_workers),
4319    _cms_space(cms_space),
4320    _task_queues(task_queues),
4321    _term(n_workers, task_queues),
4322    _strong_roots_scope(strong_roots_scope) { }
4323
4324  OopTaskQueueSet* task_queues() { return _task_queues; }
4325
4326  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4327
4328  ParallelTaskTerminator* terminator() { return &_term; }
4329  uint n_workers() { return _n_workers; }
4330
4331  void work(uint worker_id);
4332
4333 private:
4334  // ... of dirty cards in old space
4335  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4336                                  ParMarkRefsIntoAndScanClosure* cl);
4337
4338  // ... work stealing for the above
4339  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
4340};
4341
4342class RemarkKlassClosure : public KlassClosure {
4343  KlassToOopClosure _cm_klass_closure;
4344 public:
4345  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4346  void do_klass(Klass* k) {
4347    // Check if we have modified any oops in the Klass during the concurrent marking.
4348    if (k->has_accumulated_modified_oops()) {
4349      k->clear_accumulated_modified_oops();
4350
4351      // We could have transferred the current modified marks to the accumulated marks,
4352      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4353    } else if (k->has_modified_oops()) {
4354      // Don't clear anything, this info is needed by the next young collection.
4355    } else {
4356      // No modified oops in the Klass.
4357      return;
4358    }
4359
4360    // The klass has modified fields, need to scan the klass.
4361    _cm_klass_closure.do_klass(k);
4362  }
4363};
4364
4365void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
4366  ParNewGeneration* young_gen = _collector->_young_gen;
4367  ContiguousSpace* eden_space = young_gen->eden();
4368  ContiguousSpace* from_space = young_gen->from();
4369  ContiguousSpace* to_space   = young_gen->to();
4370
4371  HeapWord** eca = _collector->_eden_chunk_array;
4372  size_t     ect = _collector->_eden_chunk_index;
4373  HeapWord** sca = _collector->_survivor_chunk_array;
4374  size_t     sct = _collector->_survivor_chunk_index;
4375
4376  assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4377  assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4378
4379  do_young_space_rescan(cl, to_space, NULL, 0);
4380  do_young_space_rescan(cl, from_space, sca, sct);
4381  do_young_space_rescan(cl, eden_space, eca, ect);
4382}
4383
4384// work_queue(i) is passed to the closure
4385// ParMarkRefsIntoAndScanClosure.  The "i" parameter
4386// also is passed to do_dirty_card_rescan_tasks() and to
4387// do_work_steal() to select the i-th task_queue.
4388
4389void CMSParRemarkTask::work(uint worker_id) {
4390  elapsedTimer _timer;
4391  ResourceMark rm;
4392  HandleMark   hm;
4393
4394  // ---------- rescan from roots --------------
4395  _timer.start();
4396  GenCollectedHeap* gch = GenCollectedHeap::heap();
4397  ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4398    _collector->_span, _collector->ref_processor(),
4399    &(_collector->_markBitMap),
4400    work_queue(worker_id));
4401
4402  // Rescan young gen roots first since these are likely
4403  // coarsely partitioned and may, on that account, constitute
4404  // the critical path; thus, it's best to start off that
4405  // work first.
4406  // ---------- young gen roots --------------
4407  {
4408    work_on_young_gen_roots(&par_mrias_cl);
4409    _timer.stop();
4410    log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4411  }
4412
4413  // ---------- remaining roots --------------
4414  _timer.reset();
4415  _timer.start();
4416  gch->cms_process_roots(_strong_roots_scope,
4417                         false,     // yg was scanned above
4418                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4419                         _collector->should_unload_classes(),
4420                         &par_mrias_cl,
4421                         NULL);     // The dirty klasses will be handled below
4422
4423  assert(_collector->should_unload_classes()
4424         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4425         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4426  _timer.stop();
4427  log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
4428
4429  // ---------- unhandled CLD scanning ----------
4430  if (worker_id == 0) { // Single threaded at the moment.
4431    _timer.reset();
4432    _timer.start();
4433
4434    // Scan all new class loader data objects and new dependencies that were
4435    // introduced during concurrent marking.
4436    ResourceMark rm;
4437    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4438    for (int i = 0; i < array->length(); i++) {
4439      par_mrias_cl.do_cld_nv(array->at(i));
4440    }
4441
4442    // We don't need to keep track of new CLDs anymore.
4443    ClassLoaderDataGraph::remember_new_clds(false);
4444
4445    _timer.stop();
4446    log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4447  }
4448
4449  // ---------- dirty klass scanning ----------
4450  if (worker_id == 0) { // Single threaded at the moment.
4451    _timer.reset();
4452    _timer.start();
4453
4454    // Scan all classes that were dirtied during the concurrent marking phase.
4455    RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4456    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4457
4458    _timer.stop();
4459    log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4460  }
4461
4462  // We might have added oops to ClassLoaderData::_handles during the
4463  // concurrent marking phase. These oops point to newly allocated objects
4464  // that are guaranteed to be kept alive. Either by the direct allocation
4465  // code, or when the young collector processes the roots. Hence,
4466  // we don't have to revisit the _handles block during the remark phase.
4467
4468  // ---------- rescan dirty cards ------------
4469  _timer.reset();
4470  _timer.start();
4471
4472  // Do the rescan tasks for the CMS old generation space
4473  // (cms_space).
4474  // "worker_id" is passed to select the task_queue for "worker_id"
4475  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4476  _timer.stop();
4477  log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4478
4479  // ---------- steal work from other threads ...
4480  // ---------- ... and drain overflow list.
4481  _timer.reset();
4482  _timer.start();
4483  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4484  _timer.stop();
4485  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4486}
4487
4488void
4489CMSParMarkTask::do_young_space_rescan(
4490  OopsInGenClosure* cl, ContiguousSpace* space,
4491  HeapWord** chunk_array, size_t chunk_top) {
4492  // Until all tasks completed:
4493  // . claim an unclaimed task
4494  // . compute region boundaries corresponding to task claimed
4495  //   using chunk_array
4496  // . par_oop_iterate(cl) over that region
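  //
  // For example, with chunk_top == 3 recorded samples c0 < c1 < c2, the
  // claimed tasks correspond to the regions
  //   [bottom, c0), [c0, c1), [c1, c2), [c2, top)
  // i.e. chunk_top + 1 tasks in total (which is how n_tasks is set up in
  // initialize_sequential_subtasks_for_young_gen_rescan()).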
4497
4498  ResourceMark rm;
4499  HandleMark   hm;
4500
4501  SequentialSubTasksDone* pst = space->par_seq_tasks();
4502
4503  uint nth_task = 0;
4504  uint n_tasks  = pst->n_tasks();
4505
4506  if (n_tasks > 0) {
4507    assert(pst->valid(), "Uninitialized use?");
4508    HeapWord *start, *end;
4509    while (!pst->is_task_claimed(/* reference */ nth_task)) {
4510      // We claimed task # nth_task; compute its boundaries.
4511      if (chunk_top == 0) {  // no samples were taken
4512        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4513        start = space->bottom();
4514        end   = space->top();
4515      } else if (nth_task == 0) {
4516        start = space->bottom();
4517        end   = chunk_array[nth_task];
4518      } else if (nth_task < (uint)chunk_top) {
4519        assert(nth_task >= 1, "Control point invariant");
4520        start = chunk_array[nth_task - 1];
4521        end   = chunk_array[nth_task];
4522      } else {
4523        assert(nth_task == (uint)chunk_top, "Control point invariant");
4524        start = chunk_array[chunk_top - 1];
4525        end   = space->top();
4526      }
4527      MemRegion mr(start, end);
4528      // Verify that mr is in space
4529      assert(mr.is_empty() || space->used_region().contains(mr),
4530             "Should be in space");
4531      // Verify that "start" is an object boundary
4532      assert(mr.is_empty() || oop(mr.start())->is_oop(),
4533             "Should be an oop");
4534      space->par_oop_iterate(mr, cl);
4535    }
4536    pst->all_tasks_completed();
4537  }
4538}
4539
4540void
4541CMSParRemarkTask::do_dirty_card_rescan_tasks(
4542  CompactibleFreeListSpace* sp, int i,
4543  ParMarkRefsIntoAndScanClosure* cl) {
4544  // Until all tasks completed:
4545  // . claim an unclaimed task
4546  // . compute region boundaries corresponding to task claimed
4547  // . transfer dirty bits ct->mut for that region
4548  // . apply rescanclosure to dirty mut bits for that region
4549
4550  ResourceMark rm;
4551  HandleMark   hm;
4552
4553  OopTaskQueue* work_q = work_queue(i);
4554  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4555  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4556  // CAUTION: This closure has state that persists across calls to
4557  // the work method dirty_range_iterate_clear() in that it has
4558  // embedded in it a (subtype of) UpwardsObjectClosure. The
4559  // use of that state in the embedded UpwardsObjectClosure instance
4560  // assumes that the cards are always iterated (even if in parallel
4561  // by several threads) in monotonically increasing order per each
4562  // thread. This is true of the implementation below which picks
4563  // card ranges (chunks) in monotonically increasing order globally
4564  // and, a-fortiori, in monotonically increasing order per thread
4565  // (the latter order being a subsequence of the former).
4566  // If the work code below is ever reorganized into a more chaotic
4567  // work-partitioning form than the current "sequential tasks"
4568  // paradigm, the use of that persistent state will have to be
4569  // revisited and modified appropriately. See also related
4570  // bug 4756801 work on which should examine this code to make
4571  // sure that the changes there do not run counter to the
4572  // assumptions made here and necessary for correctness and
4573  // efficiency. Note also that this code might yield inefficient
4574  // behavior in the case of very large objects that span one or
4575  // more work chunks. Such objects would potentially be scanned
4576  // several times redundantly. Work on 4756801 should try and
4577  // address that performance anomaly if at all possible. XXX
4578  MemRegion  full_span  = _collector->_span;
4579  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4580  MarkFromDirtyCardsClosure
4581    greyRescanClosure(_collector, full_span, // entire span of interest
4582                      sp, bm, work_q, cl);
4583
4584  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4585  assert(pst->valid(), "Uninitialized use?");
4586  uint nth_task = 0;
4587  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4588  MemRegion span = sp->used_region();
4589  HeapWord* start_addr = span.start();
4590  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4591                                           alignment);
4592  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4593  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4594         start_addr, "Check alignment");
4595  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4596         chunk_size, "Check alignment");
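  // For example, on a 64-bit VM with the usual 512-byte card size,
  // 'alignment' is 32K, so chunk boundaries fall on whole-MUT-word
  // boundaries and no two workers ever touch the same MUT word.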
4597
4598  while (!pst->is_task_claimed(/* reference */ nth_task)) {
4599    // Having claimed the nth_task, compute corresponding mem-region,
4600    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4601    // The alignment restriction ensures that we do not need any
4602    // synchronization with other gang-workers while setting or
4603    // clearing bits in this chunk of the MUT.
4604    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4605                                    start_addr + (nth_task+1)*chunk_size);
4606    // The last chunk's end might be way beyond end of the
4607    // used region. In that case pull back appropriately.
4608    if (this_span.end() > end_addr) {
4609      this_span.set_end(end_addr);
4610      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4611    }
4612    // Iterate over the dirty cards covering this chunk, marking them
4613    // precleaned, and setting the corresponding bits in the mod union
4614    // table. Since we have been careful to partition at Card and MUT-word
4615    // boundaries no synchronization is needed between parallel threads.
4616    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4617                                                 &modUnionClosure);
4618
4619    // Having transferred these marks into the modUnionTable,
4620    // rescan the marked objects on the dirty cards in the modUnionTable.
4621    // Even if this is at a synchronous collection, the initial marking
4622    // may have been done during an asynchronous collection so there
4623    // may be dirty bits in the mod-union table.
4624    _collector->_modUnionTable.dirty_range_iterate_clear(
4625                  this_span, &greyRescanClosure);
4626    _collector->_modUnionTable.verifyNoOneBitsInRange(
4627                                 this_span.start(),
4628                                 this_span.end());
4629  }
4630  pst->all_tasks_completed();  // declare that I am done
4631}
4632
4633// . see if we can share work_queues with ParNew? XXX
4634void
4635CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
4636                                int* seed) {
4637  OopTaskQueue* work_q = work_queue(i);
4638  NOT_PRODUCT(int num_steals = 0;)
4639  oop obj_to_scan;
4640  CMSBitMap* bm = &(_collector->_markBitMap);
4641
4642  while (true) {
4643    // Completely finish any left over work from (an) earlier round(s)
4644    cl->trim_queue(0);
4645    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4646                                         (size_t)ParGCDesiredObjsFromOverflowList);
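    // i.e. refill with at most a quarter of the queue's remaining capacity,
    // capped at ParGCDesiredObjsFromOverflowList, so the transfer itself
    // can never overflow our local work queue.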
4647    // Now check if there's any work in the overflow list
4648    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4649    // only affects the number of attempts made to get work from the
4650    // overflow list and does not affect the number of workers.  Just
4651    // pass ParallelGCThreads so this behavior is unchanged.
4652    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4653                                                work_q,
4654                                                ParallelGCThreads)) {
4655      // found something in global overflow list;
4656      // not yet ready to go stealing work from others.
4657      // We'd like to assert(work_q->size() != 0, ...)
4658      // because we just took work from the overflow list,
4659      // but of course we can't since all of that could have
4660      // been already stolen from us.
4661      // "He giveth and He taketh away."
4662      continue;
4663    }
4664    // Verify that we have no work before we resort to stealing
4665    assert(work_q->size() == 0, "Have work, shouldn't steal");
4666    // Try to steal from other queues that have work
4667    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4668      NOT_PRODUCT(num_steals++;)
4669      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4670      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4671      // Do scanning work
4672      obj_to_scan->oop_iterate(cl);
4673      // Loop around, finish this work, and try to steal some more
4674    } else if (terminator()->offer_termination()) {
4675        break;  // nirvana from the infinite cycle
4676    }
4677  }
4678  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4679  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4680         "Else our work is not yet done");
4681}
4682
4683// Record object boundaries in _eden_chunk_array by sampling the eden
4684// top in the slow-path eden object allocation code path, if
4685// CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
4686// false, we instead rely on the asynchronous sampling done in
4687// sample_eden(), which is active during (part of) the preclean
4688// phase.
4689void CMSCollector::sample_eden_chunk() {
4690  if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4691    if (_eden_chunk_lock->try_lock()) {
4692      // Record a sample. This is the critical section. The contents
4693      // of the _eden_chunk_array have to be non-decreasing in
4694      // address order.
4695      _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4696      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4697             "Unexpected state of Eden");
4698      if (_eden_chunk_index == 0 ||
4699          ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4700           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4701                          _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4702        _eden_chunk_index++;  // commit sample
4703      }
4704      _eden_chunk_lock->unlock();
4705    }
4706  }
4707}
4708
4709// Return a thread-local PLAB recording array, as appropriate.
4710void* CMSCollector::get_data_recorder(int thr_num) {
4711  if (_survivor_plab_array != NULL &&
4712      (CMSPLABRecordAlways ||
4713       (_collectorState > Marking && _collectorState < FinalMarking))) {
4714    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4715    ChunkArray* ca = &_survivor_plab_array[thr_num];
4716    ca->reset();   // clear it so that fresh data is recorded
4717    return (void*) ca;
4718  } else {
4719    return NULL;
4720  }
4721}
4722
4723// Reset all the thread-local PLAB recording arrays
4724void CMSCollector::reset_survivor_plab_arrays() {
4725  for (uint i = 0; i < ParallelGCThreads; i++) {
4726    _survivor_plab_array[i].reset();
4727  }
4728}
4729
4730// Merge the per-thread plab arrays into the global survivor chunk
4731// array which will provide the partitioning of the survivor space
4732// for CMS initial scan and rescan.
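// The merge below is effectively a k-way merge: each per-thread ChunkArray
// is already sorted by address, and each round consumes the smallest
// unconsumed entry across all threads, so _survivor_chunk_array ends up
// globally sorted (the ASSERT block at the end checks exactly that).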
4733void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4734                                              int no_of_gc_threads) {
4735  assert(_survivor_plab_array  != NULL, "Error");
4736  assert(_survivor_chunk_array != NULL, "Error");
4737  assert(_collectorState == FinalMarking ||
4738         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4739  for (int j = 0; j < no_of_gc_threads; j++) {
4740    _cursor[j] = 0;
4741  }
4742  HeapWord* top = surv->top();
4743  size_t i;
4744  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4745    HeapWord* min_val = top;          // Higher than any PLAB address
4746    uint      min_tid = 0;            // position of min_val this round
4747    for (int j = 0; j < no_of_gc_threads; j++) {
4748      ChunkArray* cur_sca = &_survivor_plab_array[j];
4749      if (_cursor[j] == cur_sca->end()) {
4750        continue;
4751      }
4752      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4753      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4754      assert(surv->used_region().contains(cur_val), "Out of bounds value");
4755      if (cur_val < min_val) {
4756        min_tid = j;
4757        min_val = cur_val;
4758      } else {
4759        assert(cur_val < top, "All recorded addresses should be less");
4760      }
4761    }
4762    // At this point min_val and min_tid are respectively
4763    // the least address among _survivor_plab_array[j].nth(_cursor[j]) over all j,
4764    // and the thread (j) that witnesses that address.
4765    // We record this address in the _survivor_chunk_array[i]
4766    // and increment _cursor[min_tid] prior to the next round i.
4767    if (min_val == top) {
4768      break;
4769    }
4770    _survivor_chunk_array[i] = min_val;
4771    _cursor[min_tid]++;
4772  }
4773  // We are all done; record the size of the _survivor_chunk_array
4774  _survivor_chunk_index = i; // exclusive: [0, i)
4775  log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4776  // Verify that we used up all the recorded entries
4777  #ifdef ASSERT
4778    size_t total = 0;
4779    for (int j = 0; j < no_of_gc_threads; j++) {
4780      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4781      total += _cursor[j];
4782    }
4783    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4784    // Check that the merged array is in sorted order
4785    if (total > 0) {
4786      for (size_t i = 0; i < total - 1; i++) {
4787        log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4788                                     i, p2i(_survivor_chunk_array[i]));
4789        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4790               "Not sorted");
4791      }
4792    }
4793  #endif // ASSERT
4794}
4795
4796// Set up the space's par_seq_tasks structure for work claiming
4797// for parallel initial scan and rescan of young gen.
4798// See ParRescanTask where this is currently used.
4799void
4800CMSCollector::
4801initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4802  assert(n_threads > 0, "Unexpected n_threads argument");
4803
4804  // Eden space
4805  if (!_young_gen->eden()->is_empty()) {
4806    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4807    assert(!pst->valid(), "Clobbering existing data?");
4808    // Each valid entry in [0, _eden_chunk_index) represents a task.
4809    size_t n_tasks = _eden_chunk_index + 1;
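    // The extra "+ 1" task covers the tail of eden, from the last recorded
    // chunk boundary up to eden's top (see do_young_space_rescan()).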
4810    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4811    // Sets the condition for completion of the subtask (how many threads
4812    // need to finish in order to be done).
4813    pst->set_n_threads(n_threads);
4814    pst->set_n_tasks((int)n_tasks);
4815  }
4816
4817  // Merge the survivor plab arrays into _survivor_chunk_array
4818  if (_survivor_plab_array != NULL) {
4819    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
4820  } else {
4821    assert(_survivor_chunk_index == 0, "Error");
4822  }
4823
4824  // To space
4825  {
4826    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
4827    assert(!pst->valid(), "Clobbering existing data?");
4828    // Sets the condition for completion of the subtask (how many threads
4829    // need to finish in order to be done).
4830    pst->set_n_threads(n_threads);
4831    pst->set_n_tasks(1);
4832    assert(pst->valid(), "Error");
4833  }
4834
4835  // From space
4836  {
4837    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
4838    assert(!pst->valid(), "Clobbering existing data?");
4839    size_t n_tasks = _survivor_chunk_index + 1;
4840    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
4841    // Sets the condition for completion of the subtask (how many threads
4842    // need to finish in order to be done).
4843    pst->set_n_threads(n_threads);
4844    pst->set_n_tasks((int)n_tasks);
4845    assert(pst->valid(), "Error");
4846  }
4847}
4848
4849// Parallel version of remark
4850void CMSCollector::do_remark_parallel() {
4851  GenCollectedHeap* gch = GenCollectedHeap::heap();
4852  WorkGang* workers = gch->workers();
4853  assert(workers != NULL, "Need parallel worker threads.");
4854  // Choose to use the number of GC workers most recently set
4855  // into "active_workers".
4856  uint n_workers = workers->active_workers();
4857
4858  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4859
4860  StrongRootsScope srs(n_workers);
4861
4862  CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
4863
4864  // We won't be iterating over the cards in the card table updating
4865  // the younger_gen cards, so we shouldn't call the following, else
4866  // the verification code as well as subsequent younger_refs_iterate
4867  // code would get confused. XXX
4868  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
4869
4870  // The young gen rescan work will not be done as part of
4871  // process_roots (which currently doesn't know how to
4872  // parallelize such a scan), but rather will be broken up into
4873  // a set of parallel tasks (via the sampling that the [abortable]
4874  // preclean phase did of eden, plus the [two] tasks of
4875  // scanning the [two] survivor spaces). Further fine-grain
4876  // parallelization of the scanning of the survivor spaces
4877  // themselves, and of precleaning of the young gen itself
4878  // is deferred to the future.
4879  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
4880
4881  // The dirty card rescan work is broken up into a "sequence"
4882  // of parallel tasks (per constituent space) that are dynamically
4883  // claimed by the parallel threads.
4884  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
4885
4886  // It turns out that even when we're using 1 thread, doing the work in a
4887  // separate thread causes wide variance in run times.  We can't help this
4888  // in the multi-threaded case, but we special-case n=1 here to get
4889  // repeatable measurements of the 1-thread overhead of the parallel code.
4890  if (n_workers > 1) {
4891    // Make refs discovery MT-safe, if it isn't already: it may not
4892    // necessarily be so, since it's possible that we are doing
4893    // ST marking.
4894    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
4895    workers->run_task(&tsk);
4896  } else {
4897    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4898    tsk.work(0);
4899  }
4900
4901  // restore, single-threaded for now, any preserved marks
4902  // as a result of work_q overflow
4903  restore_preserved_marks_if_any();
4904}
4905
4906// Non-parallel version of remark
4907void CMSCollector::do_remark_non_parallel() {
4908  ResourceMark rm;
4909  HandleMark   hm;
4910  GenCollectedHeap* gch = GenCollectedHeap::heap();
4911  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4912
4913  MarkRefsIntoAndScanClosure
4914    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4915             &_markStack, this,
4916             false /* should_yield */, false /* not precleaning */);
4917  MarkFromDirtyCardsClosure
4918    markFromDirtyCardsClosure(this, _span,
4919                              NULL,  // space is set further below
4920                              &_markBitMap, &_markStack, &mrias_cl);
4921  {
4922    GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
4923    // Iterate over the dirty cards, setting the corresponding bits in the
4924    // mod union table.
4925    {
4926      ModUnionClosure modUnionClosure(&_modUnionTable);
4927      _ct->ct_bs()->dirty_card_iterate(
4928                      _cmsGen->used_region(),
4929                      &modUnionClosure);
4930    }
4931    // Having transferred these marks into the modUnionTable, we just need
4932    // to rescan the marked objects on the dirty cards in the modUnionTable.
4933    // The initial marking may have been done during an asynchronous
4934    // collection so there may be dirty bits in the mod-union table.
4935    const int alignment =
4936      CardTableModRefBS::card_size * BitsPerWord;
4937    {
4938      // ... First handle dirty cards in CMS gen
4939      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4940      MemRegion ur = _cmsGen->used_region();
4941      HeapWord* lb = ur.start();
4942      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
4943      MemRegion cms_span(lb, ub);
4944      _modUnionTable.dirty_range_iterate_clear(cms_span,
4945                                               &markFromDirtyCardsClosure);
4946      verify_work_stacks_empty();
4947      log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
4948    }
4949  }
4950  if (VerifyDuringGC &&
4951      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4952    HandleMark hm;  // Discard invalid handles created during verification
4953    Universe::verify();
4954  }
4955  {
4956    GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
4957
4958    verify_work_stacks_empty();
4959
4960    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
4961    StrongRootsScope srs(1);
4962
4963    gch->cms_process_roots(&srs,
4964                           true,  // young gen as roots
4965                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
4966                           should_unload_classes(),
4967                           &mrias_cl,
4968                           NULL); // The dirty klasses will be handled below
4969
4970    assert(should_unload_classes()
4971           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4972           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4973  }
4974
4975  {
4976    GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
4977
4978    verify_work_stacks_empty();
4979
4980    // Scan all class loader data objects that might have been introduced
4981    // during concurrent marking.
4982    ResourceMark rm;
4983    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4984    for (int i = 0; i < array->length(); i++) {
4985      mrias_cl.do_cld_nv(array->at(i));
4986    }
4987
4988    // We don't need to keep track of new CLDs anymore.
4989    ClassLoaderDataGraph::remember_new_clds(false);
4990
4991    verify_work_stacks_empty();
4992  }
4993
4994  {
4995    GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
4996
4997    verify_work_stacks_empty();
4998
4999    RemarkKlassClosure remark_klass_closure(&mrias_cl);
5000    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5001
5002    verify_work_stacks_empty();
5003  }
5004
5005  // We might have added oops to ClassLoaderData::_handles during the
5006  // concurrent marking phase. These oops point to newly allocated objects
5007  // that are guaranteed to be kept alive. Either by the direct allocation
5008  // code, or when the young collector processes the roots. Hence,
5009  // we don't have to revisit the _handles block during the remark phase.
5010
5011  verify_work_stacks_empty();
5012  // Restore evacuated mark words, if any, used for overflow list links
5013  restore_preserved_marks_if_any();
5014
5015  verify_overflow_empty();
5016}
5017
5018////////////////////////////////////////////////////////
5019// Parallel Reference Processing Task Proxy Class
5020////////////////////////////////////////////////////////
5021class AbstractGangTaskWOopQueues : public AbstractGangTask {
5022  OopTaskQueueSet*       _queues;
5023  ParallelTaskTerminator _terminator;
5024 public:
5025  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5026    AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5027  ParallelTaskTerminator* terminator() { return &_terminator; }
5028  OopTaskQueueSet* queues() { return _queues; }
5029};
5030
5031class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5032  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5033  CMSCollector*          _collector;
5034  CMSBitMap*             _mark_bit_map;
5035  const MemRegion        _span;
5036  ProcessTask&           _task;
5037
5038public:
5039  CMSRefProcTaskProxy(ProcessTask&     task,
5040                      CMSCollector*    collector,
5041                      const MemRegion& span,
5042                      CMSBitMap*       mark_bit_map,
5043                      AbstractWorkGang* workers,
5044                      OopTaskQueueSet* task_queues):
5045    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5046      task_queues,
5047      workers->active_workers()),
5048    _task(task),
5049    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5050  {
5051    assert(_collector->_span.equals(_span) && !_span.is_empty(),
5052           "Inconsistency in _span");
5053  }
5054
5055  OopTaskQueueSet* task_queues() { return queues(); }
5056
5057  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5058
5059  void do_work_steal(int i,
5060                     CMSParDrainMarkingStackClosure* drain,
5061                     CMSParKeepAliveClosure* keep_alive,
5062                     int* seed);
5063
5064  virtual void work(uint worker_id);
5065};
5066
5067void CMSRefProcTaskProxy::work(uint worker_id) {
5068  ResourceMark rm;
5069  HandleMark hm;
5070  assert(_collector->_span.equals(_span), "Inconsistency in _span");
5071  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5072                                        _mark_bit_map,
5073                                        work_queue(worker_id));
5074  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5075                                                 _mark_bit_map,
5076                                                 work_queue(worker_id));
5077  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5078  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5079  if (_task.marks_oops_alive()) {
5080    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5081                  _collector->hash_seed(worker_id));
5082  }
5083  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5084  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5085}
5086
5087class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5088  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5089  EnqueueTask& _task;
5090
5091public:
5092  CMSRefEnqueueTaskProxy(EnqueueTask& task)
5093    : AbstractGangTask("Enqueue reference objects in parallel"),
5094      _task(task)
5095  { }
5096
5097  virtual void work(uint worker_id)
5098  {
5099    _task.work(worker_id);
5100  }
5101};
5102
5103CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5104  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5105   _span(span),
5106   _bit_map(bit_map),
5107   _work_queue(work_queue),
5108   _mark_and_push(collector, span, bit_map, work_queue),
5109   _low_water_mark(MIN2((work_queue->max_elems()/4),
5110                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5111{ }
5112
5113// . see if we can share work_queues with ParNew? XXX
5114void CMSRefProcTaskProxy::do_work_steal(int i,
5115  CMSParDrainMarkingStackClosure* drain,
5116  CMSParKeepAliveClosure* keep_alive,
5117  int* seed) {
5118  OopTaskQueue* work_q = work_queue(i);
5119  NOT_PRODUCT(int num_steals = 0;)
5120  oop obj_to_scan;
5121
5122  while (true) {
5123    // Completely finish any left over work from (an) earlier round(s)
5124    drain->trim_queue(0);
5125    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5126                                         (size_t)ParGCDesiredObjsFromOverflowList);
5127    // Now check if there's any work in the overflow list
5128    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5129    // only affects the number of attempts made to get work from the
5130    // overflow list and does not affect the number of workers.  Just
5131    // pass ParallelGCThreads so this behavior is unchanged.
5132    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5133                                                work_q,
5134                                                ParallelGCThreads)) {
5135      // Found something in global overflow list;
5136      // not yet ready to go stealing work from others.
5137      // We'd like to assert(work_q->size() != 0, ...)
5138      // because we just took work from the overflow list,
5139      // but of course we can't, since all of that might have
5140      // been already stolen from us.
5141      continue;
5142    }
5143    // Verify that we have no work before we resort to stealing
5144    assert(work_q->size() == 0, "Have work, shouldn't steal");
5145    // Try to steal from other queues that have work
5146    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5147      NOT_PRODUCT(num_steals++;)
5148      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5149      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5150      // Do scanning work
5151      obj_to_scan->oop_iterate(keep_alive);
5152      // Loop around, finish this work, and try to steal some more
5153    } else if (terminator()->offer_termination()) {
5154      break;  // nirvana from the infinite cycle
5155    }
5156  }
5157  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5158}
5159
5160void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5161{
5162  GenCollectedHeap* gch = GenCollectedHeap::heap();
5163  WorkGang* workers = gch->workers();
5164  assert(workers != NULL, "Need parallel worker threads.");
5165  CMSRefProcTaskProxy rp_task(task, &_collector,
5166                              _collector.ref_processor()->span(),
5167                              _collector.markBitMap(),
5168                              workers, _collector.task_queues());
5169  workers->run_task(&rp_task);
5170}
5171
5172void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5173{
5175  GenCollectedHeap* gch = GenCollectedHeap::heap();
5176  WorkGang* workers = gch->workers();
5177  assert(workers != NULL, "Need parallel worker threads.");
5178  CMSRefEnqueueTaskProxy enq_task(task);
5179  workers->run_task(&enq_task);
5180}
5181
5182void CMSCollector::refProcessingWork() {
5183  ResourceMark rm;
5184  HandleMark   hm;
5185
5186  ReferenceProcessor* rp = ref_processor();
5187  assert(rp->span().equals(_span), "Spans should be equal");
5188  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5189  // Process weak references.
5190  rp->setup_policy(false);
5191  verify_work_stacks_empty();
5192
5193  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5194                                          &_markStack, false /* !preclean */);
5195  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5196                                _span, &_markBitMap, &_markStack,
5197                                &cmsKeepAliveClosure, false /* !preclean */);
5198  {
5199    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
5200
5201    ReferenceProcessorStats stats;
5202    if (rp->processing_is_mt()) {
5203      // Set the degree of MT processing here.  If discovery was done MT,
5204      // it may have used a different number of threads, so the discovered
5205      // lists may be unevenly populated with Reference objects.  That is OK
5206      // as long as the Reference lists are balanced (see
5207      // balance_all_queues() and balance_queues()).
5208      GenCollectedHeap* gch = GenCollectedHeap::heap();
5209      uint active_workers = ParallelGCThreads;
5210      WorkGang* workers = gch->workers();
5211      if (workers != NULL) {
5212        active_workers = workers->active_workers();
5213        // The expectation is that active_workers will have already
5214        // been set to a reasonable value.  If it has not been set,
5215        // investigate.
5216        assert(active_workers > 0, "Should have been set during scavenge");
5217      }
5218      rp->set_active_mt_degree(active_workers);
5219      CMSRefProcTaskExecutor task_executor(*this);
5220      stats = rp->process_discovered_references(&_is_alive_closure,
5221                                        &cmsKeepAliveClosure,
5222                                        &cmsDrainMarkingStackClosure,
5223                                        &task_executor,
5224                                        _gc_timer_cm);
5225    } else {
5226      stats = rp->process_discovered_references(&_is_alive_closure,
5227                                        &cmsKeepAliveClosure,
5228                                        &cmsDrainMarkingStackClosure,
5229                                        NULL,
5230                                        _gc_timer_cm);
5231    }
5232    _gc_tracer_cm->report_gc_reference_stats(stats);
5233
5234  }
5235
5236  // This is the point where the entire marking should have completed.
5237  verify_work_stacks_empty();
5238
5239  if (should_unload_classes()) {
5240    {
5241      GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
5242
5243      // Unload classes and purge the SystemDictionary.
5244      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5245
5246      // Unload nmethods.
5247      CodeCache::do_unloading(&_is_alive_closure, purged_class);
5248
5249      // Prune dead klasses from subklass/sibling/implementor lists.
5250      Klass::clean_weak_klass_links(&_is_alive_closure);
5251    }
5252
5253    {
5254      GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm);
5255      // Clean up unreferenced symbols in symbol table.
5256      SymbolTable::unlink();
5257    }
5258
5259    {
5260      GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm);
5261      // Delete entries for dead interned strings.
5262      StringTable::unlink(&_is_alive_closure);
5263    }
5264  }
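
  // Note on the ordering above: SystemDictionary::do_unloading() runs first so
  // that its result (whether any classes were actually unloaded) can be passed
  // to CodeCache::do_unloading(), which needs to know whether class unloading
  // occurred; Klass::clean_weak_klass_links() then prunes the stale
  // subklass/sibling/implementor links. The symbol and string table scrubs
  // depend only on _is_alive_closure.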
5265
5266
5267  // Restore any preserved marks as a result of mark stack or
5268  // work queue overflow
5269  restore_preserved_marks_if_any();  // done single-threaded for now
5270
5271  rp->set_enqueuing_is_done(true);
5272  if (rp->processing_is_mt()) {
5273    rp->balance_all_queues();
5274    CMSRefProcTaskExecutor task_executor(*this);
5275    rp->enqueue_discovered_references(&task_executor);
5276  } else {
5277    rp->enqueue_discovered_references(NULL);
5278  }
5279  rp->verify_no_references_recorded();
5280  assert(!rp->discovery_enabled(), "should have been disabled");
5281}
5282
5283#ifndef PRODUCT
5284void CMSCollector::check_correct_thread_executing() {
5285  Thread* t = Thread::current();
5286  // Only the VM thread or the CMS thread should be here.
5287  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5288         "Unexpected thread type");
5289  // If this is the VM thread, the foreground collector
5290  // should not be waiting.  Note that _foregroundGCIsActive is
5291  // true while the foreground collector is waiting.
5292  if (_foregroundGCShouldWait) {
5293    // We cannot be the VM thread
5294    assert(t->is_ConcurrentGC_thread(),
5295           "Should be CMS thread");
5296  } else {
5297    // We can be the CMS thread only if we are in a stop-the-world
5298    // phase of CMS collection.
5299    if (t->is_ConcurrentGC_thread()) {
5300      assert(_collectorState == InitialMarking ||
5301             _collectorState == FinalMarking,
5302             "Should be a stop-world phase");
5303      // The CMS thread should be holding the CMS_token.
5304      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5305             "Potential interference with concurrently "
5306             "executing VM thread");
5307    }
5308  }
5309}
5310#endif
5311
5312void CMSCollector::sweep() {
5313  assert(_collectorState == Sweeping, "just checking");
5314  check_correct_thread_executing();
5315  verify_work_stacks_empty();
5316  verify_overflow_empty();
5317  increment_sweep_count();
5318  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5319
5320  _inter_sweep_timer.stop();
5321  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5322
5323  assert(!_intra_sweep_timer.is_active(), "Should not be active");
5324  _intra_sweep_timer.reset();
5325  _intra_sweep_timer.start();
5326  {
5327    GCTraceCPUTime tcpu;
5328    CMSPhaseAccounting pa(this, "Concurrent Sweep");
5329    // First sweep the old gen
5330    {
5331      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5332                               bitMapLock());
5333      sweepWork(_cmsGen);
5334    }
5335
5336    // Update Universe::_heap_*_at_gc figures.
5337    // We need all the free list locks to make the abstract state
5338    // transition from Sweeping to Resetting. See detailed note
5339    // further below.
5340    {
5341      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5342      // Update heap occupancy information which is used as
5343      // input to soft ref clearing policy at the next gc.
5344      Universe::update_heap_info_at_gc();
5345      _collectorState = Resizing;
5346    }
5347  }
5348  verify_work_stacks_empty();
5349  verify_overflow_empty();
5350
5351  if (should_unload_classes()) {
5352    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5353    // requires that the virtual spaces are stable and not deleted.
5354    ClassLoaderDataGraph::set_should_purge(true);
5355  }
5356
5357  _intra_sweep_timer.stop();
5358  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5359
5360  _inter_sweep_timer.reset();
5361  _inter_sweep_timer.start();
5362
5363  // We need a monotonically non-decreasing time in ms; os::javaTimeMillis()
5364  // does not guarantee monotonicity, so we derive the time from
5365  // os::javaTimeNanos() instead to avoid time-warp warnings.
5366  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5367  update_time_of_last_gc(now);
5368
5369  // NOTE on abstract state transitions:
5370  // Mutators allocate-live and/or mark the mod-union table dirty
5371  // based on the state of the collection.  The former is done in
5372  // the interval [Marking, Sweeping] and the latter in the interval
5373  // [Marking, Sweeping).  Thus the transitions into the Marking state
5374  // and out of the Sweeping state must be synchronously visible
5375  // globally to the mutators.
5376  // The transition into the Marking state happens with the world
5377  // stopped so the mutators will globally see it.  Sweeping is
5378  // done asynchronously by the background collector so the transition
5379  // from the Sweeping state to the Resizing state must be done
5380  // under the freelistLock (as is the check for whether to
5381  // allocate-live and whether to dirty the mod-union table).
5382  assert(_collectorState == Resizing, "Change of collector state to"
5383    " Resizing must be done under the freelistLocks (plural)");
5384
5385  // Now that sweeping has been completed, we clear
5386  // the incremental_collection_failed flag,
5387  // thus inviting a younger gen collection to promote into
5388  // this generation. If such a promotion may still fail,
5389  // the flag will be set again when a young collection is
5390  // attempted.
5391  GenCollectedHeap* gch = GenCollectedHeap::heap();
5392  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5393  gch->update_full_collections_completed(_collection_count_start);
5394}
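
// The two sweep timers play complementary roles: _inter_sweep_timer measures
// the interval since the previous sweep ended, while _intra_sweep_timer
// measures the duration of the sweep itself. Their padded-average estimates
// are handed to beginSweepFLCensus() in sweepWork() below, where (roughly
// speaking) the free list space uses them to estimate chunk demand over the
// coming inter-sweep interval.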
5395
5396// FIX ME!!! Looks like this belongs in CFLSpace, with
5397// CMSGen merely delegating to it.
5398void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5399  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5400  HeapWord*  minAddr        = _cmsSpace->bottom();
5401  HeapWord*  largestAddr    =
5402    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5403  if (largestAddr == NULL) {
5404    // The dictionary appears to be empty.  In this case
5405    // try to coalesce at the end of the heap.
5406    largestAddr = _cmsSpace->end();
5407  }
5408  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5409  size_t nearLargestOffset =
5410    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5411  log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5412                          p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5413  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5414}
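
// For illustration (the numbers are hypothetical, not defaults): if the
// largest free chunk starts 1,000,000 HeapWords above bottom() and the
// proximity factor is 0.9, then nearLargestOffset is 900,000 - MinChunkSize,
// and isNearLargestChunk() below will report true for any address at or
// beyond bottom() + nearLargestOffset.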
5415
5416bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5417  return addr >= _cmsSpace->nearLargestChunk();
5418}
5419
5420FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5421  return _cmsSpace->find_chunk_at_end();
5422}
5423
5424void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5425                                                    bool full) {
5426  // If the young generation has been collected, gather any statistics
5427  // that are of interest at this point.
5428  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5429  if (!full && current_is_young) {
5430    // Gather statistics on the young generation collection.
5431    collector()->stats().record_gc0_end(used());
5432  }
5433}
5434
5435void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5436  // We iterate over the space(s) underlying this generation,
5437  // checking the mark bit map to see if the bits corresponding
5438  // to specific blocks are marked or not. Blocks that are
5439  // marked are live and are not swept up. All remaining blocks
5440  // are swept up, with coalescing on-the-fly as we sweep up
5441  // contiguous free and/or garbage blocks:
5442  // We need to ensure that the sweeper synchronizes with allocators
5443  // and stop-the-world collectors. In particular, the following
5444  // locks are used:
5445  // . CMS token: if this is held, a stop the world collection cannot occur
5446  // . freelistLock: if this is held no allocation can occur from this
5447  //                 generation by another thread
5448  // . bitMapLock: if this is held, no other thread can access or update
5449  //               the marking bit map
5450
5451  // Note that we need to hold the freelistLock if we use
5452  // block iterate below; else the iterator might go awry if
5453  // a mutator (or promotion) causes block contents to change
5454  // (for instance if the allocator divvies up a block).
5455  // If we hold the free list lock, for all practical purposes
5456  // young generation GC's can't occur (they'll usually need to
5457  // promote), so we might as well prevent all young generation
5458  // GC's while we do a sweeping step. For the same reason, we might
5459  // as well take the bit map lock for the entire duration
5460
5461  // check that we hold the requisite locks
5462  assert(have_cms_token(), "Should hold cms token");
5463  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5464  assert_lock_strong(old_gen->freelistLock());
5465  assert_lock_strong(bitMapLock());
5466
5467  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5468  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5469  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5470                                          _inter_sweep_estimate.padded_average(),
5471                                          _intra_sweep_estimate.padded_average());
5472  old_gen->setNearLargestChunk();
5473
5474  {
5475    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5476    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5477    // We need to free-up/coalesce garbage/blocks from a
5478    // co-terminal free run. This is done in the SweepClosure
5479    // destructor; so, do not remove this scope, else the
5480    // end-of-sweep-census below will be off by a little bit.
5481  }
5482  old_gen->cmsSpace()->sweep_completed();
5483  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5484  if (should_unload_classes()) {                // unloaded classes this cycle,
5485    _concurrent_cycles_since_last_unload = 0;   // ... reset count
5486  } else {                                      // did not unload classes,
5487    _concurrent_cycles_since_last_unload++;     // ... increment count
5488  }
5489}
5490
5491// Reset CMS data structures (for now just the marking bit map)
5492// preparatory for the next cycle.
5493void CMSCollector::reset_concurrent() {
5494  CMSTokenSyncWithLocks ts(true, bitMapLock());
5495
5496  // If the state is not "Resetting", the foreground collector
5497  // has already done the collection and the resetting.
5498  if (_collectorState != Resetting) {
5499    assert(_collectorState == Idling, "The state should only change"
5500      " because the foreground collector has finished the collection");
5501    return;
5502  }
5503
5504  {
5505    // Clear the mark bitmap (no grey objects to start with)
5506    // for the next cycle.
5507    GCTraceCPUTime tcpu;
5508    CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5509
5510    HeapWord* curAddr = _markBitMap.startWord();
5511    while (curAddr < _markBitMap.endWord()) {
5512      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5513      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5514      _markBitMap.clear_large_range(chunk);
5515      if (ConcurrentMarkSweepThread::should_yield() &&
5516          !foregroundGCIsActive() &&
5517          CMSYield) {
5518        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5519               "CMS thread should hold CMS token");
5520        assert_lock_strong(bitMapLock());
5521        bitMapLock()->unlock();
5522        ConcurrentMarkSweepThread::desynchronize(true);
5523        stopTimer();
5524        incrementYields();
5525
5526        // See the comment in coordinator_yield()
5527        for (unsigned i = 0; i < CMSYieldSleepCount &&
5528                         ConcurrentMarkSweepThread::should_yield() &&
5529                         !CMSCollector::foregroundGCIsActive(); ++i) {
5530          os::sleep(Thread::current(), 1, false);
5531        }
5532
5533        ConcurrentMarkSweepThread::synchronize(true);
5534        bitMapLock()->lock_without_safepoint_check();
5535        startTimer();
5536      }
5537      curAddr = chunk.end();
5538    }
5539    // A successful mostly concurrent collection has been done.
5540    // Because only the full (i.e., concurrent mode failure) collections
5541    // are being measured for gc overhead limits, clear the "near" flag
5542    // and count.
5543    size_policy()->reset_gc_overhead_limit_count();
5544    _collectorState = Idling;
5545  }
5546
5547  register_gc_end();
5548}
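
// The bit map is cleared in CMSBitMapYieldQuantum-sized chunks rather than in
// a single call so that the loop above can periodically give up the
// bitMapLock and the CMS token, allowing a foreground (stop-the-world)
// collection to proceed if one is requested while the reset is in progress.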
5549
5550// Same as above but for STW paths
5551void CMSCollector::reset_stw() {
5552  // already have the lock
5553  assert(_collectorState == Resetting, "just checking");
5554  assert_lock_strong(bitMapLock());
5555  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5556  _markBitMap.clear_all();
5557  _collectorState = Idling;
5558  register_gc_end();
5559}
5560
5561void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5562  GCTraceCPUTime tcpu;
5563  TraceCollectorStats tcs(counters());
5564
5565  switch (op) {
5566    case CMS_op_checkpointRootsInitial: {
5567      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5568      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5569      checkpointRootsInitial();
5570      break;
5571    }
5572    case CMS_op_checkpointRootsFinal: {
5573      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5574      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5575      checkpointRootsFinal();
5576      break;
5577    }
5578    default:
5579      fatal("No such CMS_op");
5580  }
5581}
5582
5583#ifndef PRODUCT
5584size_t const CMSCollector::skip_header_HeapWords() {
5585  return FreeChunk::header_size();
5586}
5587
5588// Try to collect here the conditions that should hold when the
5589// CMS thread is exiting. The idea is that the foreground GC
5590// thread should not be blocked if it wants to terminate
5591// the CMS thread and yet continue to run the VM for a while
5592// after that.
5593void CMSCollector::verify_ok_to_terminate() const {
5594  assert(Thread::current()->is_ConcurrentGC_thread(),
5595         "should be called by CMS thread");
5596  assert(!_foregroundGCShouldWait, "should be false");
5597  // We could check here that all the various low-level locks
5598  // are not held by the CMS thread, but that is overkill; see
5599  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5600  // is checked.
5601}
5602#endif
5603
5604size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5605   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5606          "missing Printezis mark?");
5607  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5608  size_t size = pointer_delta(nextOneAddr + 1, addr);
5609  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5610         "alignment problem");
5611  assert(size >= 3, "Necessary for Printezis marks to work");
5612  return size;
5613}
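
// Worked example (sizes in HeapWords, address A hypothetical): for a block of
// 5 words starting at A, the Printezis scheme leaves mark bits at A, A+1 and
// A+4 (the block's last word). getNextMarkedWordAddress(A + 2) finds A+4, and
// pointer_delta(A+4 + 1, A) recovers the size 5. This is also why the block
// must be at least 3 words long for the encoding to be unambiguous.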
5614
5615// A variant of the above (block_size_using_printezis_bits()) except
5616// that we return 0 if the P-bits are not yet set.
5617size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5618  if (_markBitMap.isMarked(addr + 1)) {
5619    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5620    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5621    size_t size = pointer_delta(nextOneAddr + 1, addr);
5622    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5623           "alignment problem");
5624    assert(size >= 3, "Necessary for Printezis marks to work");
5625    return size;
5626  }
5627  return 0;
5628}
5629
5630HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5631  size_t sz = 0;
5632  oop p = (oop)addr;
5633  if (p->klass_or_null_acquire() != NULL) {
5634    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5635  } else {
5636    sz = block_size_using_printezis_bits(addr);
5637  }
5638  assert(sz > 0, "size must be nonzero");
5639  HeapWord* next_block = addr + sz;
5640  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5641                                             CardTableModRefBS::card_size);
5642  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5643         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5644         "must be different cards");
5645  return next_card;
5646}
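
// For illustration, assuming the usual 512-byte cards and hypothetical
// offsets: a block starting at byte offset 0x1040 whose size works out to
// 0x200 bytes ends at 0x1240, so round_to() yields 0x1400 -- the start of the
// card just past the one containing the block's last word. The final assert
// checks that this is indeed a later card than the one containing addr.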
5647
5648
5649// CMS Bit Map Wrapper /////////////////////////////////////////
5650
5651// Construct a CMS bit map infrastructure, but don't create the
5652// bit vector itself. That is done by a separate call to
5653// CMSBitMap::allocate() further below.
5654CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5655  _bm(),
5656  _shifter(shifter),
5657  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5658                                    Monitor::_safepoint_check_sometimes) : NULL)
5659{
5660  _bmStartWord = 0;
5661  _bmWordSize  = 0;
5662}
5663
5664bool CMSBitMap::allocate(MemRegion mr) {
5665  _bmStartWord = mr.start();
5666  _bmWordSize  = mr.word_size();
5667  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5668                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5669  if (!brs.is_reserved()) {
5670    log_warning(gc)("CMS bit map allocation failure");
5671    return false;
5672  }
5673  // For now we'll just commit all of the bit map up front.
5674  // Later on we'll try to be more parsimonious with swap.
5675  if (!_virtual_space.initialize(brs, brs.size())) {
5676    log_warning(gc)("CMS bit map backing store failure");
5677    return false;
5678  }
5679  assert(_virtual_space.committed_size() == brs.size(),
5680         "didn't reserve backing store for all of CMS bit map?");
5681  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5682         _bmWordSize, "inconsistency in bit map sizing");
5683  _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
5684
5685  // bm.clear(); // can we rely on getting zero'd memory? verify below
5686  assert(isAllClear(),
5687         "Expected zero'd memory from ReservedSpace constructor");
5688  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5689         "consistency check");
5690  return true;
5691}
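
// Sizing sketch (assuming a 64-bit VM with 8-byte HeapWords): with
// _shifter == 0 the map keeps one bit per heap word, so covering a 1 GiB
// region (2^27 words) takes 2^27 >> LogBitsPerByte = 16 MiB of backing store,
// plus the single extra byte added above to cover rounding. A larger _shifter
// divides that cost by a further factor of 2^_shifter.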
5692
5693void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5694  HeapWord *next_addr, *end_addr, *last_addr;
5695  assert_locked();
5696  assert(covers(mr), "out-of-range error");
5697  // XXX assert that start and end are appropriately aligned
5698  for (next_addr = mr.start(), end_addr = mr.end();
5699       next_addr < end_addr; next_addr = last_addr) {
5700    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5701    last_addr = dirty_region.end();
5702    if (!dirty_region.is_empty()) {
5703      cl->do_MemRegion(dirty_region);
5704    } else {
5705      assert(last_addr == end_addr, "program logic");
5706      return;
5707    }
5708  }
5709}
5710
5711void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5712  _bm.print_on_error(st, prefix);
5713}
5714
5715#ifndef PRODUCT
5716void CMSBitMap::assert_locked() const {
5717  CMSLockVerifier::assert_locked(lock());
5718}
5719
5720bool CMSBitMap::covers(MemRegion mr) const {
5721  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5722  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5723         "size inconsistency");
5724  return (mr.start() >= _bmStartWord) &&
5725         (mr.end()   <= endWord());
5726}
5727
5728bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5729    return (start >= _bmStartWord && (start + size) <= endWord());
5730}
5731
5732void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5733  // verify that there are no 1 bits in the interval [left, right)
5734  FalseBitMapClosure falseBitMapClosure;
5735  iterate(&falseBitMapClosure, left, right);
5736}
5737
5738void CMSBitMap::region_invariant(MemRegion mr)
5739{
5740  assert_locked();
5741  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5742  assert(!mr.is_empty(), "unexpected empty region");
5743  assert(covers(mr), "mr should be covered by bit map");
5744  // convert address range into offset range
5745  size_t start_ofs = heapWordToOffset(mr.start());
5746  // Make sure that end() is appropriately aligned
5747  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5748                        (1 << (_shifter+LogHeapWordSize))),
5749         "Misaligned mr.end()");
5750  size_t end_ofs   = heapWordToOffset(mr.end());
5751  assert(end_ofs > start_ofs, "Should mark at least one bit");
5752}
5753
5754#endif
5755
5756bool CMSMarkStack::allocate(size_t size) {
5757  // allocate a stack of the requisite depth
5758  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5759                   size * sizeof(oop)));
5760  if (!rs.is_reserved()) {
5761    log_warning(gc)("CMSMarkStack allocation failure");
5762    return false;
5763  }
5764  if (!_virtual_space.initialize(rs, rs.size())) {
5765    log_warning(gc)("CMSMarkStack backing store failure");
5766    return false;
5767  }
5768  assert(_virtual_space.committed_size() == rs.size(),
5769         "didn't reserve backing store for all of CMS stack?");
5770  _base = (oop*)(_virtual_space.low());
5771  _index = 0;
5772  _capacity = size;
5773  NOT_PRODUCT(_max_depth = 0);
5774  return true;
5775}
5776
5777// XXX FIX ME !!! In the MT case we come in here holding a
5778// leaf lock. For printing we need to take a further lock
5779// which has lower rank. We need to recalibrate the two
5780// lock-ranks involved in order to be able to print the
5781// messages below. (Or defer the printing to the caller.
5782// For now we take the expedient path of just disabling the
5783// messages for the problematic case.)
5784void CMSMarkStack::expand() {
5785  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5786  if (_capacity == MarkStackSizeMax) {
5787    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5788      // We print a warning message only once per CMS cycle.
5789      log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5790    }
5791    return;
5792  }
5793  // Double capacity if possible
5794  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5795  // Do not give up existing stack until we have managed to
5796  // get the double capacity that we desired.
5797  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5798                   new_capacity * sizeof(oop)));
5799  if (rs.is_reserved()) {
5800    // Release the backing store associated with old stack
5801    _virtual_space.release();
5802    // Reinitialize virtual space for new stack
5803    if (!_virtual_space.initialize(rs, rs.size())) {
5804      fatal("Not enough swap for expanded marking stack");
5805    }
5806    _base = (oop*)(_virtual_space.low());
5807    _index = 0;
5808    _capacity = new_capacity;
5809  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5810    // Failed to double capacity; continue.
5811    // We print a detail message only once per CMS cycle.
5812    log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5813                        _capacity / K, new_capacity / K);
5814  }
5815}
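
// Note that expand() discards any previous contents (_index is reset to 0),
// so it is only useful when the stack is empty; callers that hit the
// MarkStackSizeMax ceiling or fail to reserve the larger backing store simply
// keep the old, smaller stack and fall back on the overflow machinery.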
5816
5817
5818// Closures
5819// XXX: there seems to be a lot of code duplication here;
5820// should refactor and consolidate common code.
5821
5822// This closure is used to mark refs into the CMS generation in
5823// the CMS bit map. Called at the first checkpoint. This closure
5824// assumes that we do not need to re-mark dirty cards; if the CMS
5825// generation on which this is used is not the oldest
5826// generation, then this will lose younger_gen cards!
5827
5828MarkRefsIntoClosure::MarkRefsIntoClosure(
5829  MemRegion span, CMSBitMap* bitMap):
5830    _span(span),
5831    _bitMap(bitMap)
5832{
5833  assert(ref_processor() == NULL, "deliberately left NULL");
5834  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5835}
5836
5837void MarkRefsIntoClosure::do_oop(oop obj) {
5838  // if obj points into _span, then mark the corresponding bit in _bitMap
5839  assert(obj->is_oop(), "expected an oop");
5840  HeapWord* addr = (HeapWord*)obj;
5841  if (_span.contains(addr)) {
5842    // this should be made more efficient
5843    _bitMap->mark(addr);
5844  }
5845}
5846
5847void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
5848void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5849
5850ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5851  MemRegion span, CMSBitMap* bitMap):
5852    _span(span),
5853    _bitMap(bitMap)
5854{
5855  assert(ref_processor() == NULL, "deliberately left NULL");
5856  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5857}
5858
5859void ParMarkRefsIntoClosure::do_oop(oop obj) {
5860  // if obj points into _span, then mark the corresponding bit in _bitMap
5861  assert(obj->is_oop(), "expected an oop");
5862  HeapWord* addr = (HeapWord*)obj;
5863  if (_span.contains(addr)) {
5864    // this should be made more efficient
5865    _bitMap->par_mark(addr);
5866  }
5867}
5868
5869void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
5870void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5871
5872// A variant of the above, used for CMS marking verification.
5873MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5874  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5875    _span(span),
5876    _verification_bm(verification_bm),
5877    _cms_bm(cms_bm)
5878{
5879  assert(ref_processor() == NULL, "deliberately left NULL");
5880  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5881}
5882
5883void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5884  // if obj points into _span, then mark the corresponding bit in _verification_bm
5885  assert(obj->is_oop(), "expected an oop");
5886  HeapWord* addr = (HeapWord*)obj;
5887  if (_span.contains(addr)) {
5888    _verification_bm->mark(addr);
5889    if (!_cms_bm->isMarked(addr)) {
5890      Log(gc, verify) log;
5891      ResourceMark rm;
5892      oop(addr)->print_on(log.error_stream());
5893      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5894      fatal("... aborting");
5895    }
5896  }
5897}
5898
5899void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5900void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5901
5902//////////////////////////////////////////////////
5903// MarkRefsIntoAndScanClosure
5904//////////////////////////////////////////////////
5905
5906MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5907                                                       ReferenceProcessor* rp,
5908                                                       CMSBitMap* bit_map,
5909                                                       CMSBitMap* mod_union_table,
5910                                                       CMSMarkStack*  mark_stack,
5911                                                       CMSCollector* collector,
5912                                                       bool should_yield,
5913                                                       bool concurrent_precleaning):
5914  _collector(collector),
5915  _span(span),
5916  _bit_map(bit_map),
5917  _mark_stack(mark_stack),
5918  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5919                      mark_stack, concurrent_precleaning),
5920  _yield(should_yield),
5921  _concurrent_precleaning(concurrent_precleaning),
5922  _freelistLock(NULL)
5923{
5924  // FIXME: Should initialize in base class constructor.
5925  assert(rp != NULL, "ref_processor shouldn't be NULL");
5926  set_ref_processor_internal(rp);
5927}
5928
5929// This closure is used to mark refs into the CMS generation at the
5930// second (final) checkpoint, and to scan and transitively follow
5931// the unmarked oops. It is also used during the concurrent precleaning
5932// phase while scanning objects on dirty cards in the CMS generation.
5933// The marks are made in the marking bit map and the marking stack is
5934// used for keeping the (newly) grey objects during the scan.
5935// The parallel version (ParMarkRefsIntoAndScanClosure) appears further below.
5936void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5937  if (obj != NULL) {
5938    assert(obj->is_oop(), "expected an oop");
5939    HeapWord* addr = (HeapWord*)obj;
5940    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5941    assert(_collector->overflow_list_is_empty(),
5942           "overflow list should be empty");
5943    if (_span.contains(addr) &&
5944        !_bit_map->isMarked(addr)) {
5945      // mark bit map (object is now grey)
5946      _bit_map->mark(addr);
5947      // push on marking stack (stack should be empty), and drain the
5948      // stack by applying this closure to the oops in the oops popped
5949      // from the stack (i.e. blacken the grey objects)
5950      bool res = _mark_stack->push(obj);
5951      assert(res, "Should have space to push on empty stack");
5952      do {
5953        oop new_oop = _mark_stack->pop();
5954        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
5955        assert(_bit_map->isMarked((HeapWord*)new_oop),
5956               "only grey objects on this stack");
5957        // iterate over the oops in this oop, marking and pushing
5958        // the ones in CMS heap (i.e. in _span).
5959        new_oop->oop_iterate(&_pushAndMarkClosure);
5960        // check if it's time to yield
5961        do_yield_check();
5962      } while (!_mark_stack->isEmpty() ||
5963               (!_concurrent_precleaning && take_from_overflow_list()));
5964        // if marking stack is empty, and we are not doing this
5965        // during precleaning, then check the overflow list
5966    }
5967    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5968    assert(_collector->overflow_list_is_empty(),
5969           "overflow list was drained above");
5970
5971    assert(_collector->no_preserved_marks(),
5972           "All preserved marks should have been restored above");
5973  }
5974}
5975
5976void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5977void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5978
5979void MarkRefsIntoAndScanClosure::do_yield_work() {
5980  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5981         "CMS thread should hold CMS token");
5982  assert_lock_strong(_freelistLock);
5983  assert_lock_strong(_bit_map->lock());
5984  // relinquish the free_list_lock and bitMaplock()
5985  _bit_map->lock()->unlock();
5986  _freelistLock->unlock();
5987  ConcurrentMarkSweepThread::desynchronize(true);
5988  _collector->stopTimer();
5989  _collector->incrementYields();
5990
5991  // See the comment in coordinator_yield()
5992  for (unsigned i = 0;
5993       i < CMSYieldSleepCount &&
5994       ConcurrentMarkSweepThread::should_yield() &&
5995       !CMSCollector::foregroundGCIsActive();
5996       ++i) {
5997    os::sleep(Thread::current(), 1, false);
5998  }
5999
6000  ConcurrentMarkSweepThread::synchronize(true);
6001  _freelistLock->lock_without_safepoint_check();
6002  _bit_map->lock()->lock_without_safepoint_check();
6003  _collector->startTimer();
6004}
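
// Note the symmetry above: the bit map lock and the free list lock are
// released in one order (bit map first, then free list) and re-acquired in
// the opposite order (free list first, then bit map). Both re-acquisitions
// use lock_without_safepoint_check(), as is usual for the CMS thread, which
// does not participate in the safepoint protocol.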
6005
6006///////////////////////////////////////////////////////////
6007// ParMarkRefsIntoAndScanClosure: a parallel version of
6008//                                MarkRefsIntoAndScanClosure
6009///////////////////////////////////////////////////////////
6010ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
6011  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6012  CMSBitMap* bit_map, OopTaskQueue* work_queue):
6013  _span(span),
6014  _bit_map(bit_map),
6015  _work_queue(work_queue),
6016  _low_water_mark(MIN2((work_queue->max_elems()/4),
6017                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6018  _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6019{
6020  // FIXME: Should initialize in base class constructor.
6021  assert(rp != NULL, "ref_processor shouldn't be NULL");
6022  set_ref_processor_internal(rp);
6023}
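
// Illustrative arithmetic for _low_water_mark (the numbers are hypothetical):
// with a task queue capacity of, say, 16K entries, max_elems()/4 is 4K; with
// 8 parallel GC threads and CMSWorkQueueDrainThreshold == 10 the alternative
// bound is 80, so trim_queue() in do_oop() below drains the queue back down
// to 80 entries after each push. The MIN2 merely keeps the drain threshold
// from exceeding a quarter of the queue's capacity.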
6024
6025// This closure is used to mark refs into the CMS generation at the
6026// second (final) checkpoint, and to scan and transitively follow
6027// the unmarked oops. The marks are made in the marking bit map and
6028// the work_queue is used for keeping the (newly) grey objects during
6029// the scan phase whence they are also available for stealing by parallel
6030// threads. Since the marking bit map is shared, updates are
6031// synchronized (via CAS).
6032void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6033  if (obj != NULL) {
6034    // Ignore mark word because this could be an already marked oop
6035    // that may be chained at the end of the overflow list.
6036    assert(obj->is_oop(true), "expected an oop");
6037    HeapWord* addr = (HeapWord*)obj;
6038    if (_span.contains(addr) &&
6039        !_bit_map->isMarked(addr)) {
6040      // mark bit map (object will become grey):
6041      // It is possible for several threads to be
6042      // trying to "claim" this object concurrently;
6043      // the unique thread that succeeds in marking the
6044      // object first will do the subsequent push on
6045      // to the work queue (or overflow list).
6046      if (_bit_map->par_mark(addr)) {
6047        // push on work_queue (which may not be empty), and trim the
6048        // queue to an appropriate length by applying this closure to
6049        // the oops in the oops popped from the stack (i.e. blacken the
6050        // grey objects)
6051        bool res = _work_queue->push(obj);
6052        assert(res, "Low water mark should be less than capacity?");
6053        trim_queue(_low_water_mark);
6054      } // Else, another thread claimed the object
6055    }
6056  }
6057}
6058
6059void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6060void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6061
6062// This closure is used to rescan the marked objects on the dirty cards
6063// in the mod union table and the card table proper.
6064size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6065  oop p, MemRegion mr) {
6066
6067  size_t size = 0;
6068  HeapWord* addr = (HeapWord*)p;
6069  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6070  assert(_span.contains(addr), "we are scanning the CMS generation");
6071  // check if it's time to yield
6072  if (do_yield_check()) {
6073    // We yielded for some foreground stop-world work,
6074    // and we have been asked to abort this ongoing preclean cycle.
6075    return 0;
6076  }
6077  if (_bitMap->isMarked(addr)) {
6078    // it's marked; is it potentially uninitialized?
6079    if (p->klass_or_null_acquire() != NULL) {
6080        // an initialized object; ignore mark word in verification below
6081        // since we are running concurrent with mutators
6082        assert(p->is_oop(true), "should be an oop");
6083        if (p->is_objArray()) {
6084          // objArrays are precisely marked; restrict scanning
6085          // to dirty cards only.
6086          size = CompactibleFreeListSpace::adjustObjectSize(
6087                   p->oop_iterate_size(_scanningClosure, mr));
6088        } else {
6089          // A non-array may have been imprecisely marked; we need
6090          // to scan object in its entirety.
6091          size = CompactibleFreeListSpace::adjustObjectSize(
6092                   p->oop_iterate_size(_scanningClosure));
6093        }
6094      #ifdef ASSERT
6095        size_t direct_size =
6096          CompactibleFreeListSpace::adjustObjectSize(p->size());
6097        assert(size == direct_size, "Inconsistency in size");
6098        assert(size >= 3, "Necessary for Printezis marks to work");
6099        HeapWord* start_pbit = addr + 1;
6100        HeapWord* end_pbit = addr + size - 1;
6101        assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
6102               "inconsistent Printezis mark");
6103        // Verify inner mark bits (between Printezis bits) are clear,
6104        // but don't repeat if there are multiple dirty regions for
6105        // the same object, to avoid potential O(N^2) performance.
6106        if (addr != _last_scanned_object) {
6107          _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
6108          _last_scanned_object = addr;
6109        }
6110      #endif // ASSERT
6111    } else {
6112      // An uninitialized object.
6113      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6114      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6115      size = pointer_delta(nextOneAddr + 1, addr);
6116      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6117             "alignment problem");
6118      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6119      // will dirty the card when the klass pointer is installed in the
6120      // object (signaling the completion of initialization).
6121    }
6122  } else {
6123    // Either a not yet marked object or an uninitialized object
6124    if (p->klass_or_null_acquire() == NULL) {
6125      // An uninitialized object, skip to the next card, since
6126      // we may not be able to read its P-bits yet.
6127      assert(size == 0, "Initial value");
6128    } else {
6129      // An object not (yet) reached by marking: we merely need to
6130      // compute its size so as to go look at the next block.
6131      assert(p->is_oop(true), "should be an oop");
6132      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6133    }
6134  }
6135  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6136  return size;
6137}
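
// In summary, the careful scan above distinguishes three cases for the object
// at addr: (1) marked and initialized -- scan it (precisely, i.e. only the
// dirty portion, for objArrays; in its entirety otherwise); (2) marked but
// not yet initialized -- use the Printezis bits merely to compute its size;
// (3) not marked -- either skip to the next card (klass not yet readable) or
// just compute the size so the walk can continue past it.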
6138
6139void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6140  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6141         "CMS thread should hold CMS token");
6142  assert_lock_strong(_freelistLock);
6143  assert_lock_strong(_bitMap->lock());
6144  // relinquish the free_list_lock and bitMaplock()
6145  _bitMap->lock()->unlock();
6146  _freelistLock->unlock();
6147  ConcurrentMarkSweepThread::desynchronize(true);
6148  _collector->stopTimer();
6149  _collector->incrementYields();
6150
6151  // See the comment in coordinator_yield()
6152  for (unsigned i = 0; i < CMSYieldSleepCount &&
6153                   ConcurrentMarkSweepThread::should_yield() &&
6154                   !CMSCollector::foregroundGCIsActive(); ++i) {
6155    os::sleep(Thread::current(), 1, false);
6156  }
6157
6158  ConcurrentMarkSweepThread::synchronize(true);
6159  _freelistLock->lock_without_safepoint_check();
6160  _bitMap->lock()->lock_without_safepoint_check();
6161  _collector->startTimer();
6162}
6163
6164
6165//////////////////////////////////////////////////////////////////
6166// SurvivorSpacePrecleanClosure
6167//////////////////////////////////////////////////////////////////
6168// This (single-threaded) closure is used to preclean the oops in
6169// the survivor spaces.
6170size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6171
6172  HeapWord* addr = (HeapWord*)p;
6173  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6174  assert(!_span.contains(addr), "we are scanning the survivor spaces");
6175  assert(p->klass_or_null() != NULL, "object should be initialized");
6176  // an initialized object; ignore mark word in verification below
6177  // since we are running concurrent with mutators
6178  assert(p->is_oop(true), "should be an oop");
6179  // Note that we do not yield while we iterate over
6180  // the interior oops of p, pushing the relevant ones
6181  // on our marking stack.
6182  size_t size = p->oop_iterate_size(_scanning_closure);
6183  do_yield_check();
6184  // Observe that below, we do not abandon the preclean
6185  // phase as soon as we should; rather we empty the
6186  // marking stack before returning. This is to satisfy
6187  // some existing assertions. In general, it may be a
6188  // good idea to abort immediately and complete the marking
6189  // from the grey objects at a later time.
6190  while (!_mark_stack->isEmpty()) {
6191    oop new_oop = _mark_stack->pop();
6192    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6193    assert(_bit_map->isMarked((HeapWord*)new_oop),
6194           "only grey objects on this stack");
6195    // iterate over the oops in this oop, marking and pushing
6196    // the ones in CMS heap (i.e. in _span).
6197    new_oop->oop_iterate(_scanning_closure);
6198    // check if it's time to yield
6199    do_yield_check();
6200  }
6201  unsigned int after_count =
6202    GenCollectedHeap::heap()->total_collections();
6203  bool abort = (_before_count != after_count) ||
6204               _collector->should_abort_preclean();
6205  return abort ? 0 : size;
6206}
6207
6208void SurvivorSpacePrecleanClosure::do_yield_work() {
6209  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6210         "CMS thread should hold CMS token");
6211  assert_lock_strong(_bit_map->lock());
6212  // Relinquish the bit map lock
6213  _bit_map->lock()->unlock();
6214  ConcurrentMarkSweepThread::desynchronize(true);
6215  _collector->stopTimer();
6216  _collector->incrementYields();
6217
6218  // See the comment in coordinator_yield()
6219  for (unsigned i = 0; i < CMSYieldSleepCount &&
6220                       ConcurrentMarkSweepThread::should_yield() &&
6221                       !CMSCollector::foregroundGCIsActive(); ++i) {
6222    os::sleep(Thread::current(), 1, false);
6223  }
6224
6225  ConcurrentMarkSweepThread::synchronize(true);
6226  _bit_map->lock()->lock_without_safepoint_check();
6227  _collector->startTimer();
6228}
6229
6230// This closure is used to rescan the marked objects on the dirty cards
6231// in the mod union table and the card table proper. In the parallel
6232// case, although the bitMap is shared, we do a single read so the
6233// isMarked() query is "safe".
6234bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6235  // Ignore mark word because we are running concurrent with mutators
6236  assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6237  HeapWord* addr = (HeapWord*)p;
6238  assert(_span.contains(addr), "we are scanning the CMS generation");
6239  bool is_obj_array = false;
6240  #ifdef ASSERT
6241    if (!_parallel) {
6242      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6243      assert(_collector->overflow_list_is_empty(),
6244             "overflow list should be empty");
6245
6246    }
6247  #endif // ASSERT
6248  if (_bit_map->isMarked(addr)) {
6249    // Obj arrays are precisely marked, non-arrays are not;
6250    // so we scan objArrays precisely and non-arrays in their
6251    // entirety.
6252    if (p->is_objArray()) {
6253      is_obj_array = true;
6254      if (_parallel) {
6255        p->oop_iterate(_par_scan_closure, mr);
6256      } else {
6257        p->oop_iterate(_scan_closure, mr);
6258      }
6259    } else {
6260      if (_parallel) {
6261        p->oop_iterate(_par_scan_closure);
6262      } else {
6263        p->oop_iterate(_scan_closure);
6264      }
6265    }
6266  }
6267  #ifdef ASSERT
6268    if (!_parallel) {
6269      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6270      assert(_collector->overflow_list_is_empty(),
6271             "overflow list should be empty");
6272
6273    }
6274  #endif // ASSERT
6275  return is_obj_array;
6276}
6277
6278MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6279                        MemRegion span,
6280                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
6281                        bool should_yield, bool verifying):
6282  _collector(collector),
6283  _span(span),
6284  _bitMap(bitMap),
6285  _mut(&collector->_modUnionTable),
6286  _markStack(markStack),
6287  _yield(should_yield),
6288  _skipBits(0)
6289{
6290  assert(_markStack->isEmpty(), "stack should be empty");
6291  _finger = _bitMap->startWord();
6292  _threshold = _finger;
6293  assert(_collector->_restart_addr == NULL, "Sanity check");
6294  assert(_span.contains(_finger), "Out of bounds _finger?");
6295  DEBUG_ONLY(_verifying = verifying;)
6296}
6297
6298void MarkFromRootsClosure::reset(HeapWord* addr) {
6299  assert(_markStack->isEmpty(), "would cause duplicates on stack");
6300  assert(_span.contains(addr), "Out of bounds _finger?");
6301  _finger = addr;
6302  _threshold = (HeapWord*)round_to(
6303                 (intptr_t)_finger, CardTableModRefBS::card_size);
6304}
6305
6306// Should revisit to see if this should be restructured for
6307// greater efficiency.
6308bool MarkFromRootsClosure::do_bit(size_t offset) {
6309  if (_skipBits > 0) {
6310    _skipBits--;
6311    return true;
6312  }
6313  // convert offset into a HeapWord*
6314  HeapWord* addr = _bitMap->startWord() + offset;
6315  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6316         "address out of range");
6317  assert(_bitMap->isMarked(addr), "tautology");
6318  if (_bitMap->isMarked(addr+1)) {
6319    // this is an allocated but not yet initialized object
6320    assert(_skipBits == 0, "tautology");
6321    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6322    oop p = oop(addr);
6323    if (p->klass_or_null_acquire() == NULL) {
6324      DEBUG_ONLY(if (!_verifying) {)
6325        // We re-dirty the cards on which this object lies and increase
6326        // the _threshold so that we'll come back to scan this object
6327        // during the preclean or remark phase. (CMSCleanOnEnter)
6328        if (CMSCleanOnEnter) {
6329          size_t sz = _collector->block_size_using_printezis_bits(addr);
6330          HeapWord* end_card_addr   = (HeapWord*)round_to(
6331                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6332          MemRegion redirty_range = MemRegion(addr, end_card_addr);
6333          assert(!redirty_range.is_empty(), "Arithmetical tautology");
6334          // Bump _threshold to end_card_addr; note that
6335          // _threshold cannot possibly exceed end_card_addr, anyhow.
6336          // This prevents future clearing of the card as the scan proceeds
6337          // to the right.
6338          assert(_threshold <= end_card_addr,
6339                 "Because we are just scanning into this object");
6340          if (_threshold < end_card_addr) {
6341            _threshold = end_card_addr;
6342          }
6343          if (p->klass_or_null_acquire() != NULL) {
6344            // Redirty the range of cards...
6345            _mut->mark_range(redirty_range);
6346          } // ...else the setting of klass will dirty the card anyway.
6347        }
6348      DEBUG_ONLY(})
6349      return true;
6350    }
6351  }
6352  scanOopsInOop(addr);
6353  return true;
6354}
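
// Sketch of the skip-bits handling above: when the bits at both addr and
// addr+1 are set, the block was allocated live during the concurrent phase
// but its klass pointer may not be installed yet ("Printezis marks"). The two
// extra bits (at addr+1 and at the block's last word) are not object starts,
// so _skipBits = 2 makes the bit-map iteration ignore them; the object itself
// is revisited later via dirty-card scanning during preclean or remark.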
6355
6356// We take a break if we've been at this for a while,
6357// so as to avoid monopolizing the locks involved.
6358void MarkFromRootsClosure::do_yield_work() {
6359  // First give up the locks, then yield, then re-lock
6360  // We should probably use a constructor/destructor idiom to
6361  // do this unlock/lock or modify the MutexUnlocker class to
6362  // serve our purpose. XXX
6363  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6364         "CMS thread should hold CMS token");
6365  assert_lock_strong(_bitMap->lock());
6366  _bitMap->lock()->unlock();
6367  ConcurrentMarkSweepThread::desynchronize(true);
6368  _collector->stopTimer();
6369  _collector->incrementYields();
6370
6371  // See the comment in coordinator_yield()
6372  for (unsigned i = 0; i < CMSYieldSleepCount &&
6373                       ConcurrentMarkSweepThread::should_yield() &&
6374                       !CMSCollector::foregroundGCIsActive(); ++i) {
6375    os::sleep(Thread::current(), 1, false);
6376  }
6377
6378  ConcurrentMarkSweepThread::synchronize(true);
6379  _bitMap->lock()->lock_without_safepoint_check();
6380  _collector->startTimer();
6381}
6382
6383void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6384  assert(_bitMap->isMarked(ptr), "expected bit to be set");
6385  assert(_markStack->isEmpty(),
6386         "should drain stack to limit stack usage");
6387  // convert ptr to an oop preparatory to scanning
6388  oop obj = oop(ptr);
6389  // Ignore mark word in verification below, since we
6390  // may be running concurrent with mutators.
6391  assert(obj->is_oop(true), "should be an oop");
6392  assert(_finger <= ptr, "_finger runneth ahead");
6393  // advance the finger to right end of this object
6394  _finger = ptr + obj->size();
6395  assert(_finger > ptr, "we just incremented it above");
6396  // On large heaps, it may take us some time to get through
6397  // the marking phase. During this time it's possible that a lot
6398  // of mutations have accumulated in the card table and the mod
6399  // union table --
6400  // these mutation records are redundant until we have
6401  // actually traced into the corresponding card.
6402  // Here, we check whether advancing the finger would make
6403  // us cross into a new card, and if so clear corresponding
6404  // cards in the MUT (preclean them in the card-table in the
6405  // future).
6406
6407  DEBUG_ONLY(if (!_verifying) {)
6408    // The clean-on-enter optimization is disabled by default,
6409    // until we fix 6178663.
6410    if (CMSCleanOnEnter && (_finger > _threshold)) {
6411      // [_threshold, _finger) represents the interval
6412      // of cards to be cleared  in MUT (or precleaned in card table).
6413      // The set of cards to be cleared is all those that overlap
6414      // with the interval [_threshold, _finger); note that
6415      // _threshold is always kept card-aligned but _finger isn't
6416      // always card-aligned.
6417      HeapWord* old_threshold = _threshold;
6418      assert(old_threshold == (HeapWord*)round_to(
6419              (intptr_t)old_threshold, CardTableModRefBS::card_size),
6420             "_threshold should always be card-aligned");
6421      _threshold = (HeapWord*)round_to(
6422                     (intptr_t)_finger, CardTableModRefBS::card_size);
6423      MemRegion mr(old_threshold, _threshold);
6424      assert(!mr.is_empty(), "Control point invariant");
6425      assert(_span.contains(mr), "Should clear within span");
6426      _mut->clear_range(mr);
6427    }
6428  DEBUG_ONLY(})
6429  // Note: the finger doesn't advance while we drain
6430  // the stack below.
6431  PushOrMarkClosure pushOrMarkClosure(_collector,
6432                                      _span, _bitMap, _markStack,
6433                                      _finger, this);
6434  bool res = _markStack->push(obj);
6435  assert(res, "Empty non-zero size stack should have space for single push");
6436  while (!_markStack->isEmpty()) {
6437    oop new_oop = _markStack->pop();
6438    // Skip verifying header mark word below because we are
6439    // running concurrent with mutators.
6440    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6441    // now scan this oop's oops
6442    new_oop->oop_iterate(&pushOrMarkClosure);
6443    do_yield_check();
6444  }
6445  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6446}
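
// Card-clearing sketch for the clean-on-enter path above (512-byte cards
// assumed, byte offsets hypothetical): if _threshold is card-aligned at
// 0x8000 and the finger advances to 0x8350, the new _threshold becomes 0x8400
// and the MUT range [0x8000, 0x8400) is cleared -- we are about to trace
// directly into those cards, so their mod-union records are redundant.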
6447
6448ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6449                       CMSCollector* collector, MemRegion span,
6450                       CMSBitMap* bit_map,
6451                       OopTaskQueue* work_queue,
6452                       CMSMarkStack*  overflow_stack):
6453  _collector(collector),
6454  _whole_span(collector->_span),
6455  _span(span),
6456  _bit_map(bit_map),
6457  _mut(&collector->_modUnionTable),
6458  _work_queue(work_queue),
6459  _overflow_stack(overflow_stack),
6460  _skip_bits(0),
6461  _task(task)
6462{
6463  assert(_work_queue->size() == 0, "work_queue should be empty");
6464  _finger = span.start();
6465  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6466  assert(_span.contains(_finger), "Out of bounds _finger?");
6467}
6468
6469// Should revisit to see if this should be restructured for
6470// greater efficiency.
6471bool ParMarkFromRootsClosure::do_bit(size_t offset) {
6472  if (_skip_bits > 0) {
6473    _skip_bits--;
6474    return true;
6475  }
6476  // convert offset into a HeapWord*
6477  HeapWord* addr = _bit_map->startWord() + offset;
6478  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6479         "address out of range");
6480  assert(_bit_map->isMarked(addr), "tautology");
6481  if (_bit_map->isMarked(addr+1)) {
6482    // this is an allocated object that might not yet be initialized
6483    assert(_skip_bits == 0, "tautology");
6484    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6485    oop p = oop(addr);
6486    if (p->klass_or_null_acquire() == NULL) {
6487      // in the case of the Clean-on-Enter optimization, redirty the card
6488      // and avoid clearing it by increasing the threshold.
6489      return true;
6490    }
6491  }
6492  scan_oops_in_oop(addr);
6493  return true;
6494}
6495
6496void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6497  assert(_bit_map->isMarked(ptr), "expected bit to be set");
6498  // Should we assert that our work queue is empty or
6499  // below some drain limit?
6500  assert(_work_queue->size() == 0,
6501         "should drain stack to limit stack usage");
6502  // convert ptr to an oop preparatory to scanning
6503  oop obj = oop(ptr);
6504  // Ignore the mark word in the verification below, since we
6505  // may be running concurrently with mutators.
6506  assert(obj->is_oop(true), "should be an oop");
6507  assert(_finger <= ptr, "_finger runneth ahead");
6508  // advance the finger to right end of this object
6509  _finger = ptr + obj->size();
6510  assert(_finger > ptr, "we just incremented it above");
6511  // On large heaps, it may take us some time to get through
6512  // the marking phase. During
6513  // this time it's possible that a lot of mutations have
6514  // accumulated in the card table and the mod union table --
6515  // these mutation records are redundant until we have
6516  // actually traced into the corresponding card.
6517  // Here, we check whether advancing the finger would make
6518  // us cross into a new card, and if so clear corresponding
6519  // cards in the MUT (preclean them in the card-table in the
6520  // future).
6521
6522  // The clean-on-enter optimization is disabled by default,
6523  // until we fix 6178663.
6524  if (CMSCleanOnEnter && (_finger > _threshold)) {
6525    // [_threshold, _finger) represents the interval
6526    // of cards to be cleared in MUT (or precleaned in card table).
6527    // The set of cards to be cleared is all those that overlap
6528    // with the interval [_threshold, _finger); note that
6529    // _threshold is always kept card-aligned but _finger isn't
6530    // always card-aligned.
6531    HeapWord* old_threshold = _threshold;
6532    assert(old_threshold == (HeapWord*)round_to(
6533            (intptr_t)old_threshold, CardTableModRefBS::card_size),
6534           "_threshold should always be card-aligned");
6535    _threshold = (HeapWord*)round_to(
6536                   (intptr_t)_finger, CardTableModRefBS::card_size);
6537    MemRegion mr(old_threshold, _threshold);
6538    assert(!mr.is_empty(), "Control point invariant");
6539    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6540    _mut->clear_range(mr);
6541  }
6542
6543  // Note: the local finger doesn't advance while we drain
6544  // the stack below, but the global finger sure can and will.
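  // (The global finger marks the limit of the region claimed for scanning so
  //  far; an oop at or beyond it lies in a chunk that some worker will still
  //  claim and scan, which is why the push decision in
  //  ParPushOrMarkClosure::do_oop below can skip such oops.)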
6545  HeapWord* volatile* gfa = _task->global_finger_addr();
6546  ParPushOrMarkClosure pushOrMarkClosure(_collector,
6547                                         _span, _bit_map,
6548                                         _work_queue,
6549                                         _overflow_stack,
6550                                         _finger,
6551                                         gfa, this);
6552  bool res = _work_queue->push(obj);   // overflow could occur here
6553  assert(res, "Will hold once we use workqueues");
6554  while (true) {
6555    oop new_oop;
6556    if (!_work_queue->pop_local(new_oop)) {
6557      // We emptied our work_queue; check whether there is work that can
6558      // be taken from the overflow stack.
6559      if (CMSConcMarkingTask::get_work_from_overflow_stack(
6560            _overflow_stack, _work_queue)) {
6561        do_yield_check();
6562        continue;
6563      } else {  // done
6564        break;
6565      }
6566    }
6567    // Skip verifying the header mark word below because we are
6568    // running concurrently with mutators.
6569    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6570    // now scan this oop's oops
6571    new_oop->oop_iterate(&pushOrMarkClosure);
6572    do_yield_check();
6573  }
6574  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6575}
6576
6577// Yield in response to a request from VM Thread or
6578// from mutators.
6579void ParMarkFromRootsClosure::do_yield_work() {
6580  assert(_task != NULL, "sanity");
6581  _task->yield();
6582}
6583
6584// A variant of the above used for verifying CMS marking work.
6585MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6586                        MemRegion span,
6587                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6588                        CMSMarkStack*  mark_stack):
6589  _collector(collector),
6590  _span(span),
6591  _verification_bm(verification_bm),
6592  _cms_bm(cms_bm),
6593  _mark_stack(mark_stack),
6594  _pam_verify_closure(collector, span, verification_bm, cms_bm,
6595                      mark_stack)
6596{
6597  assert(_mark_stack->isEmpty(), "stack should be empty");
6598  _finger = _verification_bm->startWord();
6599  assert(_collector->_restart_addr == NULL, "Sanity check");
6600  assert(_span.contains(_finger), "Out of bounds _finger?");
6601}
6602
6603void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6604  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6605  assert(_span.contains(addr), "Out of bounds _finger?");
6606  _finger = addr;
6607}
6608
6609// Should revisit to see if this should be restructured for
6610// greater efficiency.
6611bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6612  // convert offset into a HeapWord*
6613  HeapWord* addr = _verification_bm->startWord() + offset;
6614  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6615         "address out of range");
6616  assert(_verification_bm->isMarked(addr), "tautology");
6617  assert(_cms_bm->isMarked(addr), "tautology");
6618
6619  assert(_mark_stack->isEmpty(),
6620         "should drain stack to limit stack usage");
6621  // convert addr to an oop preparatory to scanning
6622  oop obj = oop(addr);
6623  assert(obj->is_oop(), "should be an oop");
6624  assert(_finger <= addr, "_finger runneth ahead");
6625  // advance the finger to right end of this object
6626  _finger = addr + obj->size();
6627  assert(_finger > addr, "we just incremented it above");
6628  // Note: the finger doesn't advance while we drain
6629  // the stack below.
6630  bool res = _mark_stack->push(obj);
6631  assert(res, "Empty non-zero size stack should have space for single push");
6632  while (!_mark_stack->isEmpty()) {
6633    oop new_oop = _mark_stack->pop();
6634    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6635    // now scan this oop's oops
6636    new_oop->oop_iterate(&_pam_verify_closure);
6637  }
6638  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6639  return true;
6640}
6641
6642PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6643  CMSCollector* collector, MemRegion span,
6644  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6645  CMSMarkStack*  mark_stack):
6646  MetadataAwareOopClosure(collector->ref_processor()),
6647  _collector(collector),
6648  _span(span),
6649  _verification_bm(verification_bm),
6650  _cms_bm(cms_bm),
6651  _mark_stack(mark_stack)
6652{ }
6653
6654void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6655void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6656
6657// Upon stack overflow, we discard (part of) the stack,
6658// remembering the least address amongst those discarded
6659// in CMSCollector's _restart_address.
6660void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6661  // Remember the least grey address discarded
6662  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6663  _collector->lower_restart_addr(ra);
6664  _mark_stack->reset();  // discard stack contents
6665  _mark_stack->expand(); // expand the stack if possible
6666}
6667
6668void PushAndMarkVerifyClosure::do_oop(oop obj) {
6669  assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6670  HeapWord* addr = (HeapWord*)obj;
6671  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6672    // Oop lies in _span and isn't yet grey or black
6673    _verification_bm->mark(addr);            // now grey
6674    if (!_cms_bm->isMarked(addr)) {
6675      Log(gc, verify) log;
6676      ResourceMark rm;
6677      oop(addr)->print_on(log.error_stream());
6678      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6679      fatal("... aborting");
6680    }
6681
6682    if (!_mark_stack->push(obj)) { // stack overflow
6683      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6684      assert(_mark_stack->isFull(), "Else push should have succeeded");
6685      handle_stack_overflow(addr);
6686    }
6687    // anything including and to the right of _finger
6688    // will be scanned as we iterate over the remainder of the
6689    // bit map
6690  }
6691}
6692
6693PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6694                     MemRegion span,
6695                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
6696                     HeapWord* finger, MarkFromRootsClosure* parent) :
6697  MetadataAwareOopClosure(collector->ref_processor()),
6698  _collector(collector),
6699  _span(span),
6700  _bitMap(bitMap),
6701  _markStack(markStack),
6702  _finger(finger),
6703  _parent(parent)
6704{ }
6705
6706ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6707                                           MemRegion span,
6708                                           CMSBitMap* bit_map,
6709                                           OopTaskQueue* work_queue,
6710                                           CMSMarkStack*  overflow_stack,
6711                                           HeapWord* finger,
6712                                           HeapWord* volatile* global_finger_addr,
6713                                           ParMarkFromRootsClosure* parent) :
6714  MetadataAwareOopClosure(collector->ref_processor()),
6715  _collector(collector),
6716  _whole_span(collector->_span),
6717  _span(span),
6718  _bit_map(bit_map),
6719  _work_queue(work_queue),
6720  _overflow_stack(overflow_stack),
6721  _finger(finger),
6722  _global_finger_addr(global_finger_addr),
6723  _parent(parent)
6724{ }
6725
6726// Assumes thread-safe access by callers, who are
6727// responsible for mutual exclusion.
6728void CMSCollector::lower_restart_addr(HeapWord* low) {
6729  assert(_span.contains(low), "Out of bounds addr");
6730  if (_restart_addr == NULL) {
6731    _restart_addr = low;
6732  } else {
6733    _restart_addr = MIN2(_restart_addr, low);
6734  }
6735}
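// (Overflow recovery, in outline: on marking-stack or work-queue overflow the
//  closures below discard (part of) their stack contents and record the least
//  discarded address via lower_restart_addr(); the marking code later rescans
//  the bit map from _restart_addr so the discarded grey objects are traced
//  again.)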
6736
6737// Upon stack overflow, we discard (part of) the stack,
6738// remembering the least address amongst those discarded
6739// in CMSCollector's _restart_address.
6740void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6741  // Remember the least grey address discarded
6742  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6743  _collector->lower_restart_addr(ra);
6744  _markStack->reset();  // discard stack contents
6745  _markStack->expand(); // expand the stack if possible
6746}
6747
6748// Upon stack overflow, we discard (part of) the stack,
6749// remembering the least address amongst those discarded
6750// in CMSCollector's _restart_address.
6751void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6752  // We need to do this under a mutex to prevent other
6753  // workers from interfering with the work done below.
6754  MutexLockerEx ml(_overflow_stack->par_lock(),
6755                   Mutex::_no_safepoint_check_flag);
6756  // Remember the least grey address discarded
6757  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6758  _collector->lower_restart_addr(ra);
6759  _overflow_stack->reset();  // discard stack contents
6760  _overflow_stack->expand(); // expand the stack if possible
6761}
6762
6763void PushOrMarkClosure::do_oop(oop obj) {
6764  // Ignore the mark word because we are running concurrently with mutators.
6765  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6766  HeapWord* addr = (HeapWord*)obj;
6767  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6768    // Oop lies in _span and isn't yet grey or black
6769    _bitMap->mark(addr);            // now grey
6770    if (addr < _finger) {
6771      // the bit map iteration has already either passed, or
6772      // sampled, this bit in the bit map; we'll need to
6773      // use the marking stack to scan this oop's oops.
6774      bool simulate_overflow = false;
6775      NOT_PRODUCT(
6776        if (CMSMarkStackOverflowALot &&
6777            _collector->simulate_overflow()) {
6778          // simulate a stack overflow
6779          simulate_overflow = true;
6780        }
6781      )
6782      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6783        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6784        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6785        handle_stack_overflow(addr);
6786      }
6787    }
6788    // anything including and to the right of _finger
6789    // will be scanned as we iterate over the remainder of the
6790    // bit map
6791    do_yield_check();
6792  }
6793}
6794
6795void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
6796void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6797
6798void ParPushOrMarkClosure::do_oop(oop obj) {
6799  // Ignore the mark word because we are running concurrently with mutators.
6800  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6801  HeapWord* addr = (HeapWord*)obj;
6802  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6803    // Oop lies in _whole_span and isn't yet grey or black
6804    // We read the global_finger (volatile read) strictly after marking the oop
6805    bool res = _bit_map->par_mark(addr);    // now grey
6806    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6807    // Should we push this marked oop on our stack?
6808    // -- if someone else marked it, nothing to do
6809    // -- if target oop is above global finger nothing to do
6810    // -- if target oop is in chunk and above local finger
6811    //      then nothing to do
6812    // -- else push on work queue
6813    if (   !res       // someone else marked it, they will deal with it
6814        || (addr >= *gfa)  // will be scanned in a later task
6815        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6816      return;
6817    }
6818    // the bit map iteration has already either passed, or
6819    // sampled, this bit in the bit map; we'll need to
6820    // use the marking stack to scan this oop's oops.
6821    bool simulate_overflow = false;
6822    NOT_PRODUCT(
6823      if (CMSMarkStackOverflowALot &&
6824          _collector->simulate_overflow()) {
6825        // simulate a stack overflow
6826        simulate_overflow = true;
6827      }
6828    )
6829    if (simulate_overflow ||
6830        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6831      // stack overflow
6832      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6833      // We cannot assert that the overflow stack is full because
6834      // it may have been emptied since.
6835      assert(simulate_overflow ||
6836             _work_queue->size() == _work_queue->max_elems(),
6837            "Else push should have succeeded");
6838      handle_stack_overflow(addr);
6839    }
6840    do_yield_check();
6841  }
6842}
6843
6844void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
6845void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
6846
6847PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6848                                       MemRegion span,
6849                                       ReferenceProcessor* rp,
6850                                       CMSBitMap* bit_map,
6851                                       CMSBitMap* mod_union_table,
6852                                       CMSMarkStack*  mark_stack,
6853                                       bool           concurrent_precleaning):
6854  MetadataAwareOopClosure(rp),
6855  _collector(collector),
6856  _span(span),
6857  _bit_map(bit_map),
6858  _mod_union_table(mod_union_table),
6859  _mark_stack(mark_stack),
6860  _concurrent_precleaning(concurrent_precleaning)
6861{
6862  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6863}
6864
6865// Grey object rescan during pre-cleaning and second checkpoint phases --
6866// the non-parallel version (the parallel version appears further below).
6867void PushAndMarkClosure::do_oop(oop obj) {
6868  // Ignore mark word verification. During concurrent precleaning,
6869  // the object monitor may be locked; during the checkpoint
6870  // phases, the object may already have been reached by a different
6871  // path and may be at the end of the global overflow list (so
6872  // the mark word may be NULL).
6873  assert(obj->is_oop_or_null(true /* ignore mark word */),
6874         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6875  HeapWord* addr = (HeapWord*)obj;
6876  // Check if oop points into the CMS generation
6877  // and is not marked
6878  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6879    // a white object ...
6880    _bit_map->mark(addr);         // ... now grey
6881    // push on the marking stack (grey set)
6882    bool simulate_overflow = false;
6883    NOT_PRODUCT(
6884      if (CMSMarkStackOverflowALot &&
6885          _collector->simulate_overflow()) {
6886        // simulate a stack overflow
6887        simulate_overflow = true;
6888      }
6889    )
6890    if (simulate_overflow || !_mark_stack->push(obj)) {
6891      if (_concurrent_precleaning) {
6892         // During precleaning we can just dirty the appropriate card(s)
6893         // in the mod union table, thus ensuring that the object remains
6894         // in the grey set, and continue. In the case of object arrays
6895         // we need to dirty all of the cards that the object spans,
6896         // since the rescan of object arrays will be limited to the
6897         // dirty cards.
6898         // Note that no one can be interfering with us in this action
6899         // of dirtying the mod union table, so no locking or atomics
6900         // are required.
6901         if (obj->is_objArray()) {
6902           size_t sz = obj->size();
6903           HeapWord* end_card_addr = (HeapWord*)round_to(
6904                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6905           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6906           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6907           _mod_union_table->mark_range(redirty_range);
6908         } else {
6909           _mod_union_table->mark(addr);
6910         }
6911         _collector->_ser_pmc_preclean_ovflw++;
6912      } else {
6913         // During the remark phase, we need to remember this oop
6914         // in the overflow list.
6915         _collector->push_on_overflow_list(obj);
6916         _collector->_ser_pmc_remark_ovflw++;
6917      }
6918    }
6919  }
6920}
6921
6922ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6923                                             MemRegion span,
6924                                             ReferenceProcessor* rp,
6925                                             CMSBitMap* bit_map,
6926                                             OopTaskQueue* work_queue):
6927  MetadataAwareOopClosure(rp),
6928  _collector(collector),
6929  _span(span),
6930  _bit_map(bit_map),
6931  _work_queue(work_queue)
6932{
6933  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6934}
6935
6936void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
6937void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
6938
6939// Grey object rescan during second checkpoint phase --
6940// the parallel version.
6941void ParPushAndMarkClosure::do_oop(oop obj) {
6942  // In the assert below, we ignore the mark word because
6943  // this oop may point to an already visited object that is
6944  // on the overflow stack (in which case the mark word has
6945  // been hijacked for chaining into the overflow stack --
6946  // if this is the last object in the overflow stack then
6947  // its mark word will be NULL). Because this object may
6948  // have been subsequently popped off the global overflow
6949  // stack, and the mark word possibly restored to the prototypical
6950  // value, by the time we get to examine this failing assert in
6951  // the debugger, is_oop_or_null(false) may subsequently start
6952  // to hold.
6953  assert(obj->is_oop_or_null(true),
6954         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6955  HeapWord* addr = (HeapWord*)obj;
6956  // Check if oop points into the CMS generation
6957  // and is not marked
6958  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6959    // a white object ...
6960    // If we manage to "claim" the object, by being the
6961    // first thread to mark it, then we push it on our
6962    // marking stack
6963    if (_bit_map->par_mark(addr)) {     // ... now grey
6964      // push on work queue (grey set)
6965      bool simulate_overflow = false;
6966      NOT_PRODUCT(
6967        if (CMSMarkStackOverflowALot &&
6968            _collector->par_simulate_overflow()) {
6969          // simulate a stack overflow
6970          simulate_overflow = true;
6971        }
6972      )
6973      if (simulate_overflow || !_work_queue->push(obj)) {
6974        _collector->par_push_on_overflow_list(obj);
6975        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
6976      }
6977    } // Else, some other thread got there first
6978  }
6979}
6980
6981void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
6982void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
6983
6984void CMSPrecleanRefsYieldClosure::do_yield_work() {
6985  Mutex* bml = _collector->bitMapLock();
6986  assert_lock_strong(bml);
6987  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6988         "CMS thread should hold CMS token");
6989
6990  bml->unlock();
6991  ConcurrentMarkSweepThread::desynchronize(true);
6992
6993  _collector->stopTimer();
6994  _collector->incrementYields();
6995
6996  // See the comment in coordinator_yield()
6997  for (unsigned i = 0; i < CMSYieldSleepCount &&
6998                       ConcurrentMarkSweepThread::should_yield() &&
6999                       !CMSCollector::foregroundGCIsActive(); ++i) {
7000    os::sleep(Thread::current(), 1, false);
7001  }
7002
7003  ConcurrentMarkSweepThread::synchronize(true);
7004  bml->lock();
7005
7006  _collector->startTimer();
7007}
7008
7009bool CMSPrecleanRefsYieldClosure::should_return() {
7010  if (ConcurrentMarkSweepThread::should_yield()) {
7011    do_yield_work();
7012  }
7013  return _collector->foregroundGCIsActive();
7014}
7015
7016void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7017  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7018         "mr should be aligned to start at a card boundary");
7019  // We'd like to assert:
7020  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7021  //        "mr should be a range of cards");
7022  // However, that would be too strong in one case -- the last
7023  // partition ends at _unallocated_block which, in general, can be
7024  // an arbitrary boundary, not necessarily card aligned.
7025  _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
7026  _space->object_iterate_mem(mr, &_scan_cl);
7027}
7028
7029SweepClosure::SweepClosure(CMSCollector* collector,
7030                           ConcurrentMarkSweepGeneration* g,
7031                           CMSBitMap* bitMap, bool should_yield) :
7032  _collector(collector),
7033  _g(g),
7034  _sp(g->cmsSpace()),
7035  _limit(_sp->sweep_limit()),
7036  _freelistLock(_sp->freelistLock()),
7037  _bitMap(bitMap),
7038  _yield(should_yield),
7039  _inFreeRange(false),           // No free range at beginning of sweep
7040  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7041  _lastFreeRangeCoalesced(false),
7042  _freeFinger(g->used_region().start())
7043{
7044  NOT_PRODUCT(
7045    _numObjectsFreed = 0;
7046    _numWordsFreed   = 0;
7047    _numObjectsLive = 0;
7048    _numWordsLive = 0;
7049    _numObjectsAlreadyFree = 0;
7050    _numWordsAlreadyFree = 0;
7051    _last_fc = NULL;
7052
7053    _sp->initializeIndexedFreeListArrayReturnedBytes();
7054    _sp->dictionary()->initialize_dict_returned_bytes();
7055  )
7056  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7057         "sweep _limit out of bounds");
7058  log_develop_trace(gc, sweep)("====================");
7059  log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
7060}
7061
7062void SweepClosure::print_on(outputStream* st) const {
7063  st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7064               p2i(_sp->bottom()), p2i(_sp->end()));
7065  st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7066  st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7067  NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7068  st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7069               _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7070}
7071
7072#ifndef PRODUCT
7073// Assertion checking only:  no useful work in product mode --
7074// however, if any of the flags below become product flags,
7075// you may need to review this code to see if it needs to be
7076// enabled in product mode.
7077SweepClosure::~SweepClosure() {
7078  assert_lock_strong(_freelistLock);
7079  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7080         "sweep _limit out of bounds");
7081  if (inFreeRange()) {
7082    Log(gc, sweep) log;
7083    log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7084    ResourceMark rm;
7085    print_on(log.error_stream());
7086    ShouldNotReachHere();
7087  }
7088
7089  if (log_is_enabled(Debug, gc, sweep)) {
7090    log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7091                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7092    log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7093                         _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7094    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7095    log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7096  }
7097
7098  if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7099    size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7100    size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7101    size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7102    log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
7103                         returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7104  }
7105  log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7106  log_develop_trace(gc, sweep)("================");
7107}
7108#endif  // PRODUCT
7109
7110void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7111    bool freeRangeInFreeLists) {
7112  log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
7113                               p2i(freeFinger), freeRangeInFreeLists);
7114  assert(!inFreeRange(), "Trampling existing free range");
7115  set_inFreeRange(true);
7116  set_lastFreeRangeCoalesced(false);
7117
7118  set_freeFinger(freeFinger);
7119  set_freeRangeInFreeLists(freeRangeInFreeLists);
7120  if (CMSTestInFreeList) {
7121    if (freeRangeInFreeLists) {
7122      FreeChunk* fc = (FreeChunk*) freeFinger;
7123      assert(fc->is_free(), "A chunk on the free list should be free.");
7124      assert(fc->size() > 0, "Free range should have a size");
7125      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7126    }
7127  }
7128}
7129
7130// Note that the sweeper runs concurrently with mutators. Thus,
7131// it is possible for direct allocation in this generation to happen
7132// in the middle of the sweep. Note that the sweeper also coalesces
7133// contiguous free blocks. Thus, unless the sweeper and the allocator
7134// synchronize appropriately, freshly allocated blocks may get swept up.
7135// This is accomplished by the sweeper locking the free lists while
7136// it is sweeping. Thus blocks that are determined to be free are
7137// indeed free. There is however one additional complication:
7138// blocks that have been allocated since the final checkpoint and
7139// mark will not have been marked and so would be treated as
7140// unreachable and swept up. To prevent this, the allocator marks
7141// the bit map when allocating during the sweep phase. This leads,
7142// however, to a further complication -- objects may have been allocated
7143// but not yet initialized -- in the sense that the header isn't yet
7144// installed. The sweeper cannot then determine the size of the block
7145// in order to skip over it. To deal with this case, we use a technique
7146// (due to Printezis) to encode such uninitialized block sizes in the
7147// bit map. Since the bit map uses a bit per every HeapWord, but the
7148// CMS generation has a minimum object size of 3 HeapWords, it follows
7149// that "normal marks" won't be adjacent in the bit map (there will
7150// always be at least two 0 bits between successive 1 bits). We make use
7151// of these "unused" bits to represent uninitialized blocks -- the bit
7152// corresponding to the start of the uninitialized object and the next
7153// bit are both set. Finally, a 1 bit marks the end of the object that
7154// started with the two consecutive 1 bits to indicate its potentially
7155// uninitialized state.
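// For illustration: an uninitialized block starting at word index k that will
// eventually span n >= 3 words is encoded by setting bits k and k+1 (the
// "Printezis pair") and bit k+n-1 for its last word, so the sweeper can
// recover n from the bit map alone; see do_live_chunk() below.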
7156
7157size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7158  FreeChunk* fc = (FreeChunk*)addr;
7159  size_t res;
7160
7161  // Check if we are done sweeping. Below we check "addr >= _limit" rather
7162  // than "addr == _limit" because although _limit was a block boundary when
7163  // we started the sweep, it may no longer be one because heap expansion
7164  // may have caused us to coalesce the block ending at the address _limit
7165  // with a newly expanded chunk (this happens when _limit was set to the
7166  // previous _end of the space), so we may have stepped past _limit:
7167  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7168  if (addr >= _limit) { // we have swept up to or past the limit: finish up
7169    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7170           "sweep _limit out of bounds");
7171    assert(addr < _sp->end(), "addr out of bounds");
7172    // Flush any free range we might be holding as a single
7173    // coalesced chunk to the appropriate free list.
7174    if (inFreeRange()) {
7175      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7176             "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7177      flush_cur_free_chunk(freeFinger(),
7178                           pointer_delta(addr, freeFinger()));
7179      log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
7180                                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7181                                   lastFreeRangeCoalesced() ? 1 : 0);
7182    }
7183
7184    // help the iterator loop finish
7185    return pointer_delta(_sp->end(), addr);
7186  }
7187
7188  assert(addr < _limit, "sweep invariant");
7189  // check if we should yield
7190  do_yield_check(addr);
7191  if (fc->is_free()) {
7192    // Chunk that is already free
7193    res = fc->size();
7194    do_already_free_chunk(fc);
7195    debug_only(_sp->verifyFreeLists());
7196    // If we flush the chunk at hand in lookahead_and_flush()
7197    // and it's coalesced with a preceding chunk, then the
7198    // process of "mangling" the payload of the coalesced block
7199    // will cause erasure of the size information from the
7200    // (erstwhile) header of all the coalesced blocks but the
7201    // first, so the first disjunct in the assert will not hold
7202    // in that specific case (in which case the second disjunct
7203    // will hold).
7204    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7205           "Otherwise the size info doesn't change at this step");
7206    NOT_PRODUCT(
7207      _numObjectsAlreadyFree++;
7208      _numWordsAlreadyFree += res;
7209    )
7210    NOT_PRODUCT(_last_fc = fc;)
7211  } else if (!_bitMap->isMarked(addr)) {
7212    // Chunk is fresh garbage
7213    res = do_garbage_chunk(fc);
7214    debug_only(_sp->verifyFreeLists());
7215    NOT_PRODUCT(
7216      _numObjectsFreed++;
7217      _numWordsFreed += res;
7218    )
7219  } else {
7220    // Chunk that is alive.
7221    res = do_live_chunk(fc);
7222    debug_only(_sp->verifyFreeLists());
7223    NOT_PRODUCT(
7224      _numObjectsLive++;
7225      _numWordsLive += res;
7226    )
7227  }
7228  return res;
7229}
7230
7231// For smart allocation, record the following:
7232//  split deaths - a free chunk is removed from its free list because
7233//      it is being split into two or more chunks.
7234//  split birth - a free chunk is being added to its free list because
7235//      a larger free chunk has been split and resulted in this free chunk.
7236//  coal death - a free chunk is being removed from its free list because
7237//      it is being coalesced into a large free chunk.
7238//  coal birth - a free chunk is being added to its free list because
7239//      it was created when two or more free chunks were coalesced into
7240//      this free chunk.
7241//
7242// These statistics are used to determine the desired number of free
7243// chunks of a given size.  The desired number is chosen to be relative
7244// to the end of a CMS sweep.  The desired number at the end of a sweep
7245// is the
7246//      count-at-end-of-previous-sweep (an amount that was enough)
7247//              - count-at-beginning-of-current-sweep  (the excess)
7248//              + split-births  (gains in this size during interval)
7249//              - split-deaths  (demands on this size during interval)
7250// where the interval is from the end of one sweep to the end of the
7251// next.
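//
// For example (numbers purely illustrative): if the previous sweep ended with
// 100 free chunks of a given size, the current sweep began with 120, and the
// interval saw 30 split births and 20 split deaths, then the desired count is
//      100 - 120 + 30 - 20 = -10,
// i.e. a non-positive value, indicating that this size is currently
// overpopulated relative to recent demand.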
7252//
7253// When sweeping the sweeper maintains an accumulated chunk which is
7254// the chunk that is made up of chunks that have been coalesced.  That
7255// will be termed the left-hand chunk.  A new chunk of garbage that
7256// is being considered for coalescing will be referred to as the
7257// right-hand chunk.
7258//
7259// When making a decision on whether to coalesce a right-hand chunk with
7260// the current left-hand chunk, the current count vs. the desired count
7261// of the left-hand chunk is considered.  Also if the right-hand chunk
7262// is near the large chunk at the end of the heap (see
7263// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then it is
7264// coalesced with the left-hand chunk.
7265//
7266// When making a decision about whether to split a chunk, the desired count
7267// vs. the current count of the candidate to be split is also considered.
7268// If the candidate is underpopulated (currently fewer chunks than desired)
7269// a chunk of an overpopulated (currently more chunks than desired) size may
7270// be chosen.  The "hint" associated with a free list, if non-null, points
7271// to a free list which may be overpopulated.
7272//
7273
7274void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7275  const size_t size = fc->size();
7276  // Chunks that cannot be coalesced are not in the
7277  // free lists.
7278  if (CMSTestInFreeList && !fc->cantCoalesce()) {
7279    assert(_sp->verify_chunk_in_free_list(fc),
7280           "free chunk should be in free lists");
7281  }
7282  // A chunk that is already free should not have been
7283  // marked in the bit map
7284  HeapWord* const addr = (HeapWord*) fc;
7285  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7286  // Verify that the bit map has no bits marked between
7287  // addr and purported end of this block.
7288  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7289
7290  // Some chunks cannot be coalesced under any circumstances.
7291  // See the definition of cantCoalesce().
7292  if (!fc->cantCoalesce()) {
7293    // This chunk can potentially be coalesced.
7294    // All the work is done in
7295    do_post_free_or_garbage_chunk(fc, size);
7296    // Note that if the chunk is not coalescable (the else arm
7297    // below), we unconditionally flush, without needing to do
7298    // a "lookahead," as we do below.
7299    if (inFreeRange()) lookahead_and_flush(fc, size);
7300  } else {
7301    // Code path common to both original and adaptive free lists.
7302
7303    // Can't coalesce with the previous block; this should be treated
7304    // as the end of a free run, if any.
7305    if (inFreeRange()) {
7306      // we kicked some butt; time to pick up the garbage
7307      assert(freeFinger() < addr, "freeFinger points too high");
7308      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7309    }
7310    // else, nothing to do, just continue
7311  }
7312}
7313
7314size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7315  // This is a chunk of garbage.  It is not in any free list.
7316  // Add it to a free list or let it possibly be coalesced into
7317  // a larger chunk.
7318  HeapWord* const addr = (HeapWord*) fc;
7319  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7320
7321  // Verify that the bit map has no bits marked between
7322  // addr and purported end of just dead object.
7323  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7324  do_post_free_or_garbage_chunk(fc, size);
7325
7326  assert(_limit >= addr + size,
7327         "A fresh garbage chunk can't possibly straddle over _limit");
7328  if (inFreeRange()) lookahead_and_flush(fc, size);
7329  return size;
7330}
7331
7332size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7333  HeapWord* addr = (HeapWord*) fc;
7334  // The sweeper has just found a live object. Return any accumulated
7335  // left hand chunk to the free lists.
7336  if (inFreeRange()) {
7337    assert(freeFinger() < addr, "freeFinger points too high");
7338    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7339  }
7340
7341  // This object is live: we'd normally expect this to be
7342  // an oop, and like to assert the following:
7343  // assert(oop(addr)->is_oop(), "live block should be an oop");
7344  // However, as we commented above, this may be an object whose
7345  // header hasn't yet been initialized.
7346  size_t size;
7347  assert(_bitMap->isMarked(addr), "Tautology for this control point");
7348  if (_bitMap->isMarked(addr + 1)) {
7349    // Determine the size from the bit map, rather than trying to
7350    // compute it from the object header.
7351    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7352    size = pointer_delta(nextOneAddr + 1, addr);
7353    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7354           "alignment problem");
7355
7356#ifdef ASSERT
7357      if (oop(addr)->klass_or_null_acquire() != NULL) {
7358        // Ignore the mark word because we are running concurrently with mutators
7359        assert(oop(addr)->is_oop(true), "live block should be an oop");
7360        assert(size ==
7361               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7362               "P-mark and computed size do not agree");
7363      }
7364#endif
7365
7366  } else {
7367    // This should be an initialized object that's alive.
7368    assert(oop(addr)->klass_or_null_acquire() != NULL,
7369           "Should be an initialized object");
7370    // Ignore the mark word because we are running concurrently with mutators
7371    assert(oop(addr)->is_oop(true), "live block should be an oop");
7372    // Verify that the bit map has no bits marked between
7373    // addr and purported end of this block.
7374    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7375    assert(size >= 3, "Necessary for Printezis marks to work");
7376    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7377    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7378  }
7379  return size;
7380}
7381
7382void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7383                                                 size_t chunkSize) {
7384  // do_post_free_or_garbage_chunk() should only be called in the case
7385  // of the adaptive free list allocator.
7386  const bool fcInFreeLists = fc->is_free();
7387  assert((HeapWord*)fc <= _limit, "sweep invariant");
7388  if (CMSTestInFreeList && fcInFreeLists) {
7389    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7390  }
7391
7392  log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7393
7394  HeapWord* const fc_addr = (HeapWord*) fc;
7395
7396  bool coalesce = false;
7397  const size_t left  = pointer_delta(fc_addr, freeFinger());
7398  const size_t right = chunkSize;
7399  switch (FLSCoalescePolicy) {
7400    // numeric value forms a coalescing aggressiveness metric
7401    case 0:  { // never coalesce
7402      coalesce = false;
7403      break;
7404    }
7405    case 1: { // coalesce if left & right chunks on overpopulated lists
7406      coalesce = _sp->coalOverPopulated(left) &&
7407                 _sp->coalOverPopulated(right);
7408      break;
7409    }
7410    case 2: { // coalesce if left chunk on overpopulated list (default)
7411      coalesce = _sp->coalOverPopulated(left);
7412      break;
7413    }
7414    case 3: { // coalesce if left OR right chunk on overpopulated list
7415      coalesce = _sp->coalOverPopulated(left) ||
7416                 _sp->coalOverPopulated(right);
7417      break;
7418    }
7419    case 4: { // always coalesce
7420      coalesce = true;
7421      break;
7422    }
7423    default:
7424     ShouldNotReachHere();
7425  }
7426
7427  // Should the current free range be coalesced?
7428  // If the chunk is in a free range and either we decided to coalesce above
7429  // or the chunk is near the large block at the end of the heap
7430  // (isNearLargestChunk() returns true), then coalesce this chunk.
7431  const bool doCoalesce = inFreeRange()
7432                          && (coalesce || _g->isNearLargestChunk(fc_addr));
7433  if (doCoalesce) {
7434    // Coalesce the current free range on the left with the new
7435    // chunk on the right.  If either is on a free list,
7436    // it must be removed from the list and stashed in the closure.
7437    if (freeRangeInFreeLists()) {
7438      FreeChunk* const ffc = (FreeChunk*)freeFinger();
7439      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7440             "Size of free range is inconsistent with chunk size.");
7441      if (CMSTestInFreeList) {
7442        assert(_sp->verify_chunk_in_free_list(ffc),
7443               "Chunk is not in free lists");
7444      }
7445      _sp->coalDeath(ffc->size());
7446      _sp->removeFreeChunkFromFreeLists(ffc);
7447      set_freeRangeInFreeLists(false);
7448    }
7449    if (fcInFreeLists) {
7450      _sp->coalDeath(chunkSize);
7451      assert(fc->size() == chunkSize,
7452        "The chunk has the wrong size or is not in the free lists");
7453      _sp->removeFreeChunkFromFreeLists(fc);
7454    }
7455    set_lastFreeRangeCoalesced(true);
7456    print_free_block_coalesced(fc);
7457  } else {  // not in a free range and/or should not coalesce
7458    // Return the current free range and start a new one.
7459    if (inFreeRange()) {
7460      // In a free range but cannot coalesce with the right hand chunk.
7461      // Put the current free range into the free lists.
7462      flush_cur_free_chunk(freeFinger(),
7463                           pointer_delta(fc_addr, freeFinger()));
7464    }
7465    // Set up for new free range.  Pass along whether the right hand
7466    // chunk is in the free lists.
7467    initialize_free_range((HeapWord*)fc, fcInFreeLists);
7468  }
7469}
7470
7471// Lookahead flush:
7472// If we are tracking a free range, and this is the last chunk that
7473// we'll look at because its end crosses past _limit, we'll preemptively
7474// flush it along with any free range we may be holding on to. Note that
7475// this can be the case only for an already free or freshly garbage
7476// chunk. If this block is an object, it can never straddle
7477// over _limit. The "straddling" occurs when _limit is set at
7478// the previous end of the space when this cycle started, and
7479// a subsequent heap expansion caused the previously co-terminal
7480// free block to be coalesced with the newly expanded portion,
7481// thus rendering _limit a non-block-boundary making it dangerous
7482// for the sweeper to step over and examine.
7483void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7484  assert(inFreeRange(), "Should only be called if currently in a free range.");
7485  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7486  assert(_sp->used_region().contains(eob - 1),
7487         "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7488         " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7489         " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7490         p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7491  if (eob >= _limit) {
7492    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7493    log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
7494                                 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7495                                 "[" PTR_FORMAT "," PTR_FORMAT ")",
7496                                 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7497    // Return the storage we are tracking back into the free lists.
7498    log_develop_trace(gc, sweep)("Flushing ... ");
7499    assert(freeFinger() < eob, "Error");
7500    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7501  }
7502}
7503
7504void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7505  assert(inFreeRange(), "Should only be called if currently in a free range.");
7506  assert(size > 0,
7507    "A zero sized chunk cannot be added to the free lists.");
7508  if (!freeRangeInFreeLists()) {
7509    if (CMSTestInFreeList) {
7510      FreeChunk* fc = (FreeChunk*) chunk;
7511      fc->set_size(size);
7512      assert(!_sp->verify_chunk_in_free_list(fc),
7513             "chunk should not be in free lists yet");
7514    }
7515    log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
7516    // A new free range is going to be starting.  The current
7517    // free range has not been added to the free lists yet or
7518    // was removed so add it back.
7519    // If the current free range was coalesced, then the death
7520    // of the free range was recorded.  Record a birth now.
7521    if (lastFreeRangeCoalesced()) {
7522      _sp->coalBirth(size);
7523    }
7524    _sp->addChunkAndRepairOffsetTable(chunk, size,
7525            lastFreeRangeCoalesced());
7526  } else {
7527    log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
7528  }
7529  set_inFreeRange(false);
7530  set_freeRangeInFreeLists(false);
7531}
7532
7533// We take a break if we've been at this for a while,
7534// so as to avoid monopolizing the locks involved.
7535void SweepClosure::do_yield_work(HeapWord* addr) {
7536  // Return current free chunk being used for coalescing (if any)
7537  // to the appropriate freelist.  After yielding, the next
7538  // free block encountered will start a coalescing range of
7539  // free blocks.  If the next free block is adjacent to the
7540  // chunk just flushed, they will need to wait for the next
7541  // sweep to be coalesced.
7542  if (inFreeRange()) {
7543    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7544  }
7545
7546  // First give up the locks, then yield, then re-lock.
7547  // We should probably use a constructor/destructor idiom to
7548  // do this unlock/lock or modify the MutexUnlocker class to
7549  // serve our purpose. XXX
7550  assert_lock_strong(_bitMap->lock());
7551  assert_lock_strong(_freelistLock);
7552  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7553         "CMS thread should hold CMS token");
7554  _bitMap->lock()->unlock();
7555  _freelistLock->unlock();
7556  ConcurrentMarkSweepThread::desynchronize(true);
7557  _collector->stopTimer();
7558  _collector->incrementYields();
7559
7560  // See the comment in coordinator_yield()
7561  for (unsigned i = 0; i < CMSYieldSleepCount &&
7562                       ConcurrentMarkSweepThread::should_yield() &&
7563                       !CMSCollector::foregroundGCIsActive(); ++i) {
7564    os::sleep(Thread::current(), 1, false);
7565  }
7566
7567  ConcurrentMarkSweepThread::synchronize(true);
7568  _freelistLock->lock();
7569  _bitMap->lock()->lock_without_safepoint_check();
7570  _collector->startTimer();
7571}
7572
7573#ifndef PRODUCT
7574// This is actually very useful in a product build if it can
7575// be called from the debugger.  Compile it into the product
7576// as needed.
7577bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7578  return debug_cms_space->verify_chunk_in_free_list(fc);
7579}
7580#endif
7581
7582void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7583  log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7584                               p2i(fc), fc->size());
7585}
7586
7587// CMSIsAliveClosure
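// Liveness as seen here: an object outside _span (e.g. in the young
// generation) is treated as alive; within _span, liveness is given by the
// CMS mark bit.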
7588bool CMSIsAliveClosure::do_object_b(oop obj) {
7589  HeapWord* addr = (HeapWord*)obj;
7590  return addr != NULL &&
7591         (!_span.contains(addr) || _bit_map->isMarked(addr));
7592}
7593
7594
7595CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7596                      MemRegion span,
7597                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7598                      bool cpc):
7599  _collector(collector),
7600  _span(span),
7601  _bit_map(bit_map),
7602  _mark_stack(mark_stack),
7603  _concurrent_precleaning(cpc) {
7604  assert(!_span.is_empty(), "Empty span could spell trouble");
7605}
7606
7607
7608// CMSKeepAliveClosure: the serial version
7609void CMSKeepAliveClosure::do_oop(oop obj) {
7610  HeapWord* addr = (HeapWord*)obj;
7611  if (_span.contains(addr) &&
7612      !_bit_map->isMarked(addr)) {
7613    _bit_map->mark(addr);
7614    bool simulate_overflow = false;
7615    NOT_PRODUCT(
7616      if (CMSMarkStackOverflowALot &&
7617          _collector->simulate_overflow()) {
7618        // simulate a stack overflow
7619        simulate_overflow = true;
7620      }
7621    )
7622    if (simulate_overflow || !_mark_stack->push(obj)) {
7623      if (_concurrent_precleaning) {
7624        // We dirty the overflown object and let the remark
7625        // phase deal with it.
7626        assert(_collector->overflow_list_is_empty(), "Error");
7627        // In the case of object arrays, we need to dirty all of
7628        // the cards that the object spans. No locking or atomics
7629        // are needed since no one else can be mutating the mod union
7630        // table.
7631        if (obj->is_objArray()) {
7632          size_t sz = obj->size();
7633          HeapWord* end_card_addr =
7634            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7635          MemRegion redirty_range = MemRegion(addr, end_card_addr);
7636          assert(!redirty_range.is_empty(), "Arithmetical tautology");
7637          _collector->_modUnionTable.mark_range(redirty_range);
7638        } else {
7639          _collector->_modUnionTable.mark(addr);
7640        }
7641        _collector->_ser_kac_preclean_ovflw++;
7642      } else {
7643        _collector->push_on_overflow_list(obj);
7644        _collector->_ser_kac_ovflw++;
7645      }
7646    }
7647  }
7648}
7649
7650void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
7651void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
7652
7653// CMSParKeepAliveClosure: a parallel version of the above.
7654// The work queues are private to each closure (thread),
7655// but (may be) available for stealing by other threads.
7656void CMSParKeepAliveClosure::do_oop(oop obj) {
7657  HeapWord* addr = (HeapWord*)obj;
7658  if (_span.contains(addr) &&
7659      !_bit_map->isMarked(addr)) {
7660    // In general, during recursive tracing, several threads
7661    // may be concurrently getting here; the first one to
7662    // "tag" it claims it.
7663    if (_bit_map->par_mark(addr)) {
7664      bool res = _work_queue->push(obj);
7665      assert(res, "Low water mark should be much less than capacity");
7666      // Do a recursive trim in the hope that this will keep
7667      // stack usage lower, but leave some oops for potential stealers
7668      trim_queue(_low_water_mark);
7669    } // Else, another thread got there first
7670  }
7671}
7672
7673void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
7674void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7675
7676void CMSParKeepAliveClosure::trim_queue(uint max) {
7677  while (_work_queue->size() > max) {
7678    oop new_oop;
7679    if (_work_queue->pop_local(new_oop)) {
7680      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7681      assert(_bit_map->isMarked((HeapWord*)new_oop),
7682             "no white objects on this stack!");
7683      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7684      // iterate over the oops in this oop, marking and pushing
7685      // the ones in CMS heap (i.e. in _span).
7686      new_oop->oop_iterate(&_mark_and_push);
7687    }
7688  }
7689}
7690
7691CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7692                                CMSCollector* collector,
7693                                MemRegion span, CMSBitMap* bit_map,
7694                                OopTaskQueue* work_queue):
7695  _collector(collector),
7696  _span(span),
7697  _bit_map(bit_map),
7698  _work_queue(work_queue) { }
7699
7700void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7701  HeapWord* addr = (HeapWord*)obj;
7702  if (_span.contains(addr) &&
7703      !_bit_map->isMarked(addr)) {
7704    if (_bit_map->par_mark(addr)) {
7705      bool simulate_overflow = false;
7706      NOT_PRODUCT(
7707        if (CMSMarkStackOverflowALot &&
7708            _collector->par_simulate_overflow()) {
7709          // simulate a stack overflow
7710          simulate_overflow = true;
7711        }
7712      )
7713      if (simulate_overflow || !_work_queue->push(obj)) {
7714        _collector->par_push_on_overflow_list(obj);
7715        _collector->_par_kac_ovflw++;
7716      }
7717    } // Else another thread got there already
7718  }
7719}
7720
7721void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7722void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7723
7724//////////////////////////////////////////////////////////////////
7725//  CMSExpansionCause                /////////////////////////////
7726//////////////////////////////////////////////////////////////////
7727const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7728  switch (cause) {
7729    case _no_expansion:
7730      return "No expansion";
7731    case _satisfy_free_ratio:
7732      return "Free ratio";
7733    case _satisfy_promotion:
7734      return "Satisfy promotion";
7735    case _satisfy_allocation:
      return "Allocation";
7737    case _allocate_par_lab:
7738      return "Par LAB";
7739    case _allocate_par_spooling_space:
7740      return "Par Spooling Space";
7741    case _adaptive_size_policy:
7742      return "Ergonomics";
7743    default:
7744      return "unknown";
7745  }
7746}
7747
7748void CMSDrainMarkingStackClosure::do_void() {
  // The maximum number of entries to take from the overflow list at a time
7750  const size_t num = _mark_stack->capacity()/4;
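  // Taking only a quarter of the stack's capacity at a time leaves headroom:
  // scanning the transferred objects below can itself push further entries
  // onto the mark stack.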
7751  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7752         "Overflow list should be NULL during concurrent phases");
7753  while (!_mark_stack->isEmpty() ||
7754         // if stack is empty, check the overflow list
7755         _collector->take_from_overflow_list(num, _mark_stack)) {
7756    oop obj = _mark_stack->pop();
7757    HeapWord* addr = (HeapWord*)obj;
7758    assert(_span.contains(addr), "Should be within span");
7759    assert(_bit_map->isMarked(addr), "Should be marked");
7760    assert(obj->is_oop(), "Should be an oop");
7761    obj->oop_iterate(_keep_alive);
7762  }
7763}
7764
7765void CMSParDrainMarkingStackClosure::do_void() {
7766  // drain queue
7767  trim_queue(0);
7768}
7769
7770// Trim our work_queue so its length is below max at return
7771void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7772  while (_work_queue->size() > max) {
7773    oop new_oop;
7774    if (_work_queue->pop_local(new_oop)) {
7775      assert(new_oop->is_oop(), "Expected an oop");
7776      assert(_bit_map->isMarked((HeapWord*)new_oop),
7777             "no white objects on this stack!");
7778      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7779      // iterate over the oops in this oop, marking and pushing
7780      // the ones in CMS heap (i.e. in _span).
7781      new_oop->oop_iterate(&_mark_and_push);
7782    }
7783  }
7784}
7785
7786////////////////////////////////////////////////////////////////////
7787// Support for Marking Stack Overflow list handling and related code
7788////////////////////////////////////////////////////////////////////
7789// Much of the following code is similar in shape and spirit to the
// code used in ParNewGC. We should try to share that code
7791// as much as possible in the future.
7792
7793#ifndef PRODUCT
7794// Debugging support for CMSStackOverflowALot
7795
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK; in fact it's
// probably good, as it exercises the overflow code
// under contention.
7801bool CMSCollector::simulate_overflow() {
7802  if (_overflow_counter-- <= 0) { // just being defensive
7803    _overflow_counter = CMSMarkStackOverflowInterval;
7804    return true;
7805  } else {
7806    return false;
7807  }
7808}
7809
7810bool CMSCollector::par_simulate_overflow() {
7811  return simulate_overflow();
7812}
7813#endif
7814
7815// Single-threaded
7816bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7817  assert(stack->isEmpty(), "Expected precondition");
7818  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
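  // The overflow list is threaded through the mark words of its elements;
  // as each object is popped we install the prototype mark, any mark that
  // needed preserving having been saved when the object was pushed
  // (see preserve_mark_if_necessary()).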
7819  size_t i = num;
7820  oop  cur = _overflow_list;
7821  const markOop proto = markOopDesc::prototype();
7822  NOT_PRODUCT(ssize_t n = 0;)
7823  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7824    next = oop(cur->mark());
7825    cur->set_mark(proto);   // until proven otherwise
7826    assert(cur->is_oop(), "Should be an oop");
7827    bool res = stack->push(cur);
7828    assert(res, "Bit off more than can chew?");
7829    NOT_PRODUCT(n++;)
7830  }
7831  _overflow_list = cur;
7832#ifndef PRODUCT
7833  assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
7835#endif
7836  return !stack->isEmpty();
7837}
7838
7839#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
7840// (MT-safe) Get a prefix of at most "num" from the list.
7841// The overflow list is chained through the mark word of
7842// each object in the list. We fetch the entire list,
// break off a prefix of the requested size and return the
// remainder to the global list. If other threads try to take objects from
7845// the overflow list at that time, they will wait for
7846// some time to see if data becomes available. If (and
7847// only if) another thread places one or more object(s)
7848// on the global list before we have returned the suffix
7849// to the global list, we will walk down our local list
7850// to find its end and append the global list to
7851// our suffix before returning it. This suffix walk can
7852// prove to be expensive (quadratic in the amount of traffic)
7853// when there are many objects in the overflow list and
7854// there is much producer-consumer contention on the list.
7855// *NOTE*: The overflow list manipulation code here and
7856// in ParNewGeneration:: are very similar in shape,
7857// except that in the ParNew case we use the old (from/eden)
7858// copy of the object to thread the list via its klass word.
7859// Because of the common code, if you make any changes in
7860// the code below, please check the ParNew version to see if
7861// similar changes might be needed.
7862// CR 6797058 has been filed to consolidate the common code.
7863bool CMSCollector::par_take_from_overflow_list(size_t num,
7864                                               OopTaskQueue* work_q,
7865                                               int no_of_gc_threads) {
7866  assert(work_q->size() == 0, "First empty local work queue");
7867  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7868  if (_overflow_list == NULL) {
7869    return false;
7870  }
7871  // Grab the entire list; we'll put back a suffix
7872  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
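  // The xchg leaves BUSY behind as a sentinel: other consumers that see
  // BUSY will sleep and retry (as in the loop below), while producers may
  // still push new elements over the BUSY token; any such elements are
  // either left in place or spliced together with the suffix we return.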
7873  Thread* tid = Thread::current();
  // Before "no_of_gc_threads" was introduced, CMSOverflowSpinCount was
7875  // set to ParallelGCThreads.
7876  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
7877  size_t sleep_time_millis = MAX2((size_t)1, num/100);
7878  // If the list is busy, we spin for a short while,
7879  // sleeping between attempts to get the list.
7880  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7881    os::sleep(tid, sleep_time_millis, false);
7882    if (_overflow_list == NULL) {
7883      // Nothing left to take
7884      return false;
7885    } else if (_overflow_list != BUSY) {
7886      // Try and grab the prefix
7887      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7888    }
7889  }
7890  // If the list was found to be empty, or we spun long
7891  // enough, we give up and return empty-handed. If we leave
7892  // the list in the BUSY state below, it must be the case that
7893  // some other thread holds the overflow list and will set it
7894  // to a non-BUSY state in the future.
7895  if (prefix == NULL || prefix == BUSY) {
7896     // Nothing to take or waited long enough
7897     if (prefix == NULL) {
7898       // Write back the NULL in case we overwrote it with BUSY above
7899       // and it is still the same value.
7900       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7901     }
7902     return false;
7903  }
7904  assert(prefix != NULL && prefix != BUSY, "Error");
7905  size_t i = num;
7906  oop cur = prefix;
7907  // Walk down the first "num" objects, unless we reach the end.
7908  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
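  // (The loop body above is intentionally empty: on exit, cur points at the
  // last element of the prefix, or at the last element of the list if it
  // holds fewer than "num" elements.)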
7909  if (cur->mark() == NULL) {
7910    // We have "num" or fewer elements in the list, so there
7911    // is nothing to return to the global list.
7912    // Write back the NULL in lieu of the BUSY we wrote
7913    // above, if it is still the same value.
7914    if (_overflow_list == BUSY) {
7915      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7916    }
7917  } else {
7918    // Chop off the suffix and return it to the global list.
7919    assert(cur->mark() != BUSY, "Error");
7920    oop suffix_head = cur->mark(); // suffix will be put back on global list
7921    cur->set_mark(NULL);           // break off suffix
    // It's possible that the list is still in the empty (BUSY) state
7923    // we left it in a short while ago; in that case we may be
7924    // able to place back the suffix without incurring the cost
7925    // of a walk down the list.
7926    oop observed_overflow_list = _overflow_list;
7927    oop cur_overflow_list = observed_overflow_list;
7928    bool attached = false;
7929    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7930      observed_overflow_list =
7931        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7932      if (cur_overflow_list == observed_overflow_list) {
7933        attached = true;
7934        break;
7935      } else cur_overflow_list = observed_overflow_list;
7936    }
7937    if (!attached) {
7938      // Too bad, someone else sneaked in (at least) an element; we'll need
7939      // to do a splice. Find tail of suffix so we can prepend suffix to global
7940      // list.
7941      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
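      // (Empty loop body: cur walks to the last element of the suffix.)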
7942      oop suffix_tail = cur;
7943      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
7944             "Tautology");
7945      observed_overflow_list = _overflow_list;
7946      do {
7947        cur_overflow_list = observed_overflow_list;
7948        if (cur_overflow_list != BUSY) {
7949          // Do the splice ...
7950          suffix_tail->set_mark(markOop(cur_overflow_list));
7951        } else { // cur_overflow_list == BUSY
7952          suffix_tail->set_mark(NULL);
7953        }
7954        // ... and try to place spliced list back on overflow_list ...
7955        observed_overflow_list =
7956          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7957      } while (cur_overflow_list != observed_overflow_list);
7958      // ... until we have succeeded in doing so.
7959    }
7960  }
7961
7962  // Push the prefix elements on work_q
7963  assert(prefix != NULL, "control point invariant");
7964  const markOop proto = markOopDesc::prototype();
7965  oop next;
7966  NOT_PRODUCT(ssize_t n = 0;)
7967  for (cur = prefix; cur != NULL; cur = next) {
7968    next = oop(cur->mark());
7969    cur->set_mark(proto);   // until proven otherwise
7970    assert(cur->is_oop(), "Should be an oop");
7971    bool res = work_q->push(cur);
7972    assert(res, "Bit off more than we can chew?");
7973    NOT_PRODUCT(n++;)
7974  }
7975#ifndef PRODUCT
7976  assert(_num_par_pushes >= n, "Too many pops?");
7977  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
7978#endif
7979  return true;
7980}
7981
7982// Single-threaded
7983void CMSCollector::push_on_overflow_list(oop p) {
7984  NOT_PRODUCT(_num_par_pushes++;)
7985  assert(p->is_oop(), "Not an oop");
7986  preserve_mark_if_necessary(p);
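  // Thread p onto the list via its mark word; a mark that must be
  // preserved was saved just above.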
7987  p->set_mark((markOop)_overflow_list);
7988  _overflow_list = p;
7989}
7990
7991// Multi-threaded; use CAS to prepend to overflow list
7992void CMSCollector::par_push_on_overflow_list(oop p) {
7993  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
7994  assert(p->is_oop(), "Not an oop");
7995  par_preserve_mark_if_necessary(p);
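  // CAS loop: prepend p to the observed list head. If the head currently
  // reads BUSY (a harvest by par_take_from_overflow_list() is in progress),
  // p starts a fresh NULL-terminated list over the BUSY token; the
  // harvesting thread either leaves such elements in place or splices its
  // suffix around them.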
7996  oop observed_overflow_list = _overflow_list;
7997  oop cur_overflow_list;
7998  do {
7999    cur_overflow_list = observed_overflow_list;
8000    if (cur_overflow_list != BUSY) {
8001      p->set_mark(markOop(cur_overflow_list));
8002    } else {
8003      p->set_mark(NULL);
8004    }
8005    observed_overflow_list =
8006      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8007  } while (cur_overflow_list != observed_overflow_list);
8008}
8009#undef BUSY
8010
8011// Single threaded
8012// General Note on GrowableArray: pushes may silently fail
8013// because we are (temporarily) out of C-heap for expanding
8014// the stack. The problem is quite ubiquitous and affects
8015// a lot of code in the JVM. The prudent thing for GrowableArray
8016// to do (for now) is to exit with an error. However, that may
8017// be too draconian in some cases because the caller may be
8018// able to recover without much harm. For such cases, we
// should probably introduce a "soft_push" method that returns
// an indication of success or failure, with the assumption that
// the caller may be able to recover from a failure. Code in
// the VM could then be changed, incrementally, to deal with such
// failures where possible, thus incrementally hardening the VM
// in such low-resource situations.
8025void CMSCollector::preserve_mark_work(oop p, markOop m) {
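  // Record the (object, mark) pair on the paired preservation stacks;
  // the marks are reinstalled by restore_preserved_marks_if_any()
  // while the world is stopped.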
8026  _preserved_oop_stack.push(p);
8027  _preserved_mark_stack.push(m);
8028  assert(m == p->mark(), "Mark word changed");
8029  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8030         "bijection");
8031}
8032
8033// Single threaded
8034void CMSCollector::preserve_mark_if_necessary(oop p) {
8035  markOop m = p->mark();
8036  if (m->must_be_preserved(p)) {
8037    preserve_mark_work(p, m);
8038  }
8039}
8040
8041void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8042  markOop m = p->mark();
8043  if (m->must_be_preserved(p)) {
8044    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8045    // Even though we read the mark word without holding
8046    // the lock, we are assured that it will not change
8047    // because we "own" this oop, so no other thread can
8048    // be trying to push it on the overflow list; see
8049    // the assertion in preserve_mark_work() that checks
8050    // that m == p->mark().
8051    preserve_mark_work(p, m);
8052  }
8053}
8054
8055// We should be able to do this multi-threaded,
8056// a chunk of stack being a task (this is
8057// correct because each oop only ever appears
// once in the overflow list). However, it's
// not very easy to completely overlap this with
// other operations, so it will generally not be done
// until all work has been completed. Because we
8062// expect the preserved oop stack (set) to be small,
8063// it's probably fine to do this single-threaded.
8064// We can explore cleverer concurrent/overlapped/parallel
8065// processing of preserved marks if we feel the
// need for this in the future. Stack overflow should
// be rare in practice and, when it does happen, its
// overall effect on performance is so great that the cost of
// this single-threaded processing will likely just be in the noise anyway.
8070void CMSCollector::restore_preserved_marks_if_any() {
8071  assert(SafepointSynchronize::is_at_safepoint(),
8072         "world should be stopped");
8073  assert(Thread::current()->is_ConcurrentGC_thread() ||
8074         Thread::current()->is_VM_thread(),
8075         "should be single-threaded");
8076  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8077         "bijection");
8078
8079  while (!_preserved_oop_stack.is_empty()) {
8080    oop p = _preserved_oop_stack.pop();
8081    assert(p->is_oop(), "Should be an oop");
8082    assert(_span.contains(p), "oop should be in _span");
8083    assert(p->mark() == markOopDesc::prototype(),
8084           "Set when taken from overflow list");
8085    markOop m = _preserved_mark_stack.pop();
8086    p->set_mark(m);
8087  }
8088  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8089         "stacks were cleared above");
8090}
8091
8092#ifndef PRODUCT
8093bool CMSCollector::no_preserved_marks() const {
8094  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8095}
8096#endif
8097
// Transfer some number of overflowed objects to the usual marking
// stack. Return true if any objects were transferred.
8100bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
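  // Heuristic: take at most a quarter of the remaining stack capacity,
  // capped by ParGCDesiredObjsFromOverflowList, so that scanning the
  // transferred objects still has headroom to push their referents.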
8101  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8102                    (size_t)ParGCDesiredObjsFromOverflowList);
8103
8104  bool res = _collector->take_from_overflow_list(num, _mark_stack);
8105  assert(_collector->overflow_list_is_empty() || res,
8106         "If list is not empty, we should have taken something");
8107  assert(!res || !_mark_stack->isEmpty(),
8108         "If we took something, it should now be on our stack");
8109  return res;
8110}
8111
8112size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
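  // If the block at addr is an object that is not marked live, record it
  // in the dead bit map; live objects must not already appear there.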
8113  size_t res = _sp->block_size_no_stall(addr, _collector);
8114  if (_sp->block_is_obj(addr)) {
8115    if (_live_bit_map->isMarked(addr)) {
8116      // It can't have been dead in a previous cycle
8117      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8118    } else {
8119      _dead_bit_map->mark(addr);      // mark the dead object
8120    }
8121  }
8122  // Could be 0, if the block size could not be computed without stalling.
8123  return res;
8124}
8125
8126TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8127
8128  switch (phase) {
8129    case CMSCollector::InitialMarking:
8130      initialize(true  /* fullGC */ ,
8131                 cause /* cause of the GC */,
8132                 true  /* recordGCBeginTime */,
8133                 true  /* recordPreGCUsage */,
8134                 false /* recordPeakUsage */,
                 false /* recordPostGCUsage */,
8136                 true  /* recordAccumulatedGCTime */,
8137                 false /* recordGCEndTime */,
8138                 false /* countCollection */  );
8139      break;
8140
8141    case CMSCollector::FinalMarking:
8142      initialize(true  /* fullGC */ ,
8143                 cause /* cause of the GC */,
8144                 false /* recordGCBeginTime */,
8145                 false /* recordPreGCUsage */,
8146                 false /* recordPeakUsage */,
                 false /* recordPostGCUsage */,
8148                 true  /* recordAccumulatedGCTime */,
8149                 false /* recordGCEndTime */,
8150                 false /* countCollection */  );
8151      break;
8152
8153    case CMSCollector::Sweeping:
8154      initialize(true  /* fullGC */ ,
8155                 cause /* cause of the GC */,
8156                 false /* recordGCBeginTime */,
8157                 false /* recordPreGCUsage */,
8158                 true  /* recordPeakUsage */,
                 true  /* recordPostGCUsage */,
8160                 false /* recordAccumulatedGCTime */,
8161                 true  /* recordGCEndTime */,
8162                 true  /* countCollection */  );
8163      break;
8164
8165    default:
8166      ShouldNotReachHere();
8167  }
8168}
8169