// concurrentMarkSweepGeneration.cpp revision 11945:6d3c44100184
1/*
2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/classLoaderData.hpp"
27#include "classfile/stringTable.hpp"
28#include "classfile/symbolTable.hpp"
29#include "classfile/systemDictionary.hpp"
30#include "code/codeCache.hpp"
31#include "gc/cms/cmsCollectorPolicy.hpp"
32#include "gc/cms/cmsOopClosures.inline.hpp"
33#include "gc/cms/compactibleFreeListSpace.hpp"
34#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
35#include "gc/cms/concurrentMarkSweepThread.hpp"
36#include "gc/cms/parNewGeneration.hpp"
37#include "gc/cms/vmCMSOperations.hpp"
38#include "gc/serial/genMarkSweep.hpp"
39#include "gc/serial/tenuredGeneration.hpp"
40#include "gc/shared/adaptiveSizePolicy.hpp"
41#include "gc/shared/cardGeneration.inline.hpp"
42#include "gc/shared/cardTableRS.hpp"
43#include "gc/shared/collectedHeap.inline.hpp"
44#include "gc/shared/collectorCounters.hpp"
45#include "gc/shared/collectorPolicy.hpp"
46#include "gc/shared/gcLocker.inline.hpp"
47#include "gc/shared/gcPolicyCounters.hpp"
48#include "gc/shared/gcTimer.hpp"
49#include "gc/shared/gcTrace.hpp"
50#include "gc/shared/gcTraceTime.inline.hpp"
51#include "gc/shared/genCollectedHeap.hpp"
52#include "gc/shared/genOopClosures.inline.hpp"
53#include "gc/shared/isGCActiveMark.hpp"
54#include "gc/shared/referencePolicy.hpp"
55#include "gc/shared/strongRootsScope.hpp"
56#include "gc/shared/taskqueue.inline.hpp"
57#include "logging/log.hpp"
58#include "memory/allocation.hpp"
59#include "memory/iterator.inline.hpp"
60#include "memory/padded.hpp"
61#include "memory/resourceArea.hpp"
62#include "oops/oop.inline.hpp"
63#include "prims/jvmtiExport.hpp"
64#include "runtime/atomic.hpp"
65#include "runtime/globals_extension.hpp"
66#include "runtime/handles.inline.hpp"
67#include "runtime/java.hpp"
68#include "runtime/orderAccess.inline.hpp"
69#include "runtime/timer.hpp"
70#include "runtime/vmThread.hpp"
71#include "services/memoryService.hpp"
72#include "services/runtimeService.hpp"
73#include "utilities/stack.inline.hpp"
74
75// statics
76CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
77bool CMSCollector::_full_gc_requested = false;
78GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
79
80//////////////////////////////////////////////////////////////////
81// In support of CMS/VM thread synchronization
82//////////////////////////////////////////////////////////////////
83// We split use of the CGC_lock into 2 "levels".
84// The low-level locking is of the usual CGC_lock monitor. We introduce
85// a higher level "token" (hereafter "CMS token") built on top of the
86// low level monitor (hereafter "CGC lock").
87// The token-passing protocol gives priority to the VM thread. The
88// CMS-lock doesn't provide any fairness guarantees, but clients
89// should ensure that it is only held for very short, bounded
90// durations.
91//
92// When either of the CMS thread or the VM thread is involved in
93// collection operations during which it does not want the other
94// thread to interfere, it obtains the CMS token.
95//
96// If either thread tries to get the token while the other has
97// it, that thread waits. However, if the VM thread and CMS thread
98// both want the token, then the VM thread gets priority while the
99// CMS thread waits. This ensures, for instance, that the "concurrent"
100// phases of the CMS thread's work do not block out the VM thread
101// for long periods of time as the CMS thread continues to hog
102// the token. (See bug 4616232).
103//
104// The baton-passing functions are, however, controlled by the
105// flags _foregroundGCShouldWait and _foregroundGCIsActive,
106// and here the low-level CMS lock, not the high level token,
107// ensures mutual exclusion.
108//
109// Two important conditions that we have to satisfy:
110// 1. if a thread does a low-level wait on the CMS lock, then it
111//    relinquishes the CMS token if it were holding that token
112//    when it acquired the low-level CMS lock.
113// 2. any low-level notifications on the low-level lock
114//    should only be sent when a thread has relinquished the token.
115//
116// In the absence of either property, we'd have potential deadlock.
117//
118// We protect each of the CMS (concurrent and sequential) phases
119// with the CMS _token_, not the CMS _lock_.
120//
121// The only code protected by CMS lock is the token acquisition code
122// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
123// baton-passing code.
124//
125// Unfortunately, I couldn't come up with a good abstraction to factor and
126// hide the naked CGC_lock manipulation in the baton-passing code
127// further below. That's something we should try to do. Also, the proof
128// of correctness of this 2-level locking scheme is far from obvious,
129// and potentially quite slippery. We have an uneasy suspicion, for instance,
130// that there may be a theoretical possibility of delay/starvation in the
131// low-level lock/wait/notify scheme used for the baton-passing because of
132// potential interference with the priority scheme embodied in the
133// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
134// invocation further below and marked with "XXX 20011219YSR".
135// Indeed, as we note elsewhere, this may become yet more slippery
136// in the presence of multiple CMS and/or multiple VM threads. XXX
137
138class CMSTokenSync: public StackObj {
139 private:
140  bool _is_cms_thread;
141 public:
142  CMSTokenSync(bool is_cms_thread):
143    _is_cms_thread(is_cms_thread) {
144    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
145           "Incorrect argument to constructor");
146    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
147  }
148
149  ~CMSTokenSync() {
150    assert(_is_cms_thread ?
151             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
152             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
153          "Incorrect state");
154    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
155  }
156};
157
158// Convenience class that does a CMSTokenSync, and then acquires
159// up to three locks.
160class CMSTokenSyncWithLocks: public CMSTokenSync {
161 private:
162  // Note: locks are acquired in textual declaration order
163  // and released in the opposite order
164  MutexLockerEx _locker1, _locker2, _locker3;
165 public:
166  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
167                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
168    CMSTokenSync(is_cms_thread),
169    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
170    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
171    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
172  { }
173};
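// For illustration, a minimal sketch of how the two helpers above are meant
// to be used from the CMS thread; the specific locks passed here are
// placeholders (e.g. the free list or bit map lock), not a prescription of
// which locks a given phase actually needs:
//
//   void example_cms_phase(Mutex* freelist_lock, Mutex* bitmap_lock) {
//     // Take the CMS token (the VM thread gets priority), then the locks,
//     // all without safepoint checks; everything is released in reverse
//     // order when the scope exits.
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
//                              freelist_lock, bitmap_lock);
//     // ... work that must exclude the VM thread and the lock holders ...
//   }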
174
175
176//////////////////////////////////////////////////////////////////
177//  Concurrent Mark-Sweep Generation /////////////////////////////
178//////////////////////////////////////////////////////////////////
179
180NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
181
182// This struct contains per-thread things necessary to support parallel
183// young-gen collection.
184class CMSParGCThreadState: public CHeapObj<mtGC> {
185 public:
186  CompactibleFreeListSpaceLAB lab;
187  PromotionInfo promo;
188
189  // Constructor.
190  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
191    promo.setSpace(cfls);
192  }
193};
194
195ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
196     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
197  CardGeneration(rs, initial_byte_size, ct),
198  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
199  _did_compact(false)
200{
201  HeapWord* bottom = (HeapWord*) _virtual_space.low();
202  HeapWord* end    = (HeapWord*) _virtual_space.high();
203
204  _direct_allocated_words = 0;
205  NOT_PRODUCT(
206    _numObjectsPromoted = 0;
207    _numWordsPromoted = 0;
208    _numObjectsAllocated = 0;
209    _numWordsAllocated = 0;
210  )
211
212  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
213  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
214  _cmsSpace->_old_gen = this;
215
216  _gc_stats = new CMSGCStats();
217
218  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
219  // offsets match. The ability to tell free chunks from objects
220  // depends on this property.
221  debug_only(
222    FreeChunk* junk = NULL;
223    assert(UseCompressedClassPointers ||
224           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
225           "Offset of FreeChunk::_prev within FreeChunk must match"
226           "  that of OopDesc::_klass within OopDesc");
227  )
228
229  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
230  for (uint i = 0; i < ParallelGCThreads; i++) {
231    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
232  }
233
234  _incremental_collection_failed = false;
235  // The "dilatation_factor" is the expansion that can occur on
236  // account of the fact that the minimum object size in the CMS
237  // generation may be larger than that in, say, a contiguous young
238  //  generation.
239  // Ideally, in the calculation below, we'd compute the dilatation
240  // factor as: MinChunkSize/(promoting_gen's min object size)
241  // Since we do not have such a general query interface for the
242  // promoting generation, we'll instead just use the minimum
243  // object size (which today is a header's worth of space);
244  // note that all arithmetic is in units of HeapWords.
245  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
246  assert(_dilatation_factor >= 1.0, "from previous assert");
247}
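// A small worked example of the factor computed above, with purely
// illustrative sizes: if the promoting (young) generation's minimum object
// size -- CollectedHeap::min_fill_size() -- were 3 HeapWords while
// MinChunkSize were 4 HeapWords, a minimal young-gen object could grow by a
// factor of 4/3 ~= 1.33 when promoted, which is what _dilatation_factor is
// intended to capture.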
248
249
250// The field "_initiating_occupancy" represents the occupancy percentage
251// at which we trigger a new collection cycle.  Unless explicitly specified
252// via CMSInitiatingOccupancyFraction (argument "io" below), it
253// is calculated by:
254//
255//   Let "f" be MinHeapFreeRatio in
256//
257//    _initiating_occupancy = 100-f +
258//                           f * (CMSTriggerRatio/100)
259//   where CMSTriggerRatio is the argument "tr" below.
260//
261// That is, if we assume the heap is at its desired maximum occupancy at the
262// end of a collection, we let CMSTriggerRatio of the (purported) free
263// space be allocated before initiating a new collection cycle.
264//
265void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
266  assert(io <= 100 && tr <= 100, "Check the arguments");
267  if (io >= 0) {
268    _initiating_occupancy = (double)io / 100.0;
269  } else {
270    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
271                             (double)(tr * MinHeapFreeRatio) / 100.0)
272                            / 100.0;
273  }
274}
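// A worked example of the formula above: with CMSInitiatingOccupancyFraction
// left unset (io < 0), MinHeapFreeRatio = 40 and CMSTriggerRatio = 80 (the
// usual defaults), the else branch gives
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92,
// i.e. a concurrent cycle is considered once the generation is ~92% full.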
275
276void ConcurrentMarkSweepGeneration::ref_processor_init() {
277  assert(collector() != NULL, "no collector");
278  collector()->ref_processor_init();
279}
280
281void CMSCollector::ref_processor_init() {
282  if (_ref_processor == NULL) {
283    // Allocate and initialize a reference processor
284    _ref_processor =
285      new ReferenceProcessor(_span,                               // span
286                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
287                             ParallelGCThreads,                   // mt processing degree
288                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
289                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
290                             _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
291                             &_is_alive_closure);                 // closure for liveness info
292    // Initialize the _ref_processor field of CMSGen
293    _cmsGen->set_ref_processor(_ref_processor);
294
295  }
296}
297
298AdaptiveSizePolicy* CMSCollector::size_policy() {
299  GenCollectedHeap* gch = GenCollectedHeap::heap();
300  return gch->gen_policy()->size_policy();
301}
302
303void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
304
305  const char* gen_name = "old";
306  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
307  // Generation Counters - generation 1, 1 subspace
308  _gen_counters = new GenerationCounters(gen_name, 1, 1,
309      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
310
311  _space_counters = new GSpaceCounters(gen_name, 0,
312                                       _virtual_space.reserved_size(),
313                                       this, _gen_counters);
314}
315
316CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
317  _cms_gen(cms_gen)
318{
319  assert(alpha <= 100, "bad value");
320  _saved_alpha = alpha;
321
322  // Initialize the alphas to the bootstrap value of 100.
323  _gc0_alpha = _cms_alpha = 100;
324
325  _cms_begin_time.update();
326  _cms_end_time.update();
327
328  _gc0_duration = 0.0;
329  _gc0_period = 0.0;
330  _gc0_promoted = 0;
331
332  _cms_duration = 0.0;
333  _cms_period = 0.0;
334  _cms_allocated = 0;
335
336  _cms_used_at_gc0_begin = 0;
337  _cms_used_at_gc0_end = 0;
338  _allow_duty_cycle_reduction = false;
339  _valid_bits = 0;
340}
341
342double CMSStats::cms_free_adjustment_factor(size_t free) const {
343  // TBD: CR 6909490
344  return 1.0;
345}
346
347void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
348}
349
350// If promotion failure handling is on, use
351// the padded average size of the promotion for each
352// young generation collection.
353double CMSStats::time_until_cms_gen_full() const {
354  size_t cms_free = _cms_gen->cmsSpace()->free();
355  GenCollectedHeap* gch = GenCollectedHeap::heap();
356  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
357                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
358  if (cms_free > expected_promotion) {
359    // Start a cms collection if there isn't enough space to promote
360    // for the next young collection.  Use the padded average as
361    // a safety factor.
362    cms_free -= expected_promotion;
363
364    // Adjust by the safety factor.
365    double cms_free_dbl = (double)cms_free;
366    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
367    // Apply a further correction factor which tries to adjust
368// for recent occurrence of concurrent mode failures.
369    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
370    cms_free_dbl = cms_free_dbl * cms_adjustment;
371
372    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
373                  cms_free, expected_promotion);
374    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
375    // Add 1 in case the consumption rate goes to zero.
376    return cms_free_dbl / (cms_consumption_rate() + 1.0);
377  }
378  return 0.0;
379}
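// A rough worked example with hypothetical numbers: if cms_free is 200 MB,
// the padded average promotion is 40 MB and CMSIncrementalSafetyFactor is 10,
// the code above works with (200 - 40) MB * 0.9 = 144 MB of usable free
// space; at a cms_consumption_rate() of 15 MB/s the estimate returned is
// roughly 144 / (15 + 1) = 9 seconds until the generation fills up.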
380
381// Compare the duration of the cms collection to the
382// time remaining before the cms generation runs out of free space.
383// Note that the time from the start of the cms collection
384// to the start of the cms sweep (less than the total
385// duration of the cms collection) could be used instead.  This
386// was tried, and some applications experienced
387// promotion failures early in execution, possibly
388// because the averages were not accurate
389// enough at the beginning.
390double CMSStats::time_until_cms_start() const {
391  // We add "gc0_period" to the "work" calculation
392  // below because this query is done (mostly) at the
393  // end of a scavenge, so we need to conservatively
394  // account for that much possible delay
395  // in the query so as to avoid concurrent mode failures
396  // due to starting the collection just a wee bit too
397  // late.
398  double work = cms_duration() + gc0_period();
399  double deadline = time_until_cms_gen_full();
400// If the estimated work will not fit in the estimated time remaining
401// until the cms generation is full, start the collection immediately.
402  if (work > deadline) {
403    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
404                          cms_duration(), gc0_period(), time_until_cms_gen_full());
405    return 0.0;
406  }
407  return work - deadline;
408}
409
410#ifndef PRODUCT
411void CMSStats::print_on(outputStream *st) const {
412  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
413  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
414               gc0_duration(), gc0_period(), gc0_promoted());
415  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
416            cms_duration(), cms_period(), cms_allocated());
417  st->print(",cms_since_beg=%g,cms_since_end=%g",
418            cms_time_since_begin(), cms_time_since_end());
419  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
420            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
421
422  if (valid()) {
423    st->print(",promo_rate=%g,cms_alloc_rate=%g",
424              promotion_rate(), cms_allocation_rate());
425    st->print(",cms_consumption_rate=%g,time_until_full=%g",
426              cms_consumption_rate(), time_until_cms_gen_full());
427  }
428  st->cr();
429}
430#endif // #ifndef PRODUCT
431
432CMSCollector::CollectorState CMSCollector::_collectorState =
433                             CMSCollector::Idling;
434bool CMSCollector::_foregroundGCIsActive = false;
435bool CMSCollector::_foregroundGCShouldWait = false;
436
437CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
438                           CardTableRS*                   ct,
439                           ConcurrentMarkSweepPolicy*     cp):
440  _cmsGen(cmsGen),
441  _ct(ct),
442  _ref_processor(NULL),    // will be set later
443  _conc_workers(NULL),     // may be set later
444  _abort_preclean(false),
445  _start_sampling(false),
446  _between_prologue_and_epilogue(false),
447  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
448  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
449                 -1 /* lock-free */, "No_lock" /* dummy */),
450  _modUnionClosurePar(&_modUnionTable),
451  // Adjust my span to cover old (cms) gen
452  _span(cmsGen->reserved()),
453  // Construct the is_alive_closure with _span & markBitMap
454  _is_alive_closure(_span, &_markBitMap),
455  _restart_addr(NULL),
456  _overflow_list(NULL),
457  _stats(cmsGen),
458  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
459                             // verify that this lock should be acquired with safepoint check.
460                             Monitor::_safepoint_check_sometimes)),
461  _eden_chunk_array(NULL),     // may be set in ctor body
462  _eden_chunk_capacity(0),     // -- ditto --
463  _eden_chunk_index(0),        // -- ditto --
464  _survivor_plab_array(NULL),  // -- ditto --
465  _survivor_chunk_array(NULL), // -- ditto --
466  _survivor_chunk_capacity(0), // -- ditto --
467  _survivor_chunk_index(0),    // -- ditto --
468  _ser_pmc_preclean_ovflw(0),
469  _ser_kac_preclean_ovflw(0),
470  _ser_pmc_remark_ovflw(0),
471  _par_pmc_remark_ovflw(0),
472  _ser_kac_ovflw(0),
473  _par_kac_ovflw(0),
474#ifndef PRODUCT
475  _num_par_pushes(0),
476#endif
477  _collection_count_start(0),
478  _verifying(false),
479  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
480  _completed_initialization(false),
481  _collector_policy(cp),
482  _should_unload_classes(CMSClassUnloadingEnabled),
483  _concurrent_cycles_since_last_unload(0),
484  _roots_scanning_options(GenCollectedHeap::SO_None),
485  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
486  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
487  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
488  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
489  _cms_start_registered(false)
490{
491  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
492    ExplicitGCInvokesConcurrent = true;
493  }
494  // Now expand the span and allocate the collection support structures
495  // (MUT, marking bit map etc.) to cover the generation subject to
496  // collection.
497
498  // For use by dirty card to oop closures.
499  _cmsGen->cmsSpace()->set_collector(this);
500
501  // Allocate MUT and marking bit map
502  {
503    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
504    if (!_markBitMap.allocate(_span)) {
505      log_warning(gc)("Failed to allocate CMS Bit Map");
506      return;
507    }
508    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
509  }
510  {
511    _modUnionTable.allocate(_span);
512    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
513  }
514
515  if (!_markStack.allocate(MarkStackSize)) {
516    log_warning(gc)("Failed to allocate CMS Marking Stack");
517    return;
518  }
519
520  // Support for multi-threaded concurrent phases
521  if (CMSConcurrentMTEnabled) {
522    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
523      // just for now
524      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
525    }
526    if (ConcGCThreads > 1) {
527      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
528                                 ConcGCThreads, true);
529      if (_conc_workers == NULL) {
530        log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
531        CMSConcurrentMTEnabled = false;
532      } else {
533        _conc_workers->initialize_workers();
534      }
535    } else {
536      CMSConcurrentMTEnabled = false;
537    }
538  }
539  if (!CMSConcurrentMTEnabled) {
540    ConcGCThreads = 0;
541  } else {
542    // Turn off CMSCleanOnEnter optimization temporarily for
543    // the MT case where it's not fixed yet; see 6178663.
544    CMSCleanOnEnter = false;
545  }
546  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
547         "Inconsistency");
548  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
549  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
550
551  // Parallel task queues; these are shared for the
552  // concurrent and stop-world phases of CMS, but
553  // are not shared with parallel scavenge (ParNew).
554  {
555    uint i;
556    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
557
558    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
559         || ParallelRefProcEnabled)
560        && num_queues > 0) {
561      _task_queues = new OopTaskQueueSet(num_queues);
562      if (_task_queues == NULL) {
563        log_warning(gc)("task_queues allocation failure.");
564        return;
565      }
566      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
567      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
568      for (i = 0; i < num_queues; i++) {
569        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
570        if (q == NULL) {
571          log_warning(gc)("work_queue allocation failure.");
572          return;
573        }
574        _task_queues->register_queue(i, q);
575      }
576      for (i = 0; i < num_queues; i++) {
577        _task_queues->queue(i)->initialize();
578        _hash_seed[i] = 17;  // copied from ParNew
579      }
580    }
581  }
582
583  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
584
585  // CMSBootstrapOccupancy is a percentage; convert it to a fraction.
586  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
587
588  // Now tell CMS generations the identity of their collector
589  ConcurrentMarkSweepGeneration::set_collector(this);
590
591  // Create & start a CMS thread for this CMS collector
592  _cmsThread = ConcurrentMarkSweepThread::start(this);
593  assert(cmsThread() != NULL, "CMS Thread should have been created");
594  assert(cmsThread()->collector() == this,
595         "CMS Thread should refer to this gen");
596  assert(CGC_lock != NULL, "Where's the CGC_lock?");
597
598  // Support for parallelizing young gen rescan
599  GenCollectedHeap* gch = GenCollectedHeap::heap();
600  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
601  _young_gen = (ParNewGeneration*)gch->young_gen();
602  if (gch->supports_inline_contig_alloc()) {
603    _top_addr = gch->top_addr();
604    _end_addr = gch->end_addr();
605    assert(_young_gen != NULL, "no _young_gen");
606    _eden_chunk_index = 0;
607    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
608    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
609  }
610
611  // Support for parallelizing survivor space rescan
612  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
613    const size_t max_plab_samples =
614      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
615
616    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
617    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
618    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
619    _survivor_chunk_capacity = max_plab_samples;
620    for (uint i = 0; i < ParallelGCThreads; i++) {
621      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
622      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
623      assert(cur->end() == 0, "Should be 0");
624      assert(cur->array() == vec, "Should be vec");
625      assert(cur->capacity() == max_plab_samples, "Error");
626    }
627  }
628
629  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
630  _gc_counters = new CollectorCounters("CMS", 1);
631  _completed_initialization = true;
632  _inter_sweep_timer.start();  // start of time
633}
634
635const char* ConcurrentMarkSweepGeneration::name() const {
636  return "concurrent mark-sweep generation";
637}
638void ConcurrentMarkSweepGeneration::update_counters() {
639  if (UsePerfData) {
640    _space_counters->update_all();
641    _gen_counters->update_all();
642  }
643}
644
645// This is an optimized version of update_counters(). It takes the
646// used value as a parameter rather than computing it.
647//
648void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
649  if (UsePerfData) {
650    _space_counters->update_used(used);
651    _space_counters->update_capacity();
652    _gen_counters->update_all();
653  }
654}
655
656void ConcurrentMarkSweepGeneration::print() const {
657  Generation::print();
658  cmsSpace()->print();
659}
660
661#ifndef PRODUCT
662void ConcurrentMarkSweepGeneration::print_statistics() {
663  cmsSpace()->printFLCensus(0);
664}
665#endif
666
667size_t
668ConcurrentMarkSweepGeneration::contiguous_available() const {
669  // dld proposes an improvement in precision here. If the committed
670  // part of the space ends in a free block we should add that to
671  // uncommitted size in the calculation below. Will make this
672  // change later, staying with the approximation below for the
673  // time being. -- ysr.
674  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
675}
676
677size_t
678ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
679  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
680}
681
682size_t ConcurrentMarkSweepGeneration::max_available() const {
683  return free() + _virtual_space.uncommitted_size();
684}
685
686bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
687  size_t available = max_available();
688  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
689  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
690  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
691                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
692  return res;
693}
694
695// At a promotion failure, dump information on block layout in the heap
696// (cms old generation).
697void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
698  Log(gc, promotion) log;
699  if (log.is_trace()) {
700    ResourceMark rm;
701    cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
702  }
703}
704
705void ConcurrentMarkSweepGeneration::reset_after_compaction() {
706  // Clear the promotion information.  These pointers can be adjusted
707  // along with all the other pointers into the heap but
708  // compaction is expected to be a rare event with
709  // a heap using cms so don't do it without seeing the need.
710  for (uint i = 0; i < ParallelGCThreads; i++) {
711    _par_gc_thread_states[i]->promo.reset();
712  }
713}
714
715void ConcurrentMarkSweepGeneration::compute_new_size() {
716  assert_locked_or_safepoint(Heap_lock);
717
718  // If incremental collection failed, we just want to expand
719  // to the limit.
720  if (incremental_collection_failed()) {
721    clear_incremental_collection_failed();
722    grow_to_reserved();
723    return;
724  }
725
726  // The heap has been compacted but not reset yet.
727  // Any metric such as free() or used() will be incorrect.
728
729  CardGeneration::compute_new_size();
730
731  // Reset again after a possible resizing
732  if (did_compact()) {
733    cmsSpace()->reset_after_compaction();
734  }
735}
736
737void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
738  assert_locked_or_safepoint(Heap_lock);
739
740  // If incremental collection failed, we just want to expand
741  // to the limit.
742  if (incremental_collection_failed()) {
743    clear_incremental_collection_failed();
744    grow_to_reserved();
745    return;
746  }
747
748  double free_percentage = ((double) free()) / capacity();
749  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
750  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
751
752  // compute expansion delta needed for reaching desired free percentage
753  if (free_percentage < desired_free_percentage) {
754    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
755    assert(desired_capacity >= capacity(), "invalid expansion size");
756    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
757    Log(gc) log;
758    if (log.is_trace()) {
759      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
760      log.trace("From compute_new_size: ");
761      log.trace("  Free fraction %f", free_percentage);
762      log.trace("  Desired free fraction %f", desired_free_percentage);
763      log.trace("  Maximum free fraction %f", maximum_free_percentage);
764      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
765      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
766      GenCollectedHeap* gch = GenCollectedHeap::heap();
767      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
768      size_t young_size = gch->young_gen()->capacity();
769      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
770      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
771      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
772      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
773    }
774    // safe if expansion fails
775    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
776    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
777  } else {
778    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
779    assert(desired_capacity <= capacity(), "invalid expansion size");
780    size_t shrink_bytes = capacity() - desired_capacity;
781    // Don't shrink unless the delta is greater than the minimum shrink we want
782    if (shrink_bytes >= MinHeapDeltaBytes) {
783      shrink_free_list_by(shrink_bytes);
784    }
785  }
786}
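// A worked example of the expansion arm above, with hypothetical numbers:
// if used() is 600 MB, capacity() is 800 MB and MinHeapFreeRatio is 40, the
// free fraction is 200/800 = 0.25 < 0.40, so
//   desired_capacity = 600 MB / (1 - 0.40) = 1000 MB
// and the generation requests an expansion of
//   MAX2(1000 MB - 800 MB, MinHeapDeltaBytes) = 200 MB
// (assuming MinHeapDeltaBytes is smaller than that delta).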
787
788Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
789  return cmsSpace()->freelistLock();
790}
791
792HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
793  CMSSynchronousYieldRequest yr;
794  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
795  return have_lock_and_allocate(size, tlab);
796}
797
798HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
799                                                                bool   tlab /* ignored */) {
800  assert_lock_strong(freelistLock());
801  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
802  HeapWord* res = cmsSpace()->allocate(adjustedSize);
803  // Allocate the object live (grey) if the background collector has
804  // started marking. This is necessary because the marker may
805  // have passed this address and consequently this object will
806  // not otherwise be greyed and would be incorrectly swept up.
807  // Note that if this object contains references, the writing
808  // of those references will dirty the card containing this object
809  // allowing the object to be blackened (and its references scanned)
810  // either during a preclean phase or at the final checkpoint.
811  if (res != NULL) {
812    // We may block here with an uninitialized object with
813    // its mark-bit or P-bits not yet set. Such objects need
814    // to be safely navigable by block_start().
815    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
816    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
817    collector()->direct_allocated(res, adjustedSize);
818    _direct_allocated_words += adjustedSize;
819    // allocation counters
820    NOT_PRODUCT(
821      _numObjectsAllocated++;
822      _numWordsAllocated += (int)adjustedSize;
823    )
824  }
825  return res;
826}
827
828// In the case of direct allocation by mutators in a generation that
829// is being concurrently collected, the object must be allocated
830// live (grey) if the background collector has started marking.
831// This is necessary because the marker may
832// have passed this address and consequently this object will
833// not otherwise be greyed and would be incorrectly swept up.
834// Note that if this object contains references, the writing
835// of those references will dirty the card containing this object
836// allowing the object to be blackened (and its references scanned)
837// either during a preclean phase or at the final checkpoint.
838void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
839  assert(_markBitMap.covers(start, size), "Out of bounds");
840  if (_collectorState >= Marking) {
841    MutexLockerEx y(_markBitMap.lock(),
842                    Mutex::_no_safepoint_check_flag);
843    // [see comments preceding SweepClosure::do_blk() below for details]
844    //
845    // Can the P-bits be deleted now?  JJJ
846    //
847    // 1. need to mark the object as live so it isn't collected
848    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
849    // 3. need to mark the end of the object so marking, precleaning or sweeping
850    //    can skip over uninitialized or unparsable objects. An allocated
851    //    object is considered uninitialized for our purposes as long as
852    //    its klass word is NULL.  All old gen objects are parsable
853    //    as soon as they are initialized.
854    _markBitMap.mark(start);          // object is live
855    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
856    _markBitMap.mark(start + size - 1);
857                                      // mark end of object
858  }
859  // check that oop looks uninitialized
860  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
861}
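// For example (illustrative sizes): a 10-word direct allocation starting at
// address A sets three bits in the mark bit map above -- A (object is live),
// A + 1 (object may still be uninitialized) and A + 9, i.e. A + size - 1
// (end of the block) -- so marking, precleaning and sweeping can step over
// the block even before its klass word has been installed.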
862
863void CMSCollector::promoted(bool par, HeapWord* start,
864                            bool is_obj_array, size_t obj_size) {
865  assert(_markBitMap.covers(start), "Out of bounds");
866  // See comment in direct_allocated() about when objects should
867  // be allocated live.
868  if (_collectorState >= Marking) {
869    // we already hold the marking bit map lock, taken in
870    // the prologue
871    if (par) {
872      _markBitMap.par_mark(start);
873    } else {
874      _markBitMap.mark(start);
875    }
876    // We don't need to mark the object as uninitialized (as
877    // in direct_allocated above) because this is being done with the
878    // world stopped and the object will be initialized by the
879    // time the marking, precleaning or sweeping get to look at it.
880    // But see the code for copying objects into the CMS generation,
881    // where we need to ensure that concurrent readers of the
882    // block offset table are able to safely navigate a block that
883    // is in flux from being free to being allocated (and in
884    // transition while being copied into) and subsequently
885    // becoming a bona-fide object when the copy/promotion is complete.
886    assert(SafepointSynchronize::is_at_safepoint(),
887           "expect promotion only at safepoints");
888
889    if (_collectorState < Sweeping) {
890      // Mark the appropriate cards in the modUnionTable, so that
891      // this object gets scanned before the sweep. If this is
892      // not done, CMS generation references in the object might
893      // not get marked.
894      // For the case of arrays, which are otherwise precisely
895      // marked, we need to dirty the entire array, not just its head.
896      if (is_obj_array) {
897        // The [par_]mark_range() method expects mr.end() below to
898        // be aligned to the granularity of a bit's representation
899        // in the heap. In the case of the MUT below, that's a
900        // card size.
901        MemRegion mr(start,
902                     (HeapWord*)round_to((intptr_t)(start + obj_size),
903                        CardTableModRefBS::card_size /* bytes */));
904        if (par) {
905          _modUnionTable.par_mark_range(mr);
906        } else {
907          _modUnionTable.mark_range(mr);
908        }
909      } else {  // not an obj array; we can just mark the head
910        if (par) {
911          _modUnionTable.par_mark(start);
912        } else {
913          _modUnionTable.mark(start);
914        }
915      }
916    }
917  }
918}
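// For instance, assuming the usual 512-byte cards, an object array whose last
// word ends at (unaligned) offset 0x2130 has mr.end() rounded up to 0x2200 by
// the round_to() call above, so every card the array spans is marked in the
// mod-union table rather than just the card holding its header.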
919
920oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
921  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
922  // allocate, copy and if necessary update promoinfo --
923  // delegate to underlying space.
924  assert_lock_strong(freelistLock());
925
926#ifndef PRODUCT
927  if (GenCollectedHeap::heap()->promotion_should_fail()) {
928    return NULL;
929  }
930#endif  // #ifndef PRODUCT
931
932  oop res = _cmsSpace->promote(obj, obj_size);
933  if (res == NULL) {
934    // expand and retry
935    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
936    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
937    // Since this is the old generation, we don't try to promote
938    // into a more senior generation.
939    res = _cmsSpace->promote(obj, obj_size);
940  }
941  if (res != NULL) {
942    // See comment in allocate() about when objects should
943    // be allocated live.
944    assert(obj->is_oop(), "Will dereference klass pointer below");
945    collector()->promoted(false,           // Not parallel
946                          (HeapWord*)res, obj->is_objArray(), obj_size);
947    // promotion counters
948    NOT_PRODUCT(
949      _numObjectsPromoted++;
950      _numWordsPromoted +=
951        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
952    )
953  }
954  return res;
955}
956
957
958// IMPORTANT: Notes on object size recognition in CMS.
959// ---------------------------------------------------
960// A block of storage in the CMS generation is always in
961// one of three states. A free block (FREE), an allocated
962// object (OBJECT) whose size() method reports the correct size,
963// and an intermediate state (TRANSIENT) in which its size cannot
964// be accurately determined.
965// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
966// -----------------------------------------------------
967// FREE:      klass_word & 1 == 1; mark_word holds block size
968//
969// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
970//            obj->size() computes correct size
971//
972// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
973//
974// STATE IDENTIFICATION: (64 bit+COOPS)
975// ------------------------------------
976// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
977//
978// OBJECT:    klass_word installed; klass_word != 0;
979//            obj->size() computes correct size
980//
981// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
982//
983//
984// STATE TRANSITION DIAGRAM
985//
986//        mut / parnew                     mut  /  parnew
987// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
988//  ^                                                                   |
989//  |------------------------ DEAD <------------------------------------|
990//         sweep                            mut
991//
992// While a block is in TRANSIENT state its size cannot be determined
993// so readers will either need to come back later or stall until
994// the size can be determined. Note that for the case of direct
995// allocation, P-bits, when available, may be used to determine the
996// size of an object that may not yet have been initialized.
997
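// A condensed sketch of the identification rules above for the 32-bit and
// 64-bit-without-COOPS case; the accessor name is illustrative only (the
// real checks are spread across the free list space and sweeping code):
//
//   intptr_t k = klass_word_of(block);     // hypothetical accessor
//   if ((k & 1) == 1) {
//     // FREE: the mark word holds the block size
//   } else if (k != 0) {
//     // OBJECT: oop(block)->size() is reliable
//   } else {
//     // TRANSIENT: size unknown; retry later, or consult P-bits if available
//   }
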
998// Things to support parallel young-gen collection.
999oop
1000ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1001                                           oop old, markOop m,
1002                                           size_t word_sz) {
1003#ifndef PRODUCT
1004  if (GenCollectedHeap::heap()->promotion_should_fail()) {
1005    return NULL;
1006  }
1007#endif  // #ifndef PRODUCT
1008
1009  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1010  PromotionInfo* promoInfo = &ps->promo;
1011  // if we are tracking promotions, then first ensure space for
1012  // promotion (including spooling space for saving header if necessary).
1013  // then allocate and copy, then track promoted info if needed.
1014  // When tracking (see PromotionInfo::track()), the mark word may
1015  // be displaced and in this case restoration of the mark word
1016  // occurs in the (oop_since_save_marks_)iterate phase.
1017  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1018    // Out of space for allocating spooling buffers;
1019    // try expanding and allocating spooling buffers.
1020    if (!expand_and_ensure_spooling_space(promoInfo)) {
1021      return NULL;
1022    }
1023  }
1024  assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
1025  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1026  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1027  if (obj_ptr == NULL) {
1028     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1029     if (obj_ptr == NULL) {
1030       return NULL;
1031     }
1032  }
1033  oop obj = oop(obj_ptr);
1034  OrderAccess::storestore();
1035  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1036  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1037  // IMPORTANT: See note on object initialization for CMS above.
1038  // Otherwise, copy the object.  Here we must be careful to insert the
1039  // klass pointer last, since this marks the block as an allocated object.
1040  // Except with compressed oops it's the mark word.
1041  HeapWord* old_ptr = (HeapWord*)old;
1042  // Restore the mark word copied above.
1043  obj->set_mark(m);
1044  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1045  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1046  OrderAccess::storestore();
1047
1048  if (UseCompressedClassPointers) {
1049    // Copy gap missed by (aligned) header size calculation below
1050    obj->set_klass_gap(old->klass_gap());
1051  }
1052  if (word_sz > (size_t)oopDesc::header_size()) {
1053    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1054                                 obj_ptr + oopDesc::header_size(),
1055                                 word_sz - oopDesc::header_size());
1056  }
1057
1058  // Now we can track the promoted object, if necessary.  We take care
1059  // to delay the transition from uninitialized to full object
1060  // (i.e., insertion of klass pointer) until after, so that it
1061  // atomically becomes a promoted object.
1062  if (promoInfo->tracking()) {
1063    promoInfo->track((PromotedObject*)obj, old->klass());
1064  }
1065  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1066  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1067  assert(old->is_oop(), "Will use and dereference old klass ptr below");
1068
1069  // Finally, install the klass pointer (this should be volatile).
1070  OrderAccess::storestore();
1071  obj->set_klass(old->klass());
1072  // We should now be able to calculate the right size for this object
1073  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1074
1075  collector()->promoted(true,          // parallel
1076                        obj_ptr, old->is_objArray(), word_sz);
1077
1078  NOT_PRODUCT(
1079    Atomic::inc_ptr(&_numObjectsPromoted);
1080    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1081  )
1082
1083  return obj;
1084}
1085
1086void
1087ConcurrentMarkSweepGeneration::
1088par_promote_alloc_done(int thread_num) {
1089  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1090  ps->lab.retire(thread_num);
1091}
1092
1093void
1094ConcurrentMarkSweepGeneration::
1095par_oop_since_save_marks_iterate_done(int thread_num) {
1096  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1097  ParScanWithoutBarrierClosure* dummy_cl = NULL;
1098  ps->promo.promoted_oops_iterate_nv(dummy_cl);
1099
1100  // Because card-scanning has been completed, subsequent phases
1101  // (e.g., reference processing) will not need to recognize which
1102  // objects have been promoted during this GC. So, we can now disable
1103  // promotion tracking.
1104  ps->promo.stopTrackingPromotions();
1105}
1106
1107bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1108                                                   size_t size,
1109                                                   bool   tlab)
1110{
1111  // We allow a STW collection only if a full
1112  // collection was requested.
1113  return full || should_allocate(size, tlab); // FIX ME !!!
1114  // This and promotion failure handling are connected at the
1115  // hip and should be fixed by untying them.
1116}
1117
1118bool CMSCollector::shouldConcurrentCollect() {
1119  LogTarget(Trace, gc) log;
1120
1121  if (_full_gc_requested) {
1122    log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
1123    return true;
1124  }
1125
1126  FreelistLocker x(this);
1127  // ------------------------------------------------------------------
1128  // Print out lots of information which affects the initiation of
1129  // a collection.
1130  if (log.is_enabled() && stats().valid()) {
1131    log.print("CMSCollector shouldConcurrentCollect: ");
1132
1133    LogStream out(log);
1134    stats().print_on(&out);
1135
1136    log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
1137    log.print("free=" SIZE_FORMAT, _cmsGen->free());
1138    log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
1139    log.print("promotion_rate=%g", stats().promotion_rate());
1140    log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
1141    log.print("occupancy=%3.7f", _cmsGen->occupancy());
1142    log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1143    log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1144    log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1145    log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
1146  }
1147  // ------------------------------------------------------------------
1148
1149  // If the estimated time to complete a cms collection (cms_duration())
1150  // is less than the estimated time remaining until the cms generation
1151  // is full, start a collection.
1152  if (!UseCMSInitiatingOccupancyOnly) {
1153    if (stats().valid()) {
1154      if (stats().time_until_cms_start() == 0.0) {
1155        return true;
1156      }
1157    } else {
1158      // We want to conservatively collect somewhat early in order
1159      // to try and "bootstrap" our CMS/promotion statistics;
1160      // this branch will not fire after the first successful CMS
1161      // collection because the stats should then be valid.
1162      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1163        log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
1164                  _cmsGen->occupancy(), _bootstrap_occupancy);
1165        return true;
1166      }
1167    }
1168  }
1169
1170  // Otherwise, we start a collection cycle if the old gen wants a
1171  // collection cycle started, using an appropriate criterion for
1172  // making this decision.
1173  // XXX We need to make sure that the gen expansion
1174  // criterion dovetails well with this. XXX NEED TO FIX THIS
1175  if (_cmsGen->should_concurrent_collect()) {
1176    log.print("CMS old gen initiated");
1177    return true;
1178  }
1179
1180  // We start a collection if we believe an incremental collection may fail;
1181  // this is not likely to be productive in practice because it's probably too
1182  // late anyway.
1183  GenCollectedHeap* gch = GenCollectedHeap::heap();
1184  assert(gch->collector_policy()->is_generation_policy(),
1185         "You may want to check the correctness of the following");
1186  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1187    log.print("CMSCollector: collect because incremental collection will fail ");
1188    return true;
1189  }
1190
1191  if (MetaspaceGC::should_concurrent_collect()) {
1192    log.print("CMSCollector: collect for metadata allocation ");
1193    return true;
1194  }
1195
1196  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1197  if (CMSTriggerInterval >= 0) {
1198    if (CMSTriggerInterval == 0) {
1199      // Trigger always
1200      return true;
1201    }
1202
1203    // Check the CMS time since begin (we do not check the stats validity
1204    // as we want to be able to trigger the first CMS cycle as well)
1205    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1206      if (stats().valid()) {
1207        log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1208                  stats().cms_time_since_begin());
1209      } else {
1210        log.print("CMSCollector: collect because of trigger interval (first collection)");
1211      }
1212      return true;
1213    }
1214  }
1215
1216  return false;
1217}
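// As a concrete example of the last check above: running with
// -XX:CMSTriggerInterval=600000 makes this method return true once
// stats().cms_time_since_begin() reaches 600000 / MILLIUNITS = 600 seconds
// since the previous cycle began, even if none of the occupancy- or
// statistics-based criteria would have started a cycle yet.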
1218
1219void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1220
1221// Clear the _expansion_cause field of the CMS generation
1222void CMSCollector::clear_expansion_cause() {
1223  _cmsGen->clear_expansion_cause();
1224}
1225
1226// We should be conservative in starting a collection cycle.  To
1227// start too eagerly runs the risk of collecting too often in the
1228// extreme.  Collecting too rarely falls back on full collections,
1229// which works, even if not optimal in terms of concurrent work.
1230// As a workaround for collecting too eagerly, use the flag
1231// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1232// giving the user an easily understandable way of controlling the
1233// collections.
1234// We want to start a new collection cycle if any of the following
1235// conditions hold:
1236// . our current occupancy exceeds the configured initiating occupancy
1237//   for this generation, or
1238// . we recently needed to expand this space and have not, since that
1239//   expansion, done a collection of this generation, or
1240// . the underlying space believes that it may be a good idea to initiate
1241//   a concurrent collection (this may be based on criteria such as the
1242//   following: the space uses linear allocation and linear allocation is
1243//   going to fail, or there is believed to be excessive fragmentation in
1244//   the generation, etc... or ...
1245// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1246//   the case of the old generation; see CR 6543076):
1247//   we may be approaching a point at which allocation requests may fail because
1248//   we will be out of sufficient free space given allocation rate estimates.]
1249bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1250
1251  assert_lock_strong(freelistLock());
1252  if (occupancy() > initiating_occupancy()) {
1253    log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
1254                  short_name(), occupancy(), initiating_occupancy());
1255    return true;
1256  }
1257  if (UseCMSInitiatingOccupancyOnly) {
1258    return false;
1259  }
1260  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1261    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
1262    return true;
1263  }
1264  return false;
1265}
1266
1267void ConcurrentMarkSweepGeneration::collect(bool   full,
1268                                            bool   clear_all_soft_refs,
1269                                            size_t size,
1270                                            bool   tlab)
1271{
1272  collector()->collect(full, clear_all_soft_refs, size, tlab);
1273}
1274
1275void CMSCollector::collect(bool   full,
1276                           bool   clear_all_soft_refs,
1277                           size_t size,
1278                           bool   tlab)
1279{
1280  // The following "if" branch is present for defensive reasons.
1281  // In the current uses of this interface, it can be replaced with:
1282  // assert(!GCLocker.is_active(), "Can't be called otherwise");
1283  // But I am not placing that assert here to allow future
1284  // generality in invoking this interface.
1285  if (GCLocker::is_active()) {
1286    // A consistency test for GCLocker
1287    assert(GCLocker::needs_gc(), "Should have been set already");
1288    // Skip this foreground collection, instead
1289    // expanding the heap if necessary.
1290    // Need the free list locks for the call to free() in compute_new_size()
1291    compute_new_size();
1292    return;
1293  }
1294  acquire_control_and_collect(full, clear_all_soft_refs);
1295}
1296
1297void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1298  GenCollectedHeap* gch = GenCollectedHeap::heap();
1299  unsigned int gc_count = gch->total_full_collections();
1300  if (gc_count == full_gc_count) {
1301    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1302    _full_gc_requested = true;
1303    _full_gc_cause = cause;
1304    CGC_lock->notify();   // nudge CMS thread
1305  } else {
1306    assert(gc_count > full_gc_count, "Error: causal loop");
1307  }
1308}
1309
1310bool CMSCollector::is_external_interruption() {
1311  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1312  return GCCause::is_user_requested_gc(cause) ||
1313         GCCause::is_serviceability_requested_gc(cause);
1314}
1315
1316void CMSCollector::report_concurrent_mode_interruption() {
1317  if (is_external_interruption()) {
1318    log_debug(gc)("Concurrent mode interrupted");
1319  } else {
1320    log_debug(gc)("Concurrent mode failure");
1321    _gc_tracer_cm->report_concurrent_mode_failure();
1322  }
1323}
1324
1325
1326// The foreground and background collectors need to coordinate in order
1327// to make sure that they do not mutually interfere with CMS collections.
1328// When a background collection is active,
1329// the foreground collector may need to take over (preempt) and
1330// synchronously complete an ongoing collection. Depending on the
1331// frequency of the background collections and the heap usage
1332// of the application, this preemption can be rare or frequent.
1333// There are only certain
1334// points in the background collection at which the "collection-baton"
1335// can be passed to the foreground collector.
1336//
1337// The foreground collector will wait for the baton before
1338// starting any part of the collection.  The foreground collector
1339// will only wait at one location.
1340//
1341// The background collector will yield the baton before starting a new
1342// phase of the collection (e.g., before initial marking, marking from roots,
1343// precleaning, final re-mark, sweep etc.)  This is normally done at the head
1344// of the loop which switches the phases. The background collector does some
1345// of the phases (initial mark, final re-mark) with the world stopped.
1346// Because of locking involved in stopping the world,
1347// the foreground collector should not block waiting for the background
1348// collector when it is doing a stop-the-world phase.  The background
1349// collector will yield the baton at an additional point just before
1350// it enters a stop-the-world phase.  Once the world is stopped, the
1351// background collector checks the phase of the collection.  If the
1352// phase has not changed, it proceeds with the collection.  If the
1353// phase has changed, it skips that phase of the collection.  See
1354// the comments on the use of the Heap_lock in collect_in_background().
1355//
1356// Variable used in baton passing.
1357//   _foregroundGCIsActive - Set to true by the foreground collector when
1358//      it wants the baton.  The foreground clears it when it has finished
1359//      the collection.
1360//   _foregroundGCShouldWait - Set to true by the background collector
1361//        when it is running.  The foreground collector waits while
1362//      _foregroundGCShouldWait is true.
1363//  CGC_lock - monitor used to protect access to the above variables
1364//      and to notify the foreground and background collectors.
1365//  _collectorState - current state of the CMS collection.
1366//
1367// The foreground collector
1368//   acquires the CGC_lock
1369//   sets _foregroundGCIsActive
1370//   waits on the CGC_lock for _foregroundGCShouldWait to be false
1371//     various locks acquired in preparation for the collection
1372//     are released so as not to block the background collector
1373//     that is in the midst of a collection
1374//   proceeds with the collection
1375//   clears _foregroundGCIsActive
1376//   returns
1377//
1378// The background collector in a loop iterating on the phases of the
1379//      collection
1380//   acquires the CGC_lock
1381//   sets _foregroundGCShouldWait
1382//   if _foregroundGCIsActive is set
1383//     clears _foregroundGCShouldWait, notifies CGC_lock
1384//     waits on CGC_lock for _foregroundGCIsActive to become false
1385//     and exits the loop.
1386//   otherwise
1387//     proceed with that phase of the collection
1388//     if the phase is a stop-the-world phase,
1389//       yield the baton once more just before enqueueing
1390//       the stop-world CMS operation (executed by the VM thread).
1391//   returns after all phases of the collection are done
1392//
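//
// As a purely illustrative aid (not part of this file), the protocol above
// can be reduced to the following self-contained sketch, using standard C++
// primitives in place of CGC_lock; all names in the sketch are made up:
//
//   #include <condition_variable>
//   #include <mutex>
//
//   struct BatonSketch {
//     std::mutex              lock;              // plays the role of CGC_lock
//     std::condition_variable cv;
//     bool fg_is_active   = false;               // cf. _foregroundGCIsActive
//     bool fg_should_wait = false;               // cf. _foregroundGCShouldWait
//
//     // Foreground (VM thread): ask for the baton, wait for it, collect.
//     void foreground_collect() {
//       {
//         std::unique_lock<std::mutex> x(lock);
//         fg_is_active = true;
//         cv.notify_all();                       // wake a waiting background thread
//         cv.wait(x, [this] { return !fg_should_wait; });
//       }
//       // ... do the foreground (STW) collection without holding the lock ...
//       std::lock_guard<std::mutex> y(lock);
//       fg_is_active = false;
//       cv.notify_all();
//     }
//
//     // Background (CMS thread): called at the head of each phase. Returns
//     // true if the baton was yielded and the cycle should be abandoned.
//     bool yield_if_foreground_wants_baton() {
//       std::unique_lock<std::mutex> x(lock);
//       fg_should_wait = true;
//       if (fg_is_active) {
//         fg_should_wait = false;
//         cv.notify_all();                       // unblock the foreground collector
//         cv.wait(x, [this] { return !fg_is_active; });
//         return true;
//       }
//       return false;                            // proceed with this phase
//     }
//
//     // Background (CMS thread): called once the cycle is done or abandoned.
//     void background_done() {
//       std::lock_guard<std::mutex> x(lock);
//       fg_should_wait = false;
//       cv.notify_all();
//     }
//   };
//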
1393
1394void CMSCollector::acquire_control_and_collect(bool full,
1395        bool clear_all_soft_refs) {
1396  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1397  assert(!Thread::current()->is_ConcurrentGC_thread(),
1398         "shouldn't try to acquire control from self!");
1399
1400  // Start the protocol for acquiring control of the
1401  // collection from the background collector (aka CMS thread).
1402  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1403         "VM thread should have CMS token");
1404  // Remember the possibly interrupted state of an ongoing
1405  // concurrent collection
1406  CollectorState first_state = _collectorState;
1407
1408  // Signal to a possibly ongoing concurrent collection that
1409  // we want to do a foreground collection.
1410  _foregroundGCIsActive = true;
1411
1412  // release locks and wait for a notify from the background collector
1413// releasing the locks is only necessary for phases which
1414  // do yields to improve the granularity of the collection.
1415  assert_lock_strong(bitMapLock());
1416  // We need to lock the Free list lock for the space that we are
1417  // currently collecting.
1418  assert(haveFreelistLocks(), "Must be holding free list locks");
1419  bitMapLock()->unlock();
1420  releaseFreelistLocks();
1421  {
1422    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1423    if (_foregroundGCShouldWait) {
1424      // We are going to be waiting for action from the CMS thread;
1425      // it had better not be gone (for instance at shutdown)!
1426      assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
1427             "CMS thread must be running");
1428      // Wait here until the background collector gives us the go-ahead
1429      ConcurrentMarkSweepThread::clear_CMS_flag(
1430        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1431      // Get a possibly blocked CMS thread going:
1432      //   Note that we set _foregroundGCIsActive true above,
1433      //   without protection of the CGC_lock.
1434      CGC_lock->notify();
1435      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1436             "Possible deadlock");
1437      while (_foregroundGCShouldWait) {
1438        // wait for notification
1439        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1440        // Possibility of delay/starvation here, since CMS token does
1441        // not know to give priority to the VM thread? Actually, I think
1442        // there wouldn't be any delay/starvation, but the proof of
1443        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1444      }
1445      ConcurrentMarkSweepThread::set_CMS_flag(
1446        ConcurrentMarkSweepThread::CMS_vm_has_token);
1447    }
1448  }
1449  // The CMS_token is already held.  Get back the other locks.
1450  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1451         "VM thread should have CMS token");
1452  getFreelistLocks();
1453  bitMapLock()->lock_without_safepoint_check();
1454  log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1455                       p2i(Thread::current()), first_state);
1456  log_debug(gc, state)("    gets control with state %d", _collectorState);
1457
1458  // Inform cms gen if this was due to partial collection failing.
1459  // The CMS gen may use this fact to determine its expansion policy.
1460  GenCollectedHeap* gch = GenCollectedHeap::heap();
1461  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1462    assert(!_cmsGen->incremental_collection_failed(),
1463           "Should have been noticed, reacted to and cleared");
1464    _cmsGen->set_incremental_collection_failed();
1465  }
1466
1467  if (first_state > Idling) {
1468    report_concurrent_mode_interruption();
1469  }
1470
1471  set_did_compact(true);
1472
1473  // If the collection is being acquired from the background
1474  // collector, there may be references on the discovered
1475  // references lists.  Abandon those references, since some
1476  // of them may have become unreachable after concurrent
1477  // discovery; the STW compacting collector will redo discovery
1478  // more precisely, without being subject to floating garbage.
1479  // Leaving otherwise unreachable references in the discovered
1480  // lists would require special handling.
1481  ref_processor()->disable_discovery();
1482  ref_processor()->abandon_partial_discovery();
1483  ref_processor()->verify_no_references_recorded();
1484
1485  if (first_state > Idling) {
1486    save_heap_summary();
1487  }
1488
1489  do_compaction_work(clear_all_soft_refs);
1490
1491  // Has the GC time limit been exceeded?
1492  size_t max_eden_size = _young_gen->max_eden_size();
1493  GCCause::Cause gc_cause = gch->gc_cause();
1494  size_policy()->check_gc_overhead_limit(_young_gen->used(),
1495                                         _young_gen->eden()->used(),
1496                                         _cmsGen->max_capacity(),
1497                                         max_eden_size,
1498                                         full,
1499                                         gc_cause,
1500                                         gch->collector_policy());
1501
1502  // Reset the expansion cause, now that we just completed
1503  // a collection cycle.
1504  clear_expansion_cause();
1505  _foregroundGCIsActive = false;
1506  return;
1507}
1508
1509// Resize the tenured generation
1510// after obtaining the free list locks for the
1511// two generations.
1512void CMSCollector::compute_new_size() {
1513  assert_locked_or_safepoint(Heap_lock);
1514  FreelistLocker z(this);
1515  MetaspaceGC::compute_new_size();
1516  _cmsGen->compute_new_size_free_list();
1517}
1518
1519// A work method used by the foreground collector to do
1520// a mark-sweep-compact.
1521void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1522  GenCollectedHeap* gch = GenCollectedHeap::heap();
1523
1524  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1525  gc_timer->register_gc_start();
1526
1527  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1528  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1529
1530  gch->pre_full_gc_dump(gc_timer);
1531
1532  GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1533
1534  // Temporarily widen the span of the weak reference processing to
1535  // the entire heap.
1536  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1537  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1538  // Temporarily, clear the "is_alive_non_header" field of the
1539  // reference processor.
1540  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1541  // Temporarily make reference _processing_ single threaded (non-MT).
1542  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1543  // Temporarily make refs discovery atomic
1544  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1545  // Temporarily make reference _discovery_ single threaded (non-MT)
1546  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1547
1548  ref_processor()->set_enqueuing_is_done(false);
1549  ref_processor()->enable_discovery();
1550  ref_processor()->setup_policy(clear_all_soft_refs);
1551  // If an asynchronous collection finishes, the _modUnionTable is
1552  // all clear.  If we are taking over the collection from an asynchronous
1553  // collection, clear the _modUnionTable.
1554  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1555    "_modUnionTable should be clear if the baton was not passed");
1556  _modUnionTable.clear_all();
1557  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1558    "mod union for klasses should be clear if the baton was passed");
1559  _ct->klass_rem_set()->clear_mod_union();
1560
1561  // We must adjust the allocation statistics being maintained
1562  // in the free list space. We do so by reading and clearing
1563  // the sweep timer and updating the block flux rate estimates below.
1564  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1565  if (_inter_sweep_timer.is_active()) {
1566    _inter_sweep_timer.stop();
1567    // Note that we do not use this sample to update the _inter_sweep_estimate.
1568    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1569                                            _inter_sweep_estimate.padded_average(),
1570                                            _intra_sweep_estimate.padded_average());
1571  }
1572
1573  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1574  #ifdef ASSERT
1575    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1576    size_t free_size = cms_space->free();
1577    assert(free_size ==
1578           pointer_delta(cms_space->end(), cms_space->compaction_top())
1579           * HeapWordSize,
1580      "All the free space should be compacted into one chunk at top");
1581    assert(cms_space->dictionary()->total_chunk_size(
1582                                      debug_only(cms_space->freelistLock())) == 0 ||
1583           cms_space->totalSizeInIndexedFreeLists() == 0,
1584      "All the free space should be in a single chunk");
1585    size_t num = cms_space->totalCount();
1586    assert((free_size == 0 && num == 0) ||
1587           (free_size > 0  && (num == 1 || num == 2)),
1588         "There should be at most 2 free chunks after compaction");
1589  #endif // ASSERT
1590  _collectorState = Resetting;
1591  assert(_restart_addr == NULL,
1592         "Should have been NULL'd before baton was passed");
1593  reset_stw();
1594  _cmsGen->reset_after_compaction();
1595  _concurrent_cycles_since_last_unload = 0;
1596
1597  // Clear any data recorded in the PLAB chunk arrays.
1598  if (_survivor_plab_array != NULL) {
1599    reset_survivor_plab_arrays();
1600  }
1601
1602  // Adjust the per-size allocation stats for the next epoch.
1603  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1604  // Restart the "inter sweep timer" for the next epoch.
1605  _inter_sweep_timer.reset();
1606  _inter_sweep_timer.start();
1607
1608  // No longer a need to do a concurrent collection for Metaspace.
1609  MetaspaceGC::set_should_concurrent_collect(false);
1610
1611  gch->post_full_gc_dump(gc_timer);
1612
1613  gc_timer->register_gc_end();
1614
1615  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1616
1617  // For a mark-sweep-compact, compute_new_size() will be called
1618  // in the heap's do_collection() method.
1619}
1620
1621void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1622  Log(gc, heap) log;
1623  if (!log.is_trace()) {
1624    return;
1625  }
1626
1627  ContiguousSpace* eden_space = _young_gen->eden();
1628  ContiguousSpace* from_space = _young_gen->from();
1629  ContiguousSpace* to_space   = _young_gen->to();
1630  // Eden
1631  if (_eden_chunk_array != NULL) {
1632    log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1633              p2i(eden_space->bottom()), p2i(eden_space->top()),
1634              p2i(eden_space->end()), eden_space->capacity());
1635    log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
1636              _eden_chunk_index, _eden_chunk_capacity);
1637    for (size_t i = 0; i < _eden_chunk_index; i++) {
1638      log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
1639    }
1640  }
1641  // Survivor
1642  if (_survivor_chunk_array != NULL) {
1643    log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1644              p2i(from_space->bottom()), p2i(from_space->top()),
1645              p2i(from_space->end()), from_space->capacity());
1646    log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
1647              _survivor_chunk_index, _survivor_chunk_capacity);
1648    for (size_t i = 0; i < _survivor_chunk_index; i++) {
1649      log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
1650    }
1651  }
1652}
1653
1654void CMSCollector::getFreelistLocks() const {
1655  // Get locks for all free lists in all generations that this
1656  // collector is responsible for
1657  _cmsGen->freelistLock()->lock_without_safepoint_check();
1658}
1659
1660void CMSCollector::releaseFreelistLocks() const {
1661  // Release locks for all free lists in all generations that this
1662  // collector is responsible for
1663  _cmsGen->freelistLock()->unlock();
1664}
1665
1666bool CMSCollector::haveFreelistLocks() const {
1667  // Check locks for all free lists in all generations that this
1668  // collector is responsible for
1669  assert_lock_strong(_cmsGen->freelistLock());
1670  PRODUCT_ONLY(ShouldNotReachHere());
1671  return true;
1672}
1673
1674// A utility class that is used by the CMS collector to
1675// temporarily "release" the foreground collector from its
1676// usual obligation to wait for the background collector to
1677// complete an ongoing phase before proceeding.
1678class ReleaseForegroundGC: public StackObj {
1679 private:
1680  CMSCollector* _c;
1681 public:
1682  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1683    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1684    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1685    // allow a potentially blocked foreground collector to proceed
1686    _c->_foregroundGCShouldWait = false;
1687    if (_c->_foregroundGCIsActive) {
1688      CGC_lock->notify();
1689    }
1690    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1691           "Possible deadlock");
1692  }
1693
1694  ~ReleaseForegroundGC() {
1695    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1696    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1697    _c->_foregroundGCShouldWait = true;
1698  }
1699};
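// Typical (illustrative) use, mirroring collect_in_background() below: scope
// the object around a stop-the-world VM operation so that the destructor
// re-asserts _foregroundGCShouldWait once the operation completes:
//   {
//     ReleaseForegroundGC x(this);        // foreground may run while we are in here
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                     // _foregroundGCShouldWait is true again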
1700
1701void CMSCollector::collect_in_background(GCCause::Cause cause) {
1702  assert(Thread::current()->is_ConcurrentGC_thread(),
1703    "A CMS asynchronous collection is only allowed on a CMS thread.");
1704
1705  GenCollectedHeap* gch = GenCollectedHeap::heap();
1706  {
1707    bool safepoint_check = Mutex::_no_safepoint_check_flag;
1708    MutexLockerEx hl(Heap_lock, safepoint_check);
1709    FreelistLocker fll(this);
1710    MutexLockerEx x(CGC_lock, safepoint_check);
1711    if (_foregroundGCIsActive) {
1712      // The foreground collector is active. Skip this
1713      // background collection.
1714      assert(!_foregroundGCShouldWait, "Should be clear");
1715      return;
1716    } else {
1717      assert(_collectorState == Idling, "Should be idling before start.");
1718      _collectorState = InitialMarking;
1719      register_gc_start(cause);
1720      // Reset the expansion cause, now that we are about to begin
1721      // a new cycle.
1722      clear_expansion_cause();
1723
1724      // Clear the MetaspaceGC flag since a concurrent collection
1725      // is starting but also clear it after the collection.
1726      MetaspaceGC::set_should_concurrent_collect(false);
1727    }
1728    // Decide if we want to enable class unloading as part of the
1729    // ensuing concurrent GC cycle.
1730    update_should_unload_classes();
1731    _full_gc_requested = false;           // acks all outstanding full gc requests
1732    _full_gc_cause = GCCause::_no_gc;
1733    // Signal that we are about to start a collection
1734    gch->increment_total_full_collections();  // ... starting a collection cycle
1735    _collection_count_start = gch->total_full_collections();
1736  }
1737
1738  size_t prev_used = _cmsGen->used();
1739
1740  // The change of the collection state is normally done at this level;
1741  // the exceptions are phases that are executed while the world is
1742  // stopped.  For those phases the change of state is done while the
1743  // world is stopped.  For baton passing purposes this allows the
1744  // background collector to finish the phase and change state atomically.
1745  // The foreground collector cannot wait on a phase that is done
1746  // while the world is stopped because the foreground collector already
1747  // has the world stopped and would deadlock.
1748  while (_collectorState != Idling) {
1749    log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1750                         p2i(Thread::current()), _collectorState);
1751    // The foreground collector
1752    //   holds the Heap_lock throughout its collection.
1753    //   holds the CMS token (but not the lock)
1754    //     except while it is waiting for the background collector to yield.
1755    //
1756    // The foreground collector should be blocked (not for long)
1757    //   if the background collector is about to start a phase
1758    //   executed with world stopped.  If the background
1759    //   collector has already started such a phase, the
1760    //   foreground collector is blocked waiting for the
1761    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1762    //   are executed in the VM thread.
1763    //
1764    // The locking order is
1765    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1766    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1767    //   CMS token  (claimed in
1768    //                stop_world_and_do() -->
1769    //                  safepoint_synchronize() -->
1770    //                    CMSThread::synchronize())
1771
1772    {
1773      // Check if the FG collector wants us to yield.
1774      CMSTokenSync x(true); // is cms thread
1775      if (waitForForegroundGC()) {
1776        // We yielded to a foreground GC, nothing more to be
1777        // done this round.
1778        assert(_foregroundGCShouldWait == false, "We set it to false in "
1779               "waitForForegroundGC()");
1780        log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1781                             p2i(Thread::current()), _collectorState);
1782        return;
1783      } else {
1784        // The background collector can run but check to see if the
1785        // foreground collector has done a collection while the
1786        // background collector was waiting to get the CGC_lock
1787        // above.  If yes, break so that _foregroundGCShouldWait
1788        // is cleared before returning.
1789        if (_collectorState == Idling) {
1790          break;
1791        }
1792      }
1793    }
1794
1795    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1796      "should be waiting");
1797
1798    switch (_collectorState) {
1799      case InitialMarking:
1800        {
1801          ReleaseForegroundGC x(this);
1802          stats().record_cms_begin();
1803          VM_CMS_Initial_Mark initial_mark_op(this);
1804          VMThread::execute(&initial_mark_op);
1805        }
1806        // The collector state may be any legal state at this point
1807        // since the background collector may have yielded to the
1808        // foreground collector.
1809        break;
1810      case Marking:
1811        // initial marking in checkpointRootsInitialWork has been completed
1812        if (markFromRoots()) { // we were successful
1813          assert(_collectorState == Precleaning, "Collector state should "
1814            "have changed");
1815        } else {
1816          assert(_foregroundGCIsActive, "Internal state inconsistency");
1817        }
1818        break;
1819      case Precleaning:
1820        // marking from roots in markFromRoots has been completed
1821        preclean();
1822        assert(_collectorState == AbortablePreclean ||
1823               _collectorState == FinalMarking,
1824               "Collector state should have changed");
1825        break;
1826      case AbortablePreclean:
1827        abortable_preclean();
1828        assert(_collectorState == FinalMarking, "Collector state should "
1829          "have changed");
1830        break;
1831      case FinalMarking:
1832        {
1833          ReleaseForegroundGC x(this);
1834
1835          VM_CMS_Final_Remark final_remark_op(this);
1836          VMThread::execute(&final_remark_op);
1837        }
1838        assert(_foregroundGCShouldWait, "block post-condition");
1839        break;
1840      case Sweeping:
1841        // final marking in checkpointRootsFinal has been completed
1842        sweep();
1843        assert(_collectorState == Resizing, "Collector state change "
1844          "to Resizing must be done under the free_list_lock");
1845
1846      case Resizing: {
1847        // Sweeping has been completed...
1848        // At this point the background collection has completed.
1849        // Don't move the call to compute_new_size() down
1850        // into code that might be executed if the background
1851        // collection was preempted.
1852        {
1853          ReleaseForegroundGC x(this);   // unblock FG collection
1854          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1855          CMSTokenSync        z(true);   // not strictly needed.
1856          if (_collectorState == Resizing) {
1857            compute_new_size();
1858            save_heap_summary();
1859            _collectorState = Resetting;
1860          } else {
1861            assert(_collectorState == Idling, "The state should only change"
1862                   " because the foreground collector has finished the collection");
1863          }
1864        }
1865        break;
1866      }
1867      case Resetting:
1868        // CMS heap resizing has been completed
1869        reset_concurrent();
1870        assert(_collectorState == Idling, "Collector state should "
1871          "have changed");
1872
1873        MetaspaceGC::set_should_concurrent_collect(false);
1874
1875        stats().record_cms_end();
1876        // Don't move the concurrent_phases_end() and compute_new_size()
1877        // calls to here because a preempted background collection
1878        // has its state set to "Resetting".
1879        break;
1880      case Idling:
1881      default:
1882        ShouldNotReachHere();
1883        break;
1884    }
1885    log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1886                         p2i(Thread::current()), _collectorState);
1887    assert(_foregroundGCShouldWait, "block post-condition");
1888  }
1889
1890  // Should this be in gc_epilogue?
1891  collector_policy()->counters()->update_counters();
1892
1893  {
1894    // Clear _foregroundGCShouldWait and, in the event that the
1895    // foreground collector is waiting, notify it, before
1896    // returning.
1897    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1898    _foregroundGCShouldWait = false;
1899    if (_foregroundGCIsActive) {
1900      CGC_lock->notify();
1901    }
1902    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1903           "Possible deadlock");
1904  }
1905  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1906                       p2i(Thread::current()), _collectorState);
1907  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1908                     prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);
1909}
1910
1911void CMSCollector::register_gc_start(GCCause::Cause cause) {
1912  _cms_start_registered = true;
1913  _gc_timer_cm->register_gc_start();
1914  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1915}
1916
1917void CMSCollector::register_gc_end() {
1918  if (_cms_start_registered) {
1919    report_heap_summary(GCWhen::AfterGC);
1920
1921    _gc_timer_cm->register_gc_end();
1922    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1923    _cms_start_registered = false;
1924  }
1925}
1926
1927void CMSCollector::save_heap_summary() {
1928  GenCollectedHeap* gch = GenCollectedHeap::heap();
1929  _last_heap_summary = gch->create_heap_summary();
1930  _last_metaspace_summary = gch->create_metaspace_summary();
1931}
1932
1933void CMSCollector::report_heap_summary(GCWhen::Type when) {
1934  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1935  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1936}
1937
1938bool CMSCollector::waitForForegroundGC() {
1939  bool res = false;
1940  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1941         "CMS thread should have CMS token");
1942  // Block the foreground collector until the
1943  // background collector decides whether to
1944  // yield.
1945  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1946  _foregroundGCShouldWait = true;
1947  if (_foregroundGCIsActive) {
1948    // The background collector yields to the
1949    // foreground collector and returns a value
1950    // indicating that it has yielded.  The foreground
1951    // collector can proceed.
1952    res = true;
1953    _foregroundGCShouldWait = false;
1954    ConcurrentMarkSweepThread::clear_CMS_flag(
1955      ConcurrentMarkSweepThread::CMS_cms_has_token);
1956    ConcurrentMarkSweepThread::set_CMS_flag(
1957      ConcurrentMarkSweepThread::CMS_cms_wants_token);
1958    // Get a possibly blocked foreground thread going
1959    CGC_lock->notify();
1960    log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
1961                         p2i(Thread::current()), _collectorState);
1962    while (_foregroundGCIsActive) {
1963      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1964    }
1965    ConcurrentMarkSweepThread::set_CMS_flag(
1966      ConcurrentMarkSweepThread::CMS_cms_has_token);
1967    ConcurrentMarkSweepThread::clear_CMS_flag(
1968      ConcurrentMarkSweepThread::CMS_cms_wants_token);
1969  }
1970  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
1971                       p2i(Thread::current()), _collectorState);
1972  return res;
1973}
1974
1975// Because of the need to lock the free lists and other structures in
1976// the collector, common to all the generations that the collector is
1977// collecting, we need the gc_prologues of individual CMS generations
1978// to delegate to their collector.  It may have been simpler had the
1979// current infrastructure allowed one to call a prologue on a
1980// collector. In the absence of that we have the generation's
1981// prologue delegate to the collector, which delegates back
1982// some "local" work to a worker method in the individual generations
1983// that it's responsible for collecting, while itself doing any
1984// work common to all generations it's responsible for. A similar
1985// comment applies to the gc_epilogue()s.
1986// The role of the variable _between_prologue_and_epilogue is to
1987// enforce the invocation protocol.
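// The shape of that protocol, reduced to a minimal illustrative sketch (names
// here are made up; the real work is done by gc_prologue()/gc_epilogue() and
// the gc_prologue_work()/gc_epilogue_work() methods below):
//
//   struct CollectorSketch {
//     bool between_prologue_and_epilogue = false;
//     void generation_prologue() { collector_prologue(); }   // each gen delegates up
//     void collector_prologue() {
//       if (between_prologue_and_epilogue) return;           // common work already done
//       between_prologue_and_epilogue = true;
//       // ... claim shared locks, then per-generation prologue work ...
//     }
//     void collector_epilogue() {
//       if (!between_prologue_and_epilogue) return;
//       // ... per-generation epilogue work, release shared locks ...
//       between_prologue_and_epilogue = false;               // ready for next cycle
//     }
//   };
//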
1988void CMSCollector::gc_prologue(bool full) {
1989  // Call gc_prologue_work() for the CMSGen
1990  // we are responsible for.
1991
1992  // The following locking discipline assumes that we are only called
1993  // when the world is stopped.
1994  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
1995
1996  // The CMSCollector prologue must call the gc_prologues for the
1997  // "generations" that it's responsible
1998  // for.
1999
2000  assert(   Thread::current()->is_VM_thread()
2001         || (   CMSScavengeBeforeRemark
2002             && Thread::current()->is_ConcurrentGC_thread()),
2003         "Incorrect thread type for prologue execution");
2004
2005  if (_between_prologue_and_epilogue) {
2006    // We have already been invoked; this is a gc_prologue delegation
2007    // from yet another CMS generation that we are responsible for, just
2008    // ignore it since all relevant work has already been done.
2009    return;
2010  }
2011
2012  // set a bit saying prologue has been called; cleared in epilogue
2013  _between_prologue_and_epilogue = true;
2014  // Claim locks for common data structures, then call gc_prologue_work()
2015  // for each CMSGen.
2016
2017  getFreelistLocks();   // gets free list locks on constituent spaces
2018  bitMapLock()->lock_without_safepoint_check();
2019
2020  // Should call gc_prologue_work() for all cms gens we are responsible for
2021  bool duringMarking =    _collectorState >= Marking
2022                         && _collectorState < Sweeping;
2023
2024  // The young collections clear the modified oops state, which tells if
2025  // there are any modified oops in the class. The remark phase also needs
2026  // that information. Tell the young collection to save the union of all
2027  // modified klasses.
2028  if (duringMarking) {
2029    _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2030  }
2031
2032  bool registerClosure = duringMarking;
2033
2034  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2035
2036  if (!full) {
2037    stats().record_gc0_begin();
2038  }
2039}
2040
2041void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2042
2043  _capacity_at_prologue = capacity();
2044  _used_at_prologue = used();
2045
2046  // We enable promotion tracking so that card-scanning can recognize
2047  // which objects have been promoted during this GC and skip them.
2048  for (uint i = 0; i < ParallelGCThreads; i++) {
2049    _par_gc_thread_states[i]->promo.startTrackingPromotions();
2050  }
2051
2052  // Delegate to CMScollector which knows how to coordinate between
2053  // this and any other CMS generations that it is responsible for
2054  // collecting.
2055  collector()->gc_prologue(full);
2056}
2057
2058// This is a "private" interface for use by this generation's CMSCollector.
2059// Not to be called directly by any other entity (for instance,
2060// GenCollectedHeap, which calls the "public" gc_prologue method above).
2061void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2062  bool registerClosure, ModUnionClosure* modUnionClosure) {
2063  assert(!incremental_collection_failed(), "Shouldn't be set yet");
2064  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2065    "Should be NULL");
2066  if (registerClosure) {
2067    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2068  }
2069  cmsSpace()->gc_prologue();
2070  // Clear stat counters
2071  NOT_PRODUCT(
2072    assert(_numObjectsPromoted == 0, "check");
2073    assert(_numWordsPromoted   == 0, "check");
2074    log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
2075                                 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2076    _numObjectsAllocated = 0;
2077    _numWordsAllocated   = 0;
2078  )
2079}
2080
2081void CMSCollector::gc_epilogue(bool full) {
2082  // The following locking discipline assumes that we are only called
2083  // when the world is stopped.
2084  assert(SafepointSynchronize::is_at_safepoint(),
2085         "world is stopped assumption");
2086
2087  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2088  // if linear allocation blocks need to be appropriately marked to allow
2089  // the blocks to be parsable. We also check here whether we need to nudge the
2090  // CMS collector thread to start a new cycle (if it's not already active).
2091  assert(   Thread::current()->is_VM_thread()
2092         || (   CMSScavengeBeforeRemark
2093             && Thread::current()->is_ConcurrentGC_thread()),
2094         "Incorrect thread type for epilogue execution");
2095
2096  if (!_between_prologue_and_epilogue) {
2097    // We have already been invoked; this is a gc_epilogue delegation
2098    // from yet another CMS generation that we are responsible for, just
2099    // ignore it since all relevant work has already been done.
2100    return;
2101  }
2102  assert(haveFreelistLocks(), "must have freelist locks");
2103  assert_lock_strong(bitMapLock());
2104
2105  _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2106
2107  _cmsGen->gc_epilogue_work(full);
2108
2109  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2110    // in case sampling was not already enabled, enable it
2111    _start_sampling = true;
2112  }
2113  // reset _eden_chunk_array so sampling starts afresh
2114  _eden_chunk_index = 0;
2115
2116  size_t cms_used   = _cmsGen->cmsSpace()->used();
2117
2118  // update performance counters - this uses a special version of
2119  // update_counters() that allows the utilization to be passed as a
2120  // parameter, avoiding multiple calls to used().
2121  //
2122  _cmsGen->update_counters(cms_used);
2123
2124  bitMapLock()->unlock();
2125  releaseFreelistLocks();
2126
2127  if (!CleanChunkPoolAsync) {
2128    Chunk::clean_chunk_pool();
2129  }
2130
2131  set_did_compact(false);
2132  _between_prologue_and_epilogue = false;  // ready for next cycle
2133}
2134
2135void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2136  collector()->gc_epilogue(full);
2137
2138  // When using ParNew, promotion tracking should have already been
2139  // disabled. However, the prologue (which enables promotion
2140  // tracking) and epilogue are called irrespective of the type of
2141  // GC. So they will also be called before and after Full GCs, during
2142  // which promotion tracking will not be explicitly disabled. So,
2143  // it's safer to also disable it here too (to be symmetric with
2144  // enabling it in the prologue).
2145  for (uint i = 0; i < ParallelGCThreads; i++) {
2146    _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2147  }
2148}
2149
2150void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2151  assert(!incremental_collection_failed(), "Should have been cleared");
2152  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2153  cmsSpace()->gc_epilogue();
2154  // Print stat counters
2155  NOT_PRODUCT(
2156    assert(_numObjectsAllocated == 0, "check");
2157    assert(_numWordsAllocated == 0, "check");
2158    log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
2159                                     _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2160    _numObjectsPromoted = 0;
2161    _numWordsPromoted   = 0;
2162  )
2163
2164  // The call down the chain in contiguous_available() needs the freelistLock,
2165  // so print this out before releasing the freelistLock.
2166  log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
2167}
2168
2169#ifndef PRODUCT
2170bool CMSCollector::have_cms_token() {
2171  Thread* thr = Thread::current();
2172  if (thr->is_VM_thread()) {
2173    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2174  } else if (thr->is_ConcurrentGC_thread()) {
2175    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2176  } else if (thr->is_GC_task_thread()) {
2177    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2178           ParGCRareEvent_lock->owned_by_self();
2179  }
2180  return false;
2181}
2182
2183// Check reachability of the given heap address in CMS generation,
2184// treating all other generations as roots.
2185bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2186  // We could "guarantee" below, rather than assert, but I'll
2187  // leave these as "asserts" so that an adventurous debugger
2188  // could try this in the product build provided some subset of
2189  // the conditions were met, provided they were interested in the
2190  // results and knew that the computation below wouldn't interfere
2191  // with other concurrent computations mutating the structures
2192  // being read or written.
2193  assert(SafepointSynchronize::is_at_safepoint(),
2194         "Else mutations in object graph will make answer suspect");
2195  assert(have_cms_token(), "Should hold cms token");
2196  assert(haveFreelistLocks(), "must hold free list locks");
2197  assert_lock_strong(bitMapLock());
2198
2199  // Clear the marking bit map array before starting, but, just
2200  // for kicks, first report if the given address is already marked
2201  tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2202                _markBitMap.isMarked(addr) ? "" : " not");
2203
2204  if (verify_after_remark()) {
2205    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2206    bool result = verification_mark_bm()->isMarked(addr);
2207    tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2208                  result ? "IS" : "is NOT");
2209    return result;
2210  } else {
2211    tty->print_cr("Could not compute result");
2212    return false;
2213  }
2214}
2215#endif
2216
2217void
2218CMSCollector::print_on_error(outputStream* st) {
2219  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2220  if (collector != NULL) {
2221    CMSBitMap* bitmap = &collector->_markBitMap;
2222    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2223    bitmap->print_on_error(st, " Bits: ");
2224
2225    st->cr();
2226
2227    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2228    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2229    mut_bitmap->print_on_error(st, " Bits: ");
2230  }
2231}
2232
2233////////////////////////////////////////////////////////
2234// CMS Verification Support
2235////////////////////////////////////////////////////////
2236// Following the remark phase, the following invariant
2237// should hold -- each object in the CMS heap which is
2238// marked in the verification_mark_bm() should also be marked in markBitMap().
2239
2240class VerifyMarkedClosure: public BitMapClosure {
2241  CMSBitMap* _marks;
2242  bool       _failed;
2243
2244 public:
2245  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2246
2247  bool do_bit(size_t offset) {
2248    HeapWord* addr = _marks->offsetToHeapWord(offset);
2249    if (!_marks->isMarked(addr)) {
2250      Log(gc, verify) log;
2251      ResourceMark rm;
2252      oop(addr)->print_on(log.error_stream());
2253      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2254      _failed = true;
2255    }
2256    return true;
2257  }
2258
2259  bool failed() { return _failed; }
2260};
2261
2262bool CMSCollector::verify_after_remark() {
2263  GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
2264  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2265  static bool init = false;
2266
2267  assert(SafepointSynchronize::is_at_safepoint(),
2268         "Else mutations in object graph will make answer suspect");
2269  assert(have_cms_token(),
2270         "Else there may be mutual interference in use of "
2271         " verification data structures");
2272  assert(_collectorState > Marking && _collectorState <= Sweeping,
2273         "Else marking info checked here may be obsolete");
2274  assert(haveFreelistLocks(), "must hold free list locks");
2275  assert_lock_strong(bitMapLock());
2276
2277
2278  // Allocate marking bit map if not already allocated
2279  if (!init) { // first time
2280    if (!verification_mark_bm()->allocate(_span)) {
2281      return false;
2282    }
2283    init = true;
2284  }
2285
2286  assert(verification_mark_stack()->isEmpty(), "Should be empty");
2287
2288  // Turn off refs discovery -- so we will be tracing through refs.
2289  // This is as intended, because by this time
2290  // GC must already have cleared any refs that need to be cleared,
2291  // and traced those that need to be marked; moreover,
2292  // the marking done here is not going to interfere in any
2293  // way with the marking information used by GC.
2294  NoRefDiscovery no_discovery(ref_processor());
2295
2296#if defined(COMPILER2) || INCLUDE_JVMCI
2297  DerivedPointerTableDeactivate dpt_deact;
2298#endif
2299
2300  // Clear any marks from a previous round
2301  verification_mark_bm()->clear_all();
2302  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2303  verify_work_stacks_empty();
2304
2305  GenCollectedHeap* gch = GenCollectedHeap::heap();
2306  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2307  // Update the saved marks which may affect the root scans.
2308  gch->save_marks();
2309
2310  if (CMSRemarkVerifyVariant == 1) {
2311    // In this first variant of verification, we complete
2312    // all marking, then check if the new marks-vector is
2313    // a subset of the CMS marks-vector.
2314    verify_after_remark_work_1();
2315  } else {
2316    guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
2317    // In this second variant of verification, we flag an error
2318    // (i.e. an object reachable in the new marks-vector not reachable
2319    // in the CMS marks-vector) immediately, also indicating the
2320    // identity of an object (A) that references the unmarked object (B) --
2321    // presumably, a mutation to A failed to be picked up by preclean/remark?
2322    verify_after_remark_work_2();
2323  }
2324
2325  return true;
2326}
2327
2328void CMSCollector::verify_after_remark_work_1() {
2329  ResourceMark rm;
2330  HandleMark  hm;
2331  GenCollectedHeap* gch = GenCollectedHeap::heap();
2332
2333  // Get a clear set of claim bits for the roots processing to work with.
2334  ClassLoaderDataGraph::clear_claimed_marks();
2335
2336  // Mark from roots one level into CMS
2337  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2338  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2339
2340  {
2341    StrongRootsScope srs(1);
2342
2343    gch->gen_process_roots(&srs,
2344                           GenCollectedHeap::OldGen,
2345                           true,   // young gen as roots
2346                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2347                           should_unload_classes(),
2348                           &notOlder,
2349                           NULL,
2350                           NULL);
2351  }
2352
2353  // Now mark from the roots
2354  MarkFromRootsClosure markFromRootsClosure(this, _span,
2355    verification_mark_bm(), verification_mark_stack(),
2356    false /* don't yield */, true /* verifying */);
2357  assert(_restart_addr == NULL, "Expected pre-condition");
2358  verification_mark_bm()->iterate(&markFromRootsClosure);
2359  while (_restart_addr != NULL) {
2360    // Deal with stack overflow: by restarting at the indicated
2361    // address.
2362    HeapWord* ra = _restart_addr;
2363    markFromRootsClosure.reset(ra);
2364    _restart_addr = NULL;
2365    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2366  }
2367  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2368  verify_work_stacks_empty();
2369
2370  // Marking completed -- now verify that each bit marked in
2371  // verification_mark_bm() is also marked in markBitMap(); flag all
2372  // errors by printing corresponding objects.
2373  VerifyMarkedClosure vcl(markBitMap());
2374  verification_mark_bm()->iterate(&vcl);
2375  if (vcl.failed()) {
2376    Log(gc, verify) log;
2377    log.error("Failed marking verification after remark");
2378    ResourceMark rm;
2379    gch->print_on(log.error_stream());
2380    fatal("CMS: failed marking verification after remark");
2381  }
2382}
2383
2384class VerifyKlassOopsKlassClosure : public KlassClosure {
2385  class VerifyKlassOopsClosure : public OopClosure {
2386    CMSBitMap* _bitmap;
2387   public:
2388    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2389    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2390    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2391  } _oop_closure;
2392 public:
2393  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2394  void do_klass(Klass* k) {
2395    k->oops_do(&_oop_closure);
2396  }
2397};
2398
2399void CMSCollector::verify_after_remark_work_2() {
2400  ResourceMark rm;
2401  HandleMark  hm;
2402  GenCollectedHeap* gch = GenCollectedHeap::heap();
2403
2404  // Get a clear set of claim bits for the roots processing to work with.
2405  ClassLoaderDataGraph::clear_claimed_marks();
2406
2407  // Mark from roots one level into CMS
2408  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2409                                     markBitMap());
2410  CLDToOopClosure cld_closure(&notOlder, true);
2411
2412  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2413
2414  {
2415    StrongRootsScope srs(1);
2416
2417    gch->gen_process_roots(&srs,
2418                           GenCollectedHeap::OldGen,
2419                           true,   // young gen as roots
2420                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2421                           should_unload_classes(),
2422                           &notOlder,
2423                           NULL,
2424                           &cld_closure);
2425  }
2426
2427  // Now mark from the roots
2428  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2429    verification_mark_bm(), markBitMap(), verification_mark_stack());
2430  assert(_restart_addr == NULL, "Expected pre-condition");
2431  verification_mark_bm()->iterate(&markFromRootsClosure);
2432  while (_restart_addr != NULL) {
2433    // Deal with stack overflow: by restarting at the indicated
2434    // address.
2435    HeapWord* ra = _restart_addr;
2436    markFromRootsClosure.reset(ra);
2437    _restart_addr = NULL;
2438    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2439  }
2440  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2441  verify_work_stacks_empty();
2442
2443  VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2444  ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2445
2446  // Marking completed -- now verify that each bit marked in
2447  // verification_mark_bm() is also marked in markBitMap(); flag all
2448  // errors by printing corresponding objects.
2449  VerifyMarkedClosure vcl(markBitMap());
2450  verification_mark_bm()->iterate(&vcl);
2451  assert(!vcl.failed(), "Else verification above should not have succeeded");
2452}
2453
2454void ConcurrentMarkSweepGeneration::save_marks() {
2455  // delegate to CMS space
2456  cmsSpace()->save_marks();
2457}
2458
2459bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2460  return cmsSpace()->no_allocs_since_save_marks();
2461}
2462
2463#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2464                                                                \
2465void ConcurrentMarkSweepGeneration::                            \
2466oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2467  cl->set_generation(this);                                     \
2468  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2469  cl->reset_generation();                                       \
2470  save_marks();                                                 \
2471}
2472
2473ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2474
2475void
2476ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2477  if (freelistLock()->owned_by_self()) {
2478    Generation::oop_iterate(cl);
2479  } else {
2480    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2481    Generation::oop_iterate(cl);
2482  }
2483}
2484
2485void
2486ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2487  if (freelistLock()->owned_by_self()) {
2488    Generation::object_iterate(cl);
2489  } else {
2490    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2491    Generation::object_iterate(cl);
2492  }
2493}
2494
2495void
2496ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2497  if (freelistLock()->owned_by_self()) {
2498    Generation::safe_object_iterate(cl);
2499  } else {
2500    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2501    Generation::safe_object_iterate(cl);
2502  }
2503}
2504
2505void
2506ConcurrentMarkSweepGeneration::post_compact() {
2507}
2508
2509void
2510ConcurrentMarkSweepGeneration::prepare_for_verify() {
2511  // Fix the linear allocation blocks to look like free blocks.
2512
2513  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2514  // are not called when the heap is verified during universe initialization and
2515  // at vm shutdown.
2516  if (freelistLock()->owned_by_self()) {
2517    cmsSpace()->prepare_for_verify();
2518  } else {
2519    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2520    cmsSpace()->prepare_for_verify();
2521  }
2522}
2523
2524void
2525ConcurrentMarkSweepGeneration::verify() {
2526  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2527  // are not called when the heap is verified during universe initialization and
2528  // at vm shutdown.
2529  if (freelistLock()->owned_by_self()) {
2530    cmsSpace()->verify();
2531  } else {
2532    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2533    cmsSpace()->verify();
2534  }
2535}
2536
2537void CMSCollector::verify() {
2538  _cmsGen->verify();
2539}
2540
2541#ifndef PRODUCT
2542bool CMSCollector::overflow_list_is_empty() const {
2543  assert(_num_par_pushes >= 0, "Inconsistency");
2544  if (_overflow_list == NULL) {
2545    assert(_num_par_pushes == 0, "Inconsistency");
2546  }
2547  return _overflow_list == NULL;
2548}
2549
2550// The methods verify_work_stacks_empty() and verify_overflow_empty()
2551// merely consolidate assertion checks that appear to occur together frequently.
2552void CMSCollector::verify_work_stacks_empty() const {
2553  assert(_markStack.isEmpty(), "Marking stack should be empty");
2554  assert(overflow_list_is_empty(), "Overflow list should be empty");
2555}
2556
2557void CMSCollector::verify_overflow_empty() const {
2558  assert(overflow_list_is_empty(), "Overflow list should be empty");
2559  assert(no_preserved_marks(), "No preserved marks");
2560}
2561#endif // PRODUCT
2562
2563// Decide if we want to enable class unloading as part of the
2564// ensuing concurrent GC cycle. We will collect and
2565// unload classes if it's the case that:
2566// (1) an explicit gc request has been made and the flag
2567//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2568// (2) (a) class unloading is enabled at the command line, and
2569//     (b) either enough concurrent cycles have passed since the last unloading (CMSClassUnloadingMaxInterval), or the old gen is getting really full
2570// NOTE: Provided there is no change in the state of the heap between
2571// calls to this method, it should have idempotent results. Moreover,
2572// its results should be monotonically increasing (i.e. going from 0 to 1,
2573// but not 1 to 0) between successive calls between which the heap was
2574// not collected. For the implementation below, it must thus rely on
2575// the property that concurrent_cycles_since_last_unload()
2576// will not decrease unless a collection cycle happened and that
2577// _cmsGen->is_too_full() is
2578// itself also monotonic in that sense. See check_monotonicity()
2579// below.
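// For example (illustrative), running with
//   java -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled \
//        -XX:CMSClassUnloadingMaxInterval=5 ...
// makes a concurrent cycle unload classes only if at least 5 cycles have
// passed since the last unloading, unless the old gen is too full or an
// explicit GC with ExplicitGCInvokesConcurrentAndUnloadsClasses requests it
// (per the conditions above and the code below).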
2580void CMSCollector::update_should_unload_classes() {
2581  _should_unload_classes = false;
2582  // Condition 1 above
2583  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2584    _should_unload_classes = true;
2585  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2586    // Condition 2.b above (either disjunct)
2587    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2588                              CMSClassUnloadingMaxInterval)
2589                           || _cmsGen->is_too_full();
2590  }
2591}
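// Note: assuming the default CMSClassUnloadingMaxInterval of 0, condition 2
// above degenerates to "always unload classes when CMSClassUnloadingEnabled
// is set", since concurrent_cycles_since_last_unload() >= 0 holds trivially.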
2592
2593bool ConcurrentMarkSweepGeneration::is_too_full() const {
2594  bool res = should_concurrent_collect();
2595  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2596  return res;
2597}
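// For example, assuming the default CMSIsTooFullPercentage of 98, the old gen
// is considered "too full" only if it both satisfies should_concurrent_collect()
// and is more than 98% occupied.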
2598
2599void CMSCollector::setup_cms_unloading_and_verification_state() {
2600  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2601                             || VerifyBeforeExit;
2602  const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2603
2604  // We set the proper root for this CMS cycle here.
2605  if (should_unload_classes()) {   // Should unload classes this cycle
2606    remove_root_scanning_option(rso);  // Shrink the root set appropriately
2607    set_verifying(should_verify);    // Set verification state for this cycle
2608    return;                            // Nothing else needs to be done at this time
2609  }
2610
2611  // Not unloading classes this cycle
2612  assert(!should_unload_classes(), "Inconsistency!");
2613
2614  // If we are not unloading classes then add SO_AllCodeCache to root
2615  // scanning options.
2616  add_root_scanning_option(rso);
2617
2618  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2619    set_verifying(true);
2620  } else if (verifying() && !should_verify) {
2621    // We were verifying, but some verification flags got disabled.
2622    set_verifying(false);
2623    // Exclude symbols, strings and code cache elements from root scanning to
2624    // reduce initial mark (IM) and remark (RM) pauses.
2625    remove_root_scanning_option(rso);
2626  }
2627}
2628
2629
2630#ifndef PRODUCT
2631HeapWord* CMSCollector::block_start(const void* p) const {
2632  const HeapWord* addr = (HeapWord*)p;
2633  if (_span.contains(p)) {
2634    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2635      return _cmsGen->cmsSpace()->block_start(p);
2636    }
2637  }
2638  return NULL;
2639}
2640#endif
2641
2642HeapWord*
2643ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2644                                                   bool   tlab,
2645                                                   bool   parallel) {
2646  CMSSynchronousYieldRequest yr;
2647  assert(!tlab, "Can't deal with TLAB allocation");
2648  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2649  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2650  if (GCExpandToAllocateDelayMillis > 0) {
2651    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2652  }
2653  return have_lock_and_allocate(word_size, tlab);
2654}
2655
2656void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2657    size_t bytes,
2658    size_t expand_bytes,
2659    CMSExpansionCause::Cause cause)
2660{
2661
2662  bool success = expand(bytes, expand_bytes);
2663
2664  // remember why we expanded; this information is used
2665  // by shouldConcurrentCollect() when making decisions on whether to start
2666  // a new CMS cycle.
2667  if (success) {
2668    set_expansion_cause(cause);
2669    log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
2670  }
2671}
2672
2673HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2674  HeapWord* res = NULL;
2675  MutexLocker x(ParGCRareEvent_lock);
2676  while (true) {
2677    // Expansion by some other thread might make alloc OK now:
2678    res = ps->lab.alloc(word_sz);
2679    if (res != NULL) return res;
2680    // If there's not enough expansion space available, give up.
2681    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2682      return NULL;
2683    }
2684    // Otherwise, we try expansion.
2685    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2686    // Now go around the loop and try alloc again;
2687    // A competing par_promote might beat us to the expansion space,
2688    // so we may go around the loop again if promotion fails again.
2689    if (GCExpandToAllocateDelayMillis > 0) {
2690      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2691    }
2692  }
2693}
2694
2695
2696bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2697  PromotionInfo* promo) {
2698  MutexLocker x(ParGCRareEvent_lock);
2699  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2700  while (true) {
2701    // Expansion by some other thread might make alloc OK now:
2702    if (promo->ensure_spooling_space()) {
2703      assert(promo->has_spooling_space(),
2704             "Post-condition of successful ensure_spooling_space()");
2705      return true;
2706    }
2707    // If there's not enough expansion space available, give up.
2708    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2709      return false;
2710    }
2711    // Otherwise, we try expansion.
2712    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2713    // Now go around the loop and try alloc again;
2714    // A competing allocation might beat us to the expansion space,
2715    // so we may go around the loop again if allocation fails again.
2716    if (GCExpandToAllocateDelayMillis > 0) {
2717      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2718    }
2719  }
2720}
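// Both expansion helpers above follow the same pattern: take the rarely
// contended ParGCRareEvent_lock, re-check whether another thread's expansion
// has already satisfied the request, give up if insufficient uncommitted
// space remains, and otherwise expand and retry, optionally sleeping
// GCExpandToAllocateDelayMillis between attempts.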
2721
2722void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2723  // Only shrink if a compaction was done so that all the free space
2724  // in the generation is in a contiguous block at the end.
2725  if (did_compact()) {
2726    CardGeneration::shrink(bytes);
2727  }
2728}
2729
2730void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2731  assert_locked_or_safepoint(Heap_lock);
2732}
2733
2734void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2735  assert_locked_or_safepoint(Heap_lock);
2736  assert_lock_strong(freelistLock());
2737  log_trace(gc)("Shrinking of CMS not yet implemented");
2738  return;
2739}
2740
2741
2742// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2743// phases.
2744class CMSPhaseAccounting: public StackObj {
2745 public:
2746  CMSPhaseAccounting(CMSCollector *collector,
2747                     const char *title);
2748  ~CMSPhaseAccounting();
2749
2750 private:
2751  CMSCollector *_collector;
2752  const char *_title;
2753  GCTraceConcTime(Info, gc) _trace_time;
2754
2755 public:
2756  // Not MT-safe; so do not pass around these StackObj's
2757  // where they may be accessed by other threads.
2758  double wallclock_millis() {
2759    return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2760  }
2761};
2762
2763CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2764                                       const char *title) :
2765  _collector(collector), _title(title), _trace_time(title) {
2766
2767  _collector->resetYields();
2768  _collector->resetTimer();
2769  _collector->startTimer();
2770  _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2771}
2772
2773CMSPhaseAccounting::~CMSPhaseAccounting() {
2774  _collector->gc_timer_cm()->register_gc_concurrent_end();
2775  _collector->stopTimer();
2776  log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2777  log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2778}
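// Typical usage, as in markFromRoots() and preclean() below: construct a
// CMSPhaseAccounting on the stack for the duration of a concurrent phase so
// that its constructor and destructor bracket the timer/yield bookkeeping.
// A minimal sketch:
//
//   {
//     GCTraceCPUTime tcpu;
//     CMSPhaseAccounting pa(this, "Concurrent Mark");
//     // ... do the work of the concurrent phase ...
//   }  // destructor logs concurrent active time and yield count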
2779
2780// CMS work
2781
2782// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2783class CMSParMarkTask : public AbstractGangTask {
2784 protected:
2785  CMSCollector*     _collector;
2786  uint              _n_workers;
2787  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2788      AbstractGangTask(name),
2789      _collector(collector),
2790      _n_workers(n_workers) {}
2791  // Work method in support of parallel rescan ... of young gen spaces
2792  void do_young_space_rescan(OopsInGenClosure* cl,
2793                             ContiguousSpace* space,
2794                             HeapWord** chunk_array, size_t chunk_top);
2795  void work_on_young_gen_roots(OopsInGenClosure* cl);
2796};
2797
2798// Parallel initial mark task
2799class CMSParInitialMarkTask: public CMSParMarkTask {
2800  StrongRootsScope* _strong_roots_scope;
2801 public:
2802  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2803      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2804      _strong_roots_scope(strong_roots_scope) {}
2805  void work(uint worker_id);
2806};
2807
2808// Checkpoint the roots into this generation from outside
2809// this generation. [Note this initial checkpoint need only
2810// be approximate -- we'll do a catch up phase subsequently.]
2811void CMSCollector::checkpointRootsInitial() {
2812  assert(_collectorState == InitialMarking, "Wrong collector state");
2813  check_correct_thread_executing();
2814  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2815
2816  save_heap_summary();
2817  report_heap_summary(GCWhen::BeforeGC);
2818
2819  ReferenceProcessor* rp = ref_processor();
2820  assert(_restart_addr == NULL, "Control point invariant");
2821  {
2822    // acquire locks for subsequent manipulations
2823    MutexLockerEx x(bitMapLock(),
2824                    Mutex::_no_safepoint_check_flag);
2825    checkpointRootsInitialWork();
2826    // enable ("weak") refs discovery
2827    rp->enable_discovery();
2828    _collectorState = Marking;
2829  }
2830}
2831
2832void CMSCollector::checkpointRootsInitialWork() {
2833  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2834  assert(_collectorState == InitialMarking, "just checking");
2835
2836  // Already have locks.
2837  assert_lock_strong(bitMapLock());
2838  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2839
2840  // Setup the verification and class unloading state for this
2841  // CMS collection cycle.
2842  setup_cms_unloading_and_verification_state();
2843
2844  GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2845
2846  // Reset all the PLAB chunk arrays if necessary.
2847  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2848    reset_survivor_plab_arrays();
2849  }
2850
2851  ResourceMark rm;
2852  HandleMark  hm;
2853
2854  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2855  GenCollectedHeap* gch = GenCollectedHeap::heap();
2856
2857  verify_work_stacks_empty();
2858  verify_overflow_empty();
2859
2860  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2861  // Update the saved marks which may affect the root scans.
2862  gch->save_marks();
2863
2864  // weak reference processing has not started yet.
2865  ref_processor()->set_enqueuing_is_done(false);
2866
2867  // Need to remember all newly created CLDs,
2868  // so that we can guarantee that the remark finds them.
2869  ClassLoaderDataGraph::remember_new_clds(true);
2870
2871  // Whenever a CLD is found, it will be claimed before proceeding to mark
2872  // the klasses. The claimed marks need to be cleared before marking starts.
2873  ClassLoaderDataGraph::clear_claimed_marks();
2874
2875  print_eden_and_survivor_chunk_arrays();
2876
2877  {
2878#if defined(COMPILER2) || INCLUDE_JVMCI
2879    DerivedPointerTableDeactivate dpt_deact;
2880#endif
2881    if (CMSParallelInitialMarkEnabled) {
2882      // The parallel version.
2883      WorkGang* workers = gch->workers();
2884      assert(workers != NULL, "Need parallel worker threads.");
2885      uint n_workers = workers->active_workers();
2886
2887      StrongRootsScope srs(n_workers);
2888
2889      CMSParInitialMarkTask tsk(this, &srs, n_workers);
2890      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2891      // If the total number of workers is greater than 1, then multiple
2892      // workers may be used at some point, and the initialization has been
2893      // set up such that the single-threaded path cannot be used.
2894      if (workers->total_workers() > 1) {
2895        workers->run_task(&tsk);
2896      } else {
2897        tsk.work(0);
2898      }
2899    } else {
2900      // The serial version.
2901      CLDToOopClosure cld_closure(&notOlder, true);
2902      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2903
2904      StrongRootsScope srs(1);
2905
2906      gch->gen_process_roots(&srs,
2907                             GenCollectedHeap::OldGen,
2908                             true,   // young gen as roots
2909                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
2910                             should_unload_classes(),
2911                             &notOlder,
2912                             NULL,
2913                             &cld_closure);
2914    }
2915  }
2916
2917  // The mod-union table should already be clear here; it will be dirtied
2918  // in the prologue of the CMS generation on each young generation collection.
2919
2920  assert(_modUnionTable.isAllClear(),
2921       "Should have been cleared in the most recent final checkpoint phase,"
2922       " with no bits set by the gc_prologue before the start of the next"
2923       " marking phase.");
2924
2925  assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2926
2927  // Save the end of the used_region of the constituent generations
2928  // to be used to limit the extent of sweep in each generation.
2929  save_sweep_limits();
2930  verify_overflow_empty();
2931}
2932
2933bool CMSCollector::markFromRoots() {
2934  // we might be tempted to assert that:
2935  // assert(!SafepointSynchronize::is_at_safepoint(),
2936  //        "inconsistent argument?");
2937  // However that wouldn't be right, because it's possible that
2938  // a safepoint is indeed in progress as a young generation
2939  // stop-the-world GC happens even as we mark in this generation.
2940  assert(_collectorState == Marking, "inconsistent state?");
2941  check_correct_thread_executing();
2942  verify_overflow_empty();
2943
2944  // Weak ref discovery note: We may be discovering weak
2945  // refs in this generation concurrent (but interleaved) with
2946  // weak ref discovery by the young generation collector.
2947
2948  CMSTokenSyncWithLocks ts(true, bitMapLock());
2949  GCTraceCPUTime tcpu;
2950  CMSPhaseAccounting pa(this, "Concurrent Mark");
2951  bool res = markFromRootsWork();
2952  if (res) {
2953    _collectorState = Precleaning;
2954  } else { // We failed and a foreground collection wants to take over
2955    assert(_foregroundGCIsActive, "internal state inconsistency");
2956    assert(_restart_addr == NULL,  "foreground will restart from scratch");
2957    log_debug(gc)("bailing out to foreground collection");
2958  }
2959  verify_overflow_empty();
2960  return res;
2961}
2962
2963bool CMSCollector::markFromRootsWork() {
2964  // iterate over marked bits in bit map, doing a full scan and mark
2965  // from these roots using the following algorithm:
2966  // . if oop is to the right of the current scan pointer,
2967  //   mark corresponding bit (we'll process it later)
2968  // . else (oop is to left of current scan pointer)
2969  //   push oop on marking stack
2970  // . drain the marking stack
2971
2972  // Note that when we do a marking step we need to hold the
2973  // bit map lock -- recall that direct allocation (by mutators)
2974  // and promotion (by the young generation collector) is also
2975  // marking the bit map. [the so-called allocate live policy.]
2976  // Because the implementation of bit map marking is not
2977  // robust wrt simultaneous marking of bits in the same word,
2978  // we need to make sure that there is no such interference
2979  // between concurrent such updates.
2980
2981  // already have locks
2982  assert_lock_strong(bitMapLock());
2983
2984  verify_work_stacks_empty();
2985  verify_overflow_empty();
2986  bool result = false;
2987  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
2988    result = do_marking_mt();
2989  } else {
2990    result = do_marking_st();
2991  }
2992  return result;
2993}
2994
2995// Forward decl
2996class CMSConcMarkingTask;
2997
2998class CMSConcMarkingTerminator: public ParallelTaskTerminator {
2999  CMSCollector*       _collector;
3000  CMSConcMarkingTask* _task;
3001 public:
3002  virtual void yield();
3003
3004  // "n_threads" is the number of threads to be terminated.
3005  // "queue_set" is a set of work queues of other threads.
3006  // "collector" is the CMS collector associated with this task terminator.
3007  // "yield" indicates whether we need the gang as a whole to yield.
3008  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3009    ParallelTaskTerminator(n_threads, queue_set),
3010    _collector(collector) { }
3011
3012  void set_task(CMSConcMarkingTask* task) {
3013    _task = task;
3014  }
3015};
3016
3017class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3018  CMSConcMarkingTask* _task;
3019 public:
3020  bool should_exit_termination();
3021  void set_task(CMSConcMarkingTask* task) {
3022    _task = task;
3023  }
3024};
3025
3026// MT Concurrent Marking Task
3027class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3028  CMSCollector* _collector;
3029  uint          _n_workers;       // requested/desired # workers
3030  bool          _result;
3031  CompactibleFreeListSpace*  _cms_space;
3032  char          _pad_front[64];   // padding to ...
3033  HeapWord*     _global_finger;   // ... avoid sharing cache line
3034  char          _pad_back[64];
3035  HeapWord*     _restart_addr;
3036
3037  //  Exposed here for yielding support
3038  Mutex* const _bit_map_lock;
3039
3040  // The per thread work queues, available here for stealing
3041  OopTaskQueueSet*  _task_queues;
3042
3043  // Termination (and yielding) support
3044  CMSConcMarkingTerminator _term;
3045  CMSConcMarkingTerminatorTerminator _term_term;
3046
3047 public:
3048  CMSConcMarkingTask(CMSCollector* collector,
3049                 CompactibleFreeListSpace* cms_space,
3050                 YieldingFlexibleWorkGang* workers,
3051                 OopTaskQueueSet* task_queues):
3052    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3053    _collector(collector),
3054    _cms_space(cms_space),
3055    _n_workers(0), _result(true),
3056    _task_queues(task_queues),
3057    _term(_n_workers, task_queues, _collector),
3058    _bit_map_lock(collector->bitMapLock())
3059  {
3060    _requested_size = _n_workers;
3061    _term.set_task(this);
3062    _term_term.set_task(this);
3063    _restart_addr = _global_finger = _cms_space->bottom();
3064  }
3065
3066
3067  OopTaskQueueSet* task_queues()  { return _task_queues; }
3068
3069  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3070
3071  HeapWord** global_finger_addr() { return &_global_finger; }
3072
3073  CMSConcMarkingTerminator* terminator() { return &_term; }
3074
3075  virtual void set_for_termination(uint active_workers) {
3076    terminator()->reset_for_reuse(active_workers);
3077  }
3078
3079  void work(uint worker_id);
3080  bool should_yield() {
3081    return    ConcurrentMarkSweepThread::should_yield()
3082           && !_collector->foregroundGCIsActive();
3083  }
3084
3085  virtual void coordinator_yield();  // stuff done by coordinator
3086  bool result() { return _result; }
3087
3088  void reset(HeapWord* ra) {
3089    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3090    _restart_addr = _global_finger = ra;
3091    _term.reset_for_reuse();
3092  }
3093
3094  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3095                                           OopTaskQueue* work_q);
3096
3097 private:
3098  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3099  void do_work_steal(int i);
3100  void bump_global_finger(HeapWord* f);
3101};
3102
3103bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3104  assert(_task != NULL, "Error");
3105  return _task->yielding();
3106  // Note that we do not need the disjunct || _task->should_yield() above
3107  // because we want terminating threads to yield only if the task
3108  // is already in the midst of yielding, which happens only after at least one
3109  // thread has yielded.
3110}
3111
3112void CMSConcMarkingTerminator::yield() {
3113  if (_task->should_yield()) {
3114    _task->yield();
3115  } else {
3116    ParallelTaskTerminator::yield();
3117  }
3118}
3119
3120////////////////////////////////////////////////////////////////
3121// Concurrent Marking Algorithm Sketch
3122////////////////////////////////////////////////////////////////
3123// Until all tasks exhausted (both spaces):
3124// -- claim next available chunk
3125// -- bump global finger via CAS
3126// -- find first object that starts in this chunk
3127//    and start scanning bitmap from that position
3128// -- scan marked objects for oops
3129// -- CAS-mark target, and if successful:
3130//    . if target oop is above global finger (volatile read)
3131//      nothing to do
3132//    . if target oop is in chunk and above local finger
3133//        then nothing to do
3134//    . else push on work-queue
3135// -- Deal with possible overflow issues:
3136//    . local work-queue overflow causes stuff to be pushed on
3137//      global (common) overflow queue
3138//    . always first empty local work queue
3139//    . then get a batch of oops from global work queue if any
3140//    . then do work stealing
3141// -- When all tasks claimed (both spaces)
3142//    and local work queue empty,
3143//    then in a loop do:
3144//    . check global overflow stack; steal a batch of oops and trace
3145//    . try to steal from other threads if the GOS (global overflow stack) is empty
3146//    . if neither is available, offer termination
3147// -- Terminate and return result
3148//
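// In the code below, the chunk-claiming and global-finger protocol sketched
// above is implemented by do_scan_and_mark(), while the overflow draining,
// work stealing and termination loop is implemented by do_work_steal();
// work() simply runs the two in sequence for each worker.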
3149void CMSConcMarkingTask::work(uint worker_id) {
3150  elapsedTimer _timer;
3151  ResourceMark rm;
3152  HandleMark hm;
3153
3154  DEBUG_ONLY(_collector->verify_overflow_empty();)
3155
3156  // Before we begin work, our work queue should be empty
3157  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3158  // Scan the bitmap covering _cms_space, tracing through grey objects.
3159  _timer.start();
3160  do_scan_and_mark(worker_id, _cms_space);
3161  _timer.stop();
3162  log_trace(gc, task)("Finished cms space scanning in %uth thread: %3.3f sec", worker_id, _timer.seconds());
3163
3164  // ... do work stealing
3165  _timer.reset();
3166  _timer.start();
3167  do_work_steal(worker_id);
3168  _timer.stop();
3169  log_trace(gc, task)("Finished work stealing in %uth thread: %3.3f sec", worker_id, _timer.seconds());
3170  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3171  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3172  // Note that under the current task protocol, the
3173  // following assertion is true even if the spaces
3174  // expanded since the completion of the concurrent
3175  // marking. XXX This will likely change under a strict
3176  // ABORT semantics.
3177  // After perm removal the comparison was changed to
3178  // greater than or equal to from strictly greater than.
3179  // Before perm removal the highest address sweep would
3180  // have been at the end of perm gen but now is at the
3181  // end of the tenured gen.
3182  assert(_global_finger >=  _cms_space->end(),
3183         "All tasks have been completed");
3184  DEBUG_ONLY(_collector->verify_overflow_empty();)
3185}
3186
3187void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3188  HeapWord* read = _global_finger;
3189  HeapWord* cur  = read;
3190  while (f > read) {
3191    cur = read;
3192    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3193    if (cur == read) {
3194      // our cas succeeded
3195      assert(_global_finger >= f, "protocol consistency");
3196      break;
3197    }
3198  }
3199}
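// bump_global_finger() is the usual lock-free "CAS a maximum" idiom: the loop
// retries only while the published value is still below f, so the global
// finger is monotonically non-decreasing even with multiple workers racing to
// publish the ends of their claimed chunks (see do_scan_and_mark() below).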
3200
3201// This is really inefficient, and should be redone by
3202// using (not yet available) block-read and -write interfaces to the
3203// stack and the work_queue. XXX FIX ME !!!
3204bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3205                                                      OopTaskQueue* work_q) {
3206  // Fast lock-free check
3207  if (ovflw_stk->length() == 0) {
3208    return false;
3209  }
3210  assert(work_q->size() == 0, "Shouldn't steal");
3211  MutexLockerEx ml(ovflw_stk->par_lock(),
3212                   Mutex::_no_safepoint_check_flag);
3213  // Grab up to 1/4 of the remaining capacity of the work queue
3214  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3215                    (size_t)ParGCDesiredObjsFromOverflowList);
3216  num = MIN2(num, ovflw_stk->length());
3217  for (int i = (int) num; i > 0; i--) {
3218    oop cur = ovflw_stk->pop();
3219    assert(cur != NULL, "Counted wrong?");
3220    work_q->push(cur);
3221  }
3222  return num > 0;
3223}
3224
3225void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3226  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3227  int n_tasks = pst->n_tasks();
3228  // We allow that there may be no tasks to do here because
3229  // we are restarting after a stack overflow.
3230  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3231  uint nth_task = 0;
3232
3233  HeapWord* aligned_start = sp->bottom();
3234  if (sp->used_region().contains(_restart_addr)) {
3235    // Align down to a card boundary for the start of 0th task
3236    // for this space.
3237    aligned_start =
3238      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3239                                 CardTableModRefBS::card_size);
3240  }
3241
3242  size_t chunk_size = sp->marking_task_size();
3243  while (!pst->is_task_claimed(/* reference */ nth_task)) {
3244    // Having claimed the nth task in this space,
3245    // compute the chunk that it corresponds to:
3246    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3247                               aligned_start + (nth_task+1)*chunk_size);
3248    // Try and bump the global finger via a CAS;
3249    // note that we need to do the global finger bump
3250    // _before_ taking the intersection below, because
3251    // the task corresponding to that region will be
3252    // deemed done even if the used_region() expands
3253    // because of allocation -- as it almost certainly will
3254    // during start-up while the threads yield in the
3255    // closure below.
3256    HeapWord* finger = span.end();
3257    bump_global_finger(finger);   // atomically
3258    // There are null tasks here corresponding to chunks
3259    // beyond the "top" address of the space.
3260    span = span.intersection(sp->used_region());
3261    if (!span.is_empty()) {  // Non-null task
3262      HeapWord* prev_obj;
3263      assert(!span.contains(_restart_addr) || nth_task == 0,
3264             "Inconsistency");
3265      if (nth_task == 0) {
3266        // For the 0th task, we'll not need to compute a block_start.
3267        if (span.contains(_restart_addr)) {
3268          // In the case of a restart because of stack overflow,
3269          // we might additionally skip a chunk prefix.
3270          prev_obj = _restart_addr;
3271        } else {
3272          prev_obj = span.start();
3273        }
3274      } else {
3275        // We want to skip the first object because
3276        // the protocol is to scan any object in its entirety
3277        // that _starts_ in this span; a fortiori, any
3278        // object starting in an earlier span is scanned
3279        // as part of an earlier claimed task.
3280        // Below we use the "careful" version of block_start
3281        // so we do not try to navigate uninitialized objects.
3282        prev_obj = sp->block_start_careful(span.start());
3283        // Below we use a variant of block_size that uses the
3284        // Printezis bits to avoid waiting for allocated
3285        // objects to become initialized/parsable.
3286        while (prev_obj < span.start()) {
3287          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3288          if (sz > 0) {
3289            prev_obj += sz;
3290          } else {
3291            // In this case we may end up doing a bit of redundant
3292            // scanning, but that appears unavoidable, short of
3293            // locking the free list locks; see bug 6324141.
3294            break;
3295          }
3296        }
3297      }
3298      if (prev_obj < span.end()) {
3299        MemRegion my_span = MemRegion(prev_obj, span.end());
3300        // Do the marking work within a non-empty span.
3301        // (Unlike MarkFromRootsClosure in do_marking_st(), this parallel
3302        // closure takes no explicit yield flag as its last constructor argument.)
3303        ParMarkFromRootsClosure cl(this, _collector, my_span,
3304                                   &_collector->_markBitMap,
3305                                   work_queue(i),
3306                                   &_collector->_markStack);
3307        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3308      } // else nothing to do for this task
3309    }   // else nothing to do for this task
3310  }
3311  // We'd be tempted to assert here that since there are no
3312  // more tasks left to claim in this space, the global_finger
3313  // must exceed space->top() and a fortiori space->end(). However,
3314  // that would not quite be correct because the bumping of
3315  // global_finger occurs strictly after the claiming of a task,
3316  // so by the time we reach here the global finger may not yet
3317  // have been bumped up by the thread that claimed the last
3318  // task.
3319  pst->all_tasks_completed();
3320}
3321
3322class ParConcMarkingClosure: public MetadataAwareOopClosure {
3323 private:
3324  CMSCollector* _collector;
3325  CMSConcMarkingTask* _task;
3326  MemRegion     _span;
3327  CMSBitMap*    _bit_map;
3328  CMSMarkStack* _overflow_stack;
3329  OopTaskQueue* _work_queue;
3330 protected:
3331  DO_OOP_WORK_DEFN
3332 public:
3333  ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3334                        CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3335    MetadataAwareOopClosure(collector->ref_processor()),
3336    _collector(collector),
3337    _task(task),
3338    _span(collector->_span),
3339    _work_queue(work_queue),
3340    _bit_map(bit_map),
3341    _overflow_stack(overflow_stack)
3342  { }
3343  virtual void do_oop(oop* p);
3344  virtual void do_oop(narrowOop* p);
3345
3346  void trim_queue(size_t max);
3347  void handle_stack_overflow(HeapWord* lost);
3348  void do_yield_check() {
3349    if (_task->should_yield()) {
3350      _task->yield();
3351    }
3352  }
3353};
3354
3355DO_OOP_WORK_IMPL(ParConcMarkingClosure)
3356
3357// Grey object scanning during work stealing phase --
3358// the salient assumption here is that any references
3359// that are in these stolen objects being scanned must
3360// already have been initialized (else they would not have
3361// been published), so we do not need to check for
3362// uninitialized objects before pushing here.
3363void ParConcMarkingClosure::do_oop(oop obj) {
3364  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3365  HeapWord* addr = (HeapWord*)obj;
3366  // Check if oop points into the CMS generation
3367  // and is not marked
3368  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3369    // a white object ...
3370    // If we manage to "claim" the object, by being the
3371    // first thread to mark it, then we push it on our
3372    // marking stack
3373    if (_bit_map->par_mark(addr)) {     // ... now grey
3374      // push on work queue (grey set)
3375      bool simulate_overflow = false;
3376      NOT_PRODUCT(
3377        if (CMSMarkStackOverflowALot &&
3378            _collector->simulate_overflow()) {
3379          // simulate a stack overflow
3380          simulate_overflow = true;
3381        }
3382      )
3383      if (simulate_overflow ||
3384          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3385        // stack overflow
3386        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3387        // We cannot assert that the overflow stack is full because
3388        // it may have been emptied since.
3389        assert(simulate_overflow ||
3390               _work_queue->size() == _work_queue->max_elems(),
3391              "Else push should have succeeded");
3392        handle_stack_overflow(addr);
3393      }
3394    } // Else, some other thread got there first
3395    do_yield_check();
3396  }
3397}
3398
3399void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
3400void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
3401
3402void ParConcMarkingClosure::trim_queue(size_t max) {
3403  while (_work_queue->size() > max) {
3404    oop new_oop;
3405    if (_work_queue->pop_local(new_oop)) {
3406      assert(new_oop->is_oop(), "Should be an oop");
3407      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3408      assert(_span.contains((HeapWord*)new_oop), "Not in span");
3409      new_oop->oop_iterate(this);  // do_oop() above
3410      do_yield_check();
3411    }
3412  }
3413}
3414
3415// Upon stack overflow, we discard (part of) the stack,
3416// remembering the least address amongst those discarded
3417// in CMSCollector's _restart_address.
3418void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3419  // We need to do this under a mutex to prevent other
3420  // workers from interfering with the work done below.
3421  MutexLockerEx ml(_overflow_stack->par_lock(),
3422                   Mutex::_no_safepoint_check_flag);
3423  // Remember the least grey address discarded
3424  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3425  _collector->lower_restart_addr(ra);
3426  _overflow_stack->reset();  // discard stack contents
3427  _overflow_stack->expand(); // expand the stack if possible
3428}
3429
3430
3431void CMSConcMarkingTask::do_work_steal(int i) {
3432  OopTaskQueue* work_q = work_queue(i);
3433  oop obj_to_scan;
3434  CMSBitMap* bm = &(_collector->_markBitMap);
3435  CMSMarkStack* ovflw = &(_collector->_markStack);
3436  int* seed = _collector->hash_seed(i);
3437  ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3438  while (true) {
3439    cl.trim_queue(0);
3440    assert(work_q->size() == 0, "Should have been emptied above");
3441    if (get_work_from_overflow_stack(ovflw, work_q)) {
3442      // Can't assert below because the work obtained from the
3443      // overflow stack may already have been stolen from us.
3444      // assert(work_q->size() > 0, "Work from overflow stack");
3445      continue;
3446    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3447      assert(obj_to_scan->is_oop(), "Should be an oop");
3448      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3449      obj_to_scan->oop_iterate(&cl);
3450    } else if (terminator()->offer_termination(&_term_term)) {
3451      assert(work_q->size() == 0, "Impossible!");
3452      break;
3453    } else if (yielding() || should_yield()) {
3454      yield();
3455    }
3456  }
3457}
3458
3459// This is run by the CMS (coordinator) thread.
3460void CMSConcMarkingTask::coordinator_yield() {
3461  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3462         "CMS thread should hold CMS token");
3463  // First give up the locks, then yield, then re-lock
3464  // We should probably use a constructor/destructor idiom to
3465  // do this unlock/lock or modify the MutexUnlocker class to
3466  // serve our purpose. XXX
3467  assert_lock_strong(_bit_map_lock);
3468  _bit_map_lock->unlock();
3469  ConcurrentMarkSweepThread::desynchronize(true);
3470  _collector->stopTimer();
3471  _collector->incrementYields();
3472
3473  // It is possible for whichever thread initiated the yield request
3474  // not to get a chance to wake up and take the bitmap lock between
3475  // this thread releasing it and reacquiring it. So, while the
3476  // should_yield() flag is on, let's sleep for a bit to give the
3477  // other thread a chance to wake up. The limit imposed on the number
3478// of iterations is defensive, to avoid any unforeseen circumstances
3479  // putting us into an infinite loop. Since it's always been this
3480  // (coordinator_yield()) method that was observed to cause the
3481  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3482  // which is by default non-zero. For the other seven methods that
3483// also perform the yield operation, we are using a different
3484  // parameter (CMSYieldSleepCount) which is by default zero. This way we
3485  // can enable the sleeping for those methods too, if necessary.
3486  // See 6442774.
3487  //
3488  // We really need to reconsider the synchronization between the GC
3489  // thread and the yield-requesting threads in the future and we
3490  // should really use wait/notify, which is the recommended
3491  // way of doing this type of interaction. Additionally, we should
3492  // consolidate the eight methods that do the yield operation and they
3493  // are almost identical into one for better maintainability and
3494  // readability. See 6445193.
3495  //
3496  // Tony 2006.06.29
3497  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3498                   ConcurrentMarkSweepThread::should_yield() &&
3499                   !CMSCollector::foregroundGCIsActive(); ++i) {
3500    os::sleep(Thread::current(), 1, false);
3501  }
3502
3503  ConcurrentMarkSweepThread::synchronize(true);
3504  _bit_map_lock->lock_without_safepoint_check();
3505  _collector->startTimer();
3506}
3507
3508bool CMSCollector::do_marking_mt() {
3509  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3510  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3511                                                                  conc_workers()->active_workers(),
3512                                                                  Threads::number_of_non_daemon_threads());
3513  num_workers = conc_workers()->update_active_workers(num_workers);
3514  log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
3515
3516  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3517
3518  CMSConcMarkingTask tsk(this,
3519                         cms_space,
3520                         conc_workers(),
3521                         task_queues());
3522
3523  // Since the actual number of workers we get may be different
3524  // from the number we requested above, do we need to do anything different
3525  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3526  // class?? XXX
3527  cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3528
3529  // Refs discovery is already non-atomic.
3530  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3531  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3532  conc_workers()->start_task(&tsk);
3533  while (tsk.yielded()) {
3534    tsk.coordinator_yield();
3535    conc_workers()->continue_task(&tsk);
3536  }
3537  // If the task was aborted, _restart_addr will be non-NULL
3538  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3539  while (_restart_addr != NULL) {
3540    // XXX For now we do not make use of ABORTED state and have not
3541    // yet implemented the right abort semantics (even in the original
3542    // single-threaded CMS case). That needs some more investigation
3543    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3544    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3545    // If _restart_addr is non-NULL, a marking stack overflow
3546    // occurred; we need to do a fresh marking iteration from the
3547    // indicated restart address.
3548    if (_foregroundGCIsActive) {
3549      // We may be running into repeated stack overflows, having
3550      // reached the limit of the stack size, while making very
3551      // slow forward progress. It may be best to bail out and
3552      // let the foreground collector do its job.
3553      // Clear _restart_addr, so that foreground GC
3554      // works from scratch. This avoids the headache of
3555      // a "rescan" which would otherwise be needed because
3556      // of the dirty mod union table & card table.
3557      _restart_addr = NULL;
3558      return false;
3559    }
3560    // Adjust the task to restart from _restart_addr
3561    tsk.reset(_restart_addr);
3562    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3563                  _restart_addr);
3564    _restart_addr = NULL;
3565    // Get the workers going again
3566    conc_workers()->start_task(&tsk);
3567    while (tsk.yielded()) {
3568      tsk.coordinator_yield();
3569      conc_workers()->continue_task(&tsk);
3570    }
3571  }
3572  assert(tsk.completed(), "Inconsistency");
3573  assert(tsk.result() == true, "Inconsistency");
3574  return true;
3575}
3576
3577bool CMSCollector::do_marking_st() {
3578  ResourceMark rm;
3579  HandleMark   hm;
3580
3581  // Temporarily make refs discovery single threaded (non-MT)
3582  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3583  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3584    &_markStack, CMSYield);
3585  // the last argument to the closure constructor above (CMSYield) indicates
3586  // whether the iteration should be incremental with periodic yields.
3587  _markBitMap.iterate(&markFromRootsClosure);
3588  // If _restart_addr is non-NULL, a marking stack overflow
3589  // occurred; we need to do a fresh iteration from the
3590  // indicated restart address.
3591  while (_restart_addr != NULL) {
3592    if (_foregroundGCIsActive) {
3593      // We may be running into repeated stack overflows, having
3594      // reached the limit of the stack size, while making very
3595      // slow forward progress. It may be best to bail out and
3596      // let the foreground collector do its job.
3597      // Clear _restart_addr, so that foreground GC
3598      // works from scratch. This avoids the headache of
3599      // a "rescan" which would otherwise be needed because
3600      // of the dirty mod union table & card table.
3601      _restart_addr = NULL;
3602      return false;  // indicating failure to complete marking
3603    }
3604    // Deal with stack overflow:
3605    // we restart marking from _restart_addr
3606    HeapWord* ra = _restart_addr;
3607    markFromRootsClosure.reset(ra);
3608    _restart_addr = NULL;
3609    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3610  }
3611  return true;
3612}
3613
3614void CMSCollector::preclean() {
3615  check_correct_thread_executing();
3616  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3617  verify_work_stacks_empty();
3618  verify_overflow_empty();
3619  _abort_preclean = false;
3620  if (CMSPrecleaningEnabled) {
3621    if (!CMSEdenChunksRecordAlways) {
3622      _eden_chunk_index = 0;
3623    }
3624    size_t used = get_eden_used();
3625    size_t capacity = get_eden_capacity();
3626    // Don't start sampling unless we will get sufficiently
3627    // many samples.
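    // For example, assuming the usual defaults CMSScheduleRemarkSamplingRatio
    // = 5 and CMSScheduleRemarkEdenPenetration = 50 (percent), sampling starts
    // only while eden is less than (capacity/5/100)*50, i.e. roughly 10% full;
    // beyond that, eden would cross the remark-scheduling threshold before
    // enough samples could be taken.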
3628    if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
3629                * CMSScheduleRemarkEdenPenetration)) {
3630      _start_sampling = true;
3631    } else {
3632      _start_sampling = false;
3633    }
3634    GCTraceCPUTime tcpu;
3635    CMSPhaseAccounting pa(this, "Concurrent Preclean");
3636    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3637  }
3638  CMSTokenSync x(true); // is cms thread
3639  if (CMSPrecleaningEnabled) {
3640    sample_eden();
3641    _collectorState = AbortablePreclean;
3642  } else {
3643    _collectorState = FinalMarking;
3644  }
3645  verify_work_stacks_empty();
3646  verify_overflow_empty();
3647}
3648
3649// Try and schedule the remark such that young gen
3650// occupancy is CMSScheduleRemarkEdenPenetration %.
3651void CMSCollector::abortable_preclean() {
3652  check_correct_thread_executing();
3653  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3654  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3655
3656  // If Eden's current occupancy is below this threshold,
3657  // immediately schedule the remark; else preclean
3658  // past the next scavenge in an effort to
3659  // schedule the pause as described above. By choosing
3660  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3661  // we will never do an actual abortable preclean cycle.
3662  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3663    GCTraceCPUTime tcpu;
3664    CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3665    // We need more smarts in the abortable preclean
3666    // loop below to deal with cases where allocation
3667    // in young gen is very very slow, and our precleaning
3668    // is running a losing race against a horde of
3669    // mutators intent on flooding us with CMS updates
3670    // (dirty cards).
3671    // One, admittedly dumb, strategy is to give up
3672    // after a certain number of abortable precleaning loops
3673    // or after a certain maximum time. We want to make
3674    // this smarter in the next iteration.
3675    // XXX FIX ME!!! YSR
3676    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3677    while (!(should_abort_preclean() ||
3678             ConcurrentMarkSweepThread::cmst()->should_terminate())) {
3679      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3680      cumworkdone += workdone;
3681      loops++;
3682      // Voluntarily terminate abortable preclean phase if we have
3683      // been at it for too long.
3684      if ((CMSMaxAbortablePrecleanLoops != 0) &&
3685          loops >= CMSMaxAbortablePrecleanLoops) {
3686        log_debug(gc)(" CMS: abort preclean due to loops ");
3687        break;
3688      }
3689      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3690        log_debug(gc)(" CMS: abort preclean due to time ");
3691        break;
3692      }
3693      // If we are doing little work each iteration, we should
3694      // take a short break.
3695      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3696        // Sleep for some time, waiting for work to accumulate
3697        stopTimer();
3698        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3699        startTimer();
3700        waited++;
3701      }
3702    }
3703    log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3704                               loops, waited, cumworkdone);
3705  }
3706  CMSTokenSync x(true); // is cms thread
3707  if (_collectorState != Idling) {
3708    assert(_collectorState == AbortablePreclean,
3709           "Spontaneous state transition?");
3710    _collectorState = FinalMarking;
3711  } // Else, a foreground collection completed this CMS cycle.
3712  return;
3713}
3714
3715// Respond to an Eden sampling opportunity
3716void CMSCollector::sample_eden() {
3717  // Make sure a young gc cannot sneak in between our
3718  // reading and recording of a sample.
3719  assert(Thread::current()->is_ConcurrentGC_thread(),
3720         "Only the cms thread may collect Eden samples");
3721  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3722         "Should collect samples while holding CMS token");
3723  if (!_start_sampling) {
3724    return;
3725  }
3726  // When CMSEdenChunksRecordAlways is true, the eden chunk array
3727  // is populated by the young generation.
3728  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3729    if (_eden_chunk_index < _eden_chunk_capacity) {
3730      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3731      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3732             "Unexpected state of Eden");
3733      // We'd like to check that what we just sampled is an oop-start address;
3734      // however, we cannot do that here since the object may not yet have been
3735      // initialized. So we'll instead do the check when we _use_ this sample
3736      // later.
3737      if (_eden_chunk_index == 0 ||
3738          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3739                         _eden_chunk_array[_eden_chunk_index-1])
3740           >= CMSSamplingGrain)) {
3741        _eden_chunk_index++;  // commit sample
3742      }
3743    }
3744  }
3745  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3746    size_t used = get_eden_used();
3747    size_t capacity = get_eden_capacity();
3748    assert(used <= capacity, "Unexpected state of Eden");
3749    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3750      _abort_preclean = true;
3751    }
3752  }
3753}
3754
3755
3756size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3757  assert(_collectorState == Precleaning ||
3758         _collectorState == AbortablePreclean, "incorrect state");
3759  ResourceMark rm;
3760  HandleMark   hm;
3761
3762  // Precleaning is currently not MT but the reference processor
3763  // may be set for MT.  Disable it temporarily here.
3764  ReferenceProcessor* rp = ref_processor();
3765  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3766
3767  // Do one pass of scrubbing the discovered reference lists
3768  // to remove any reference objects with strongly-reachable
3769  // referents.
3770  if (clean_refs) {
3771    CMSPrecleanRefsYieldClosure yield_cl(this);
3772    assert(rp->span().equals(_span), "Spans should be equal");
3773    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3774                                   &_markStack, true /* preclean */);
3775    CMSDrainMarkingStackClosure complete_trace(this,
3776                                   _span, &_markBitMap, &_markStack,
3777                                   &keep_alive, true /* preclean */);
3778
3779    // We don't want this step to interfere with a young
3780    // collection because we don't want to take CPU
3781    // or memory bandwidth away from the young GC threads
3782    // (which may be as many as there are CPUs).
3783    // Note that we don't need to protect ourselves from
3784    // interference with mutators because they can't
3785    // manipulate the discovered reference lists nor affect
3786    // the computed reachability of the referents, the
3787    // only properties manipulated by the precleaning
3788    // of these reference lists.
3789    stopTimer();
3790    CMSTokenSyncWithLocks x(true /* is cms thread */,
3791                            bitMapLock());
3792    startTimer();
3793    sample_eden();
3794
3795    // The following will yield to allow foreground
3796    // collection to proceed promptly. XXX YSR:
3797    // The code in this method may need further
3798    // tweaking for better performance and some restructuring
3799    // for cleaner interfaces.
3800    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3801    rp->preclean_discovered_references(
3802          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3803          gc_timer);
3804  }
3805
3806  if (clean_survivor) {  // preclean the active survivor space(s)
3807    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3808                             &_markBitMap, &_modUnionTable,
3809                             &_markStack, true /* precleaning phase */);
3810    stopTimer();
3811    CMSTokenSyncWithLocks ts(true /* is cms thread */,
3812                             bitMapLock());
3813    startTimer();
3814    unsigned int before_count =
3815      GenCollectedHeap::heap()->total_collections();
3816    SurvivorSpacePrecleanClosure
3817      sss_cl(this, _span, &_markBitMap, &_markStack,
3818             &pam_cl, before_count, CMSYield);
3819    _young_gen->from()->object_iterate_careful(&sss_cl);
3820    _young_gen->to()->object_iterate_careful(&sss_cl);
3821  }
3822  MarkRefsIntoAndScanClosure
3823    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3824             &_markStack, this, CMSYield,
3825             true /* precleaning phase */);
3826  // CAUTION: The following closure has persistent state that may need to
3827  // be reset upon a decrease in the sequence of addresses it
3828  // processes.
3829  ScanMarkedObjectsAgainCarefullyClosure
3830    smoac_cl(this, _span,
3831      &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3832
3833  // Preclean dirty cards in ModUnionTable and CardTable using
3834  // appropriate convergence criterion;
3835  // repeat CMSPrecleanIter times unless we find that
3836  // we are losing.
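  // For example, assuming the usual defaults CMSPrecleanNumerator = 2 and
  // CMSPrecleanDenominator = 3, the loop below stops early once a pass finds
  // more than 2/3 as many dirty cards as the previous pass did (i.e. dirtying
  // is keeping pace with our precleaning), or once the count of dirty cards
  // drops to CMSPrecleanThreshold or fewer.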
3837  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3838  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3839         "Bad convergence multiplier");
3840  assert(CMSPrecleanThreshold >= 100,
3841         "Unreasonably low CMSPrecleanThreshold");
3842
3843  size_t numIter, cumNumCards, lastNumCards, curNumCards;
3844  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3845       numIter < CMSPrecleanIter;
3846       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3847    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3848    log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3849    // Either there are very few dirty cards, so re-mark
3850    // pause will be small anyway, or our pre-cleaning isn't
3851    // that much faster than the rate at which cards are being
3852    // dirtied, so we might as well stop and re-mark since
3853    // precleaning won't improve our re-mark time by much.
3854    if (curNumCards <= CMSPrecleanThreshold ||
3855        (numIter > 0 &&
3856         (curNumCards * CMSPrecleanDenominator >
3857         lastNumCards * CMSPrecleanNumerator))) {
3858      numIter++;
3859      cumNumCards += curNumCards;
3860      break;
3861    }
3862  }
3863
3864  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3865
3866  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3867  cumNumCards += curNumCards;
3868  log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3869                             curNumCards, cumNumCards, numIter);
3870  return cumNumCards;   // as a measure of useful work done
3871}
3872
3873// PRECLEANING NOTES:
3874// Precleaning involves:
3875// . reading the bits of the modUnionTable and clearing the set bits.
3876// . For the cards corresponding to the set bits, we scan the
3877//   objects on those cards. This means we need the free_list_lock
3878//   so that we can safely iterate over the CMS space when scanning
3879//   for oops.
3880// . When we scan the objects, we'll be both reading and setting
3881//   marks in the marking bit map, so we'll need the marking bit map.
3882// . For protecting _collector_state transitions, we take the CGC_lock.
3883//   Note that any races in the reading of card table entries by the
3884//   CMS thread on the one hand and the clearing of those entries by the
3885//   VM thread or the setting of those entries by the mutator threads on the
3886//   other are quite benign. However, for efficiency it makes sense to keep
3887//   the VM thread from racing with the CMS thread while the latter is
3888//   reading dirty card info from the card table and the modUnionTable. We
3889//   therefore also use the CGC_lock to protect the reading of the card
3890//   table and the mod union table by the CMS thread.
3891// . We run concurrently with mutator updates, so scanning
3892//   needs to be done carefully  -- we should not try to scan
3893//   potentially uninitialized objects.
3894//
3895// Locking strategy: While holding the CGC_lock, we scan over and
3896// reset a maximal dirty range of the mod union / card tables, then lock
3897// the free_list_lock and bitmap lock to do a full marking, then
3898// release these locks; and repeat the cycle. This allows for a
3899// certain amount of fairness in the sharing of these locks between
3900// the CMS collector on the one hand, and the VM thread and the
3901// mutators on the other.
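//
// In outline, each iteration of the scanning loops in
// preclean_mod_union_table() and preclean_card_table() below therefore looks
// roughly like this (a sketch of the structure, not the exact code):
//
//   while (there is more of the generation left to scan) {
//     under CMSTokenSync (CGC_lock):
//       grab and clear the next maximal dirty range of the mod union table
//       (resp. card table), remembering where to resume;
//     under the freelistLock and bitMapLock:
//       carefully scan the objects on the corresponding cards, marking through
//       them; if an uninitialized object or an abort request is encountered,
//       re-dirty the unscanned cards and stop early;
//   }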
3902
3903// NOTE: preclean_mod_union_table() and preclean_card_table()
3904// further below are largely identical; if you need to modify
3905// one of these methods, please check the other method too.
3906
3907size_t CMSCollector::preclean_mod_union_table(
3908  ConcurrentMarkSweepGeneration* old_gen,
3909  ScanMarkedObjectsAgainCarefullyClosure* cl) {
3910  verify_work_stacks_empty();
3911  verify_overflow_empty();
3912
3913  // strategy: starting with the first card, accumulate contiguous
3914  // ranges of dirty cards; clear these cards, then scan the region
3915  // covered by these cards.
3916
3917  // Since all of the MUT is committed ahead, we can just use
3918  // that, in case the generations expand while we are precleaning.
3919  // It might also be fine to just use the committed part of the
3920  // generation, but we might potentially miss cards when the
3921  // generation is rapidly expanding while we are in the midst
3922  // of precleaning.
3923  HeapWord* startAddr = old_gen->reserved().start();
3924  HeapWord* endAddr   = old_gen->reserved().end();
3925
3926  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3927
3928  size_t numDirtyCards, cumNumDirtyCards;
3929  HeapWord *nextAddr, *lastAddr;
3930  for (cumNumDirtyCards = numDirtyCards = 0,
3931       nextAddr = lastAddr = startAddr;
3932       nextAddr < endAddr;
3933       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3934
3935    ResourceMark rm;
3936    HandleMark   hm;
3937
3938    MemRegion dirtyRegion;
3939    {
3940      stopTimer();
3941      // Potential yield point
3942      CMSTokenSync ts(true);
3943      startTimer();
3944      sample_eden();
3945      // Get dirty region starting at nextAddr (inclusive),
3946      // simultaneously clearing it.
3947      dirtyRegion =
3948        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
3949      assert(dirtyRegion.start() >= nextAddr,
3950             "returned region inconsistent?");
3951    }
3952    // Remember where the next search should begin.
3953    // The returned region (if non-empty) is a right open interval,
3954    // so lastAddr is obtained from the right end of that
3955    // interval.
3956    lastAddr = dirtyRegion.end();
3957    // Should do something more transparent and less hacky XXX
3958    numDirtyCards =
3959      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
3960
3961    // We'll scan the cards in the dirty region (with periodic
3962    // yields for foreground GC as needed).
3963    if (!dirtyRegion.is_empty()) {
3964      assert(numDirtyCards > 0, "consistency check");
3965      HeapWord* stop_point = NULL;
3966      stopTimer();
3967      // Potential yield point
3968      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
3969                               bitMapLock());
3970      startTimer();
3971      {
3972        verify_work_stacks_empty();
3973        verify_overflow_empty();
3974        sample_eden();
3975        stop_point =
3976          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
3977      }
3978      if (stop_point != NULL) {
3979        // The careful iteration stopped early either because it found an
3980        // uninitialized object, or because we were in the midst of an
3981        // "abortable preclean", which should now be aborted. Redirty
3982        // the bits corresponding to the partially-scanned or unscanned
3983        // cards. We'll either restart at the next block boundary or
3984        // abort the preclean.
3985        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
3986               "Should only be AbortablePreclean.");
3987        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
3988        if (should_abort_preclean()) {
3989          break; // out of preclean loop
3990        } else {
3991          // Compute the next address at which preclean should pick up;
3992          // might need bitMapLock in order to read P-bits.
3993          lastAddr = next_card_start_after_block(stop_point);
3994        }
3995      }
3996    } else {
3997      assert(lastAddr == endAddr, "consistency check");
3998      assert(numDirtyCards == 0, "consistency check");
3999      break;
4000    }
4001  }
4002  verify_work_stacks_empty();
4003  verify_overflow_empty();
4004  return cumNumDirtyCards;
4005}
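
// A small sketch of the scan pattern used above, kept out of the build
// with #if 0: repeatedly fetch-and-clear a maximal run of dirty "cards",
// process it, and resume the search at its right-open end. The vector of
// bools is a hypothetical stand-in for the mod union table.
#if 0
#include <cstddef>
#include <vector>
static size_t preclean_sketch(std::vector<bool>& dirty) {   // true == dirty card
  size_t processed = 0;
  size_t next = 0;
  while (next < dirty.size()) {
    // Find the next dirty card at or after 'next'.
    while (next < dirty.size() && !dirty[next]) next++;
    size_t start = next;
    // Extend to a maximal contiguous dirty run, clearing as we go
    // (the analogue of getAndClearMarkedRegion()).
    while (next < dirty.size() && dirty[next]) { dirty[next] = false; next++; }
    if (start == next) break;      // empty region returned: no more dirty cards
    processed += next - start;     // "scan the objects" covered by [start, next)
    // The loop resumes at the right end of the interval, exactly as the
    // code above resumes at dirtyRegion.end().
  }
  return processed;
}
#endif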
4006
4007// NOTE: preclean_mod_union_table() above and preclean_card_table()
4008// below are largely identical; if you need to modify
4009// one of these methods, please check the other method too.
4010
4011size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
4012  ScanMarkedObjectsAgainCarefullyClosure* cl) {
4013  // strategy: it is similar to preclean_mod_union_table() above, in that
4014  // we accumulate contiguous ranges of dirty cards, mark these cards
4015  // precleaned, then scan the region covered by these cards.
4016  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
4017  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
4018
4019  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4020
4021  size_t numDirtyCards, cumNumDirtyCards;
4022  HeapWord *lastAddr, *nextAddr;
4023
4024  for (cumNumDirtyCards = numDirtyCards = 0,
4025       nextAddr = lastAddr = startAddr;
4026       nextAddr < endAddr;
4027       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4028
4029    ResourceMark rm;
4030    HandleMark   hm;
4031
4032    MemRegion dirtyRegion;
4033    {
4034      // See comments in "Precleaning notes" above on why we
4035      // do this locking. XXX Could the locking overheads be
4036      // too high when dirty cards are sparse? [I don't think so.]
4037      stopTimer();
4038      CMSTokenSync x(true); // is cms thread
4039      startTimer();
4040      sample_eden();
4041      // Get and clear dirty region from card table
4042      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4043                                    MemRegion(nextAddr, endAddr),
4044                                    true,
4045                                    CardTableModRefBS::precleaned_card_val());
4046
4047      assert(dirtyRegion.start() >= nextAddr,
4048             "returned region inconsistent?");
4049    }
4050    lastAddr = dirtyRegion.end();
4051    numDirtyCards =
4052      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4053
4054    if (!dirtyRegion.is_empty()) {
4055      stopTimer();
4056      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4057      startTimer();
4058      sample_eden();
4059      verify_work_stacks_empty();
4060      verify_overflow_empty();
4061      HeapWord* stop_point =
4062        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4063      if (stop_point != NULL) {
4064        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4065               "Should only be AbortablePreclean.");
4066        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4067        if (should_abort_preclean()) {
4068          break; // out of preclean loop
4069        } else {
4070          // Compute the next address at which preclean should pick up.
4071          lastAddr = next_card_start_after_block(stop_point);
4072        }
4073      }
4074    } else {
4075      break;
4076    }
4077  }
4078  verify_work_stacks_empty();
4079  verify_overflow_empty();
4080  return cumNumDirtyCards;
4081}
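
// A worked example of the card arithmetic used above, excluded from the
// build with #if 0. The 512-byte card size and 8-byte HeapWord are the
// usual HotSpot values but are treated here as assumptions; the function
// name is hypothetical.
#if 0
#include <cassert>
#include <cstddef>
static void card_arithmetic_example() {
  const size_t card_size          = 512;   // bytes per card (assumed)
  const size_t heap_word_size     = 8;     // bytes per HeapWord (assumed)
  const size_t card_size_in_words = card_size / heap_word_size;   // 64 words/card
  // Regions returned by dirty_card_range_after_reset() are card-aligned,
  // so their word size divides evenly into whole cards.
  const size_t region_words    = 512;      // e.g. 8 cards' worth of words
  const size_t num_dirty_cards = region_words / card_size_in_words;
  assert(num_dirty_cards == 8);
}
#endif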
4082
4083class PrecleanKlassClosure : public KlassClosure {
4084  KlassToOopClosure _cm_klass_closure;
4085 public:
4086  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4087  void do_klass(Klass* k) {
4088    if (k->has_accumulated_modified_oops()) {
4089      k->clear_accumulated_modified_oops();
4090
4091      _cm_klass_closure.do_klass(k);
4092    }
4093  }
4094};
4095
4096// The freelist lock is needed to prevent asserts; is it really needed?
4097void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4098
4099  cl->set_freelistLock(freelistLock);
4100
4101  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4102
4103  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4104  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4105  PrecleanKlassClosure preclean_klass_closure(cl);
4106  ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4107
4108  verify_work_stacks_empty();
4109  verify_overflow_empty();
4110}
4111
4112void CMSCollector::checkpointRootsFinal() {
4113  assert(_collectorState == FinalMarking, "incorrect state transition?");
4114  check_correct_thread_executing();
4115  // world is stopped at this checkpoint
4116  assert(SafepointSynchronize::is_at_safepoint(),
4117         "world should be stopped");
4118  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4119
4120  verify_work_stacks_empty();
4121  verify_overflow_empty();
4122
4123  log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4124                _young_gen->used() / K, _young_gen->capacity() / K);
4125  {
4126    if (CMSScavengeBeforeRemark) {
4127      GenCollectedHeap* gch = GenCollectedHeap::heap();
4128      // Temporarily set flag to false; GCH->do_collection will
4129      // expect it to be false and will set it to true.
4130      FlagSetting fl(gch->_is_gc_active, false);
4131
4132      gch->do_collection(true,                      // full (i.e. force, see below)
4133                         false,                     // !clear_all_soft_refs
4134                         0,                         // size
4135                         false,                     // is_tlab
4136                         GenCollectedHeap::YoungGen // type
4137        );
4138    }
4139    FreelistLocker x(this);
4140    MutexLockerEx y(bitMapLock(),
4141                    Mutex::_no_safepoint_check_flag);
4142    checkpointRootsFinalWork();
4143  }
4144  verify_work_stacks_empty();
4145  verify_overflow_empty();
4146}
4147
4148void CMSCollector::checkpointRootsFinalWork() {
4149  GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4150
4151  assert(haveFreelistLocks(), "must have free list locks");
4152  assert_lock_strong(bitMapLock());
4153
4154  ResourceMark rm;
4155  HandleMark   hm;
4156
4157  GenCollectedHeap* gch = GenCollectedHeap::heap();
4158
4159  if (should_unload_classes()) {
4160    CodeCache::gc_prologue();
4161  }
4162  assert(haveFreelistLocks(), "must have free list locks");
4163  assert_lock_strong(bitMapLock());
4164
4165  // We might assume that we need not fill TLAB's when
4166  // CMSScavengeBeforeRemark is set, because we may have just done
4167  // a scavenge which would have filled all TLAB's -- and besides
4168  // Eden would be empty. This however may not always be the case --
4169  // for instance although we asked for a scavenge, it may not have
4170  // happened because of a JNI critical section. We probably need
4171  // a policy for deciding whether we can in that case wait until
4172  // the critical section releases and then do the remark following
4173  // the scavenge, and skip it here. In the absence of that policy,
4174  // or of an indication of whether the scavenge did indeed occur,
4175  // we cannot rely on TLAB's having been filled and must do
4176  // so here just in case a scavenge did not happen.
4177  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4178  // Update the saved marks which may affect the root scans.
4179  gch->save_marks();
4180
4181  print_eden_and_survivor_chunk_arrays();
4182
4183  {
4184#if defined(COMPILER2) || INCLUDE_JVMCI
4185    DerivedPointerTableDeactivate dpt_deact;
4186#endif
4187
4188    // Note on the role of the mod union table:
4189    // Since the marker in "markFromRoots" marks concurrently with
4190    // mutators, it is possible for some reachable objects not to have been
4191    // scanned. For instance, an only reference to an object A was
4192    // placed in object B after the marker scanned B. Unless B is rescanned,
4193    // A would be collected. Such updates to references in marked objects
4194    // are detected via the mod union table which is the set of all cards
4195    // dirtied since the first checkpoint in this GC cycle and prior to
4196    // the most recent young generation GC, minus those cleaned up by the
4197    // concurrent precleaning.
4198    if (CMSParallelRemarkEnabled) {
4199      GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
4200      do_remark_parallel();
4201    } else {
4202      GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
4203      do_remark_non_parallel();
4204    }
4205  }
4206  verify_work_stacks_empty();
4207  verify_overflow_empty();
4208
4209  {
4210    GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
4211    refProcessingWork();
4212  }
4213  verify_work_stacks_empty();
4214  verify_overflow_empty();
4215
4216  if (should_unload_classes()) {
4217    CodeCache::gc_epilogue();
4218  }
4219  JvmtiExport::gc_epilogue();
4220
4221  // If we encountered any (marking stack / work queue) overflow
4222  // events during the current CMS cycle, take appropriate
4223  // remedial measures, where possible, so as to try and avoid
4224  // recurrence of that condition.
4225  assert(_markStack.isEmpty(), "No grey objects");
4226  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4227                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4228  if (ser_ovflw > 0) {
4229    log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4230                         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4231    _markStack.expand();
4232    _ser_pmc_remark_ovflw = 0;
4233    _ser_pmc_preclean_ovflw = 0;
4234    _ser_kac_preclean_ovflw = 0;
4235    _ser_kac_ovflw = 0;
4236  }
4237  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4238    log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4239                  _par_pmc_remark_ovflw, _par_kac_ovflw);
4240    _par_pmc_remark_ovflw = 0;
4241    _par_kac_ovflw = 0;
4242  }
4243  if (_markStack._hit_limit > 0) {
4244    log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4245                  _markStack._hit_limit);
4246  }
4247  if (_markStack._failed_double > 0) {
4248    log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4249                  _markStack._failed_double, _markStack.capacity());
4250  }
4251  _markStack._hit_limit = 0;
4252  _markStack._failed_double = 0;
4253
4254  if ((VerifyAfterGC || VerifyDuringGC) &&
4255      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4256    verify_after_remark();
4257  }
4258
4259  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4260
4261  // Change under the freelistLocks.
4262  _collectorState = Sweeping;
4263  // Call isAllClear() under bitMapLock
4264  assert(_modUnionTable.isAllClear(),
4265      "Should be clear by end of the final marking");
4266  assert(_ct->klass_rem_set()->mod_union_is_clear(),
4267      "Should be clear by end of the final marking");
4268}
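
// A conceptual sketch of the "Note on the role of the mod union table"
// comment earlier in this method, excluded from the build with #if 0.
// Every type and name below is a hypothetical stand-in used only to show
// how a reachable object could be missed without a rescan of dirty cards.
#if 0
struct Obj { Obj* field; bool marked; };
static void lost_update_example(Obj& a, Obj& b, bool card_for_b_dirty) {
  b.marked = true;        // the concurrent marker scans B while B.field is null...
  b.field  = &a;          // ...then a mutator stores the only reference to A into B
  // Without rescanning B at remark, A would stay unmarked and be swept.
  // The dirtied card (recorded in the mod union table) forces the rescan:
  if (card_for_b_dirty && b.field != nullptr && !b.field->marked) {
    b.field->marked = true;
  }
}
#endif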
4269
4270void CMSParInitialMarkTask::work(uint worker_id) {
4271  elapsedTimer _timer;
4272  ResourceMark rm;
4273  HandleMark   hm;
4274
4275  // ---------- scan from roots --------------
4276  _timer.start();
4277  GenCollectedHeap* gch = GenCollectedHeap::heap();
4278  ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4279
4280  // ---------- young gen roots --------------
4281  {
4282    work_on_young_gen_roots(&par_mri_cl);
4283    _timer.stop();
4284    log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4285  }
4286
4287  // ---------- remaining roots --------------
4288  _timer.reset();
4289  _timer.start();
4290
4291  CLDToOopClosure cld_closure(&par_mri_cl, true);
4292
4293  gch->gen_process_roots(_strong_roots_scope,
4294                         GenCollectedHeap::OldGen,
4295                         false,     // yg was scanned above
4296                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4297                         _collector->should_unload_classes(),
4298                         &par_mri_cl,
4299                         NULL,
4300                         &cld_closure);
4301  assert(_collector->should_unload_classes()
4302         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4303         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4304  _timer.stop();
4305  log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4306}
4307
4308// Parallel remark task
4309class CMSParRemarkTask: public CMSParMarkTask {
4310  CompactibleFreeListSpace* _cms_space;
4311
4312  // The per-thread work queues, available here for stealing.
4313  OopTaskQueueSet*       _task_queues;
4314  ParallelTaskTerminator _term;
4315  StrongRootsScope*      _strong_roots_scope;
4316
4317 public:
4318  // A value of 0 passed to n_workers will cause the number of
4319  // workers to be taken from the active workers in the work gang.
4320  CMSParRemarkTask(CMSCollector* collector,
4321                   CompactibleFreeListSpace* cms_space,
4322                   uint n_workers, WorkGang* workers,
4323                   OopTaskQueueSet* task_queues,
4324                   StrongRootsScope* strong_roots_scope):
4325    CMSParMarkTask("Rescan roots and grey objects in parallel",
4326                   collector, n_workers),
4327    _cms_space(cms_space),
4328    _task_queues(task_queues),
4329    _term(n_workers, task_queues),
4330    _strong_roots_scope(strong_roots_scope) { }
4331
4332  OopTaskQueueSet* task_queues() { return _task_queues; }
4333
4334  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4335
4336  ParallelTaskTerminator* terminator() { return &_term; }
4337  uint n_workers() { return _n_workers; }
4338
4339  void work(uint worker_id);
4340
4341 private:
4342  // ... of  dirty cards in old space
4343  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4344                                  ParMarkRefsIntoAndScanClosure* cl);
4345
4346  // ... work stealing for the above
4347  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
4348};
4349
4350class RemarkKlassClosure : public KlassClosure {
4351  KlassToOopClosure _cm_klass_closure;
4352 public:
4353  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4354  void do_klass(Klass* k) {
4355    // Check if we have modified any oops in the Klass during the concurrent marking.
4356    if (k->has_accumulated_modified_oops()) {
4357      k->clear_accumulated_modified_oops();
4358
4359      // We could have transferred the current modified marks to the accumulated marks,
4360      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4361    } else if (k->has_modified_oops()) {
4362      // Don't clear anything; this info is needed by the next young collection.
4363    } else {
4364      // No modified oops in the Klass.
4365      return;
4366    }
4367
4368    // The klass has modified fields, need to scan the klass.
4369    _cm_klass_closure.do_klass(k);
4370  }
4371};
4372
4373void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
4374  ParNewGeneration* young_gen = _collector->_young_gen;
4375  ContiguousSpace* eden_space = young_gen->eden();
4376  ContiguousSpace* from_space = young_gen->from();
4377  ContiguousSpace* to_space   = young_gen->to();
4378
4379  HeapWord** eca = _collector->_eden_chunk_array;
4380  size_t     ect = _collector->_eden_chunk_index;
4381  HeapWord** sca = _collector->_survivor_chunk_array;
4382  size_t     sct = _collector->_survivor_chunk_index;
4383
4384  assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4385  assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4386
4387  do_young_space_rescan(cl, to_space, NULL, 0);
4388  do_young_space_rescan(cl, from_space, sca, sct);
4389  do_young_space_rescan(cl, eden_space, eca, ect);
4390}
4391
4392// work_queue(i) is passed to the closure
4393// ParMarkRefsIntoAndScanClosure.  The "i" parameter
4394// also is passed to do_dirty_card_rescan_tasks() and to
4395// do_work_steal() to select the i-th task_queue.
4396
4397void CMSParRemarkTask::work(uint worker_id) {
4398  elapsedTimer _timer;
4399  ResourceMark rm;
4400  HandleMark   hm;
4401
4402  // ---------- rescan from roots --------------
4403  _timer.start();
4404  GenCollectedHeap* gch = GenCollectedHeap::heap();
4405  ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4406    _collector->_span, _collector->ref_processor(),
4407    &(_collector->_markBitMap),
4408    work_queue(worker_id));
4409
4410  // Rescan young gen roots first since these are likely
4411  // coarsely partitioned and may, on that account, constitute
4412  // the critical path; thus, it's best to start off that
4413  // work first.
4414  // ---------- young gen roots --------------
4415  {
4416    work_on_young_gen_roots(&par_mrias_cl);
4417    _timer.stop();
4418    log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4419  }
4420
4421  // ---------- remaining roots --------------
4422  _timer.reset();
4423  _timer.start();
4424  gch->gen_process_roots(_strong_roots_scope,
4425                         GenCollectedHeap::OldGen,
4426                         false,     // yg was scanned above
4427                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4428                         _collector->should_unload_classes(),
4429                         &par_mrias_cl,
4430                         NULL,
4431                         NULL);     // The dirty klasses will be handled below
4432
4433  assert(_collector->should_unload_classes()
4434         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4435         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4436  _timer.stop();
4437  log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
4438
4439  // ---------- unhandled CLD scanning ----------
4440  if (worker_id == 0) { // Single threaded at the moment.
4441    _timer.reset();
4442    _timer.start();
4443
4444    // Scan all new class loader data objects and new dependencies that were
4445    // introduced during concurrent marking.
4446    ResourceMark rm;
4447    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4448    for (int i = 0; i < array->length(); i++) {
4449      par_mrias_cl.do_cld_nv(array->at(i));
4450    }
4451
4452    // We don't need to keep track of new CLDs anymore.
4453    ClassLoaderDataGraph::remember_new_clds(false);
4454
4455    _timer.stop();
4456    log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4457  }
4458
4459  // ---------- dirty klass scanning ----------
4460  if (worker_id == 0) { // Single threaded at the moment.
4461    _timer.reset();
4462    _timer.start();
4463
4464    // Scan all classes that were dirtied during the concurrent marking phase.
4465    RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4466    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4467
4468    _timer.stop();
4469    log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4470  }
4471
4472  // We might have added oops to ClassLoaderData::_handles during the
4473  // concurrent marking phase. These oops point to newly allocated objects
4474  // that are guaranteed to be kept alive, either by the direct allocation
4475  // code or when the young collector processes the roots. Hence,
4476  // we don't have to revisit the _handles block during the remark phase.
4477
4478  // ---------- rescan dirty cards ------------
4479  _timer.reset();
4480  _timer.start();
4481
4482  // Do the rescan tasks for each of the two spaces
4483  // (cms_space) in turn.
4484  // "worker_id" is passed to select the task_queue for "worker_id"
4485  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4486  _timer.stop();
4487  log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4488
4489  // ---------- steal work from other threads ...
4490  // ---------- ... and drain overflow list.
4491  _timer.reset();
4492  _timer.start();
4493  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4494  _timer.stop();
4495  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4496}
4497
4498void
4499CMSParMarkTask::do_young_space_rescan(
4500  OopsInGenClosure* cl, ContiguousSpace* space,
4501  HeapWord** chunk_array, size_t chunk_top) {
4502  // Until all tasks completed:
4503  // . claim an unclaimed task
4504  // . compute region boundaries corresponding to task claimed
4505  //   using chunk_array
4506  // . par_oop_iterate(cl) over that region
4507
4508  ResourceMark rm;
4509  HandleMark   hm;
4510
4511  SequentialSubTasksDone* pst = space->par_seq_tasks();
4512
4513  uint nth_task = 0;
4514  uint n_tasks  = pst->n_tasks();
4515
4516  if (n_tasks > 0) {
4517    assert(pst->valid(), "Uninitialized use?");
4518    HeapWord *start, *end;
4519    while (!pst->is_task_claimed(/* reference */ nth_task)) {
4520      // We claimed task # nth_task; compute its boundaries.
4521      if (chunk_top == 0) {  // no samples were taken
4522        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4523        start = space->bottom();
4524        end   = space->top();
4525      } else if (nth_task == 0) {
4526        start = space->bottom();
4527        end   = chunk_array[nth_task];
4528      } else if (nth_task < (uint)chunk_top) {
4529        assert(nth_task >= 1, "Control point invariant");
4530        start = chunk_array[nth_task - 1];
4531        end   = chunk_array[nth_task];
4532      } else {
4533        assert(nth_task == (uint)chunk_top, "Control point invariant");
4534        start = chunk_array[chunk_top - 1];
4535        end   = space->top();
4536      }
4537      MemRegion mr(start, end);
4538      // Verify that mr is in space
4539      assert(mr.is_empty() || space->used_region().contains(mr),
4540             "Should be in space");
4541      // Verify that "start" is an object boundary
4542      assert(mr.is_empty() || oop(mr.start())->is_oop(),
4543             "Should be an oop");
4544      space->par_oop_iterate(mr, cl);
4545    }
4546    pst->all_tasks_completed();
4547  }
4548}
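
// A self-contained sketch of how the task boundaries above fall out of the
// sampled chunk array, kept out of the build with #if 0. Plain size_t
// "addresses" and a std::vector stand in for HeapWord* and the chunk
// array; the helper name is hypothetical.
#if 0
#include <cstddef>
#include <utility>
#include <vector>
// With samples chunk[0..chunk_top) of strictly increasing addresses inside
// [bottom, top), task n covers:
//   n == 0              : [bottom,              chunk[0])
//   0 < n < chunk_top   : [chunk[n-1],          chunk[n])
//   n == chunk_top      : [chunk[chunk_top-1],  top)
// so there are chunk_top + 1 tasks; with no samples there is a single task
// covering the whole space.
static std::pair<size_t, size_t> task_bounds(const std::vector<size_t>& chunk,
                                             size_t bottom, size_t top, size_t n) {
  const size_t chunk_top = chunk.size();
  if (chunk_top == 0) return std::make_pair(bottom, top);
  if (n == 0)         return std::make_pair(bottom, chunk[0]);
  if (n < chunk_top)  return std::make_pair(chunk[n - 1], chunk[n]);
  return std::make_pair(chunk[chunk_top - 1], top);   // n == chunk_top
}
#endif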
4549
4550void
4551CMSParRemarkTask::do_dirty_card_rescan_tasks(
4552  CompactibleFreeListSpace* sp, int i,
4553  ParMarkRefsIntoAndScanClosure* cl) {
4554  // Until all tasks completed:
4555  // . claim an unclaimed task
4556  // . compute region boundaries corresponding to task claimed
4557  // . transfer dirty bits ct->mut for that region
4558  // . apply the rescan closure to dirty mut bits for that region
4559
4560  ResourceMark rm;
4561  HandleMark   hm;
4562
4563  OopTaskQueue* work_q = work_queue(i);
4564  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4565  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4566  // CAUTION: This closure has state that persists across calls to
4567  // the work method dirty_range_iterate_clear() in that it has
4568  // embedded in it a (subtype of) UpwardsObjectClosure. The
4569  // use of that state in the embedded UpwardsObjectClosure instance
4570  // assumes that the cards are always iterated (even if in parallel
4571  // by several threads) in monotonically increasing order per each
4572  // thread. This is true of the implementation below which picks
4573  // card ranges (chunks) in monotonically increasing order globally
4574  // and, a-fortiori, in monotonically increasing order per thread
4575  // (the latter order being a subsequence of the former).
4576  // If the work code below is ever reorganized into a more chaotic
4577  // work-partitioning form than the current "sequential tasks"
4578  // paradigm, the use of that persistent state will have to be
4579  // revisited and modified appropriately. See also related
4580  // bug 4756801 work on which should examine this code to make
4581  // sure that the changes there do not run counter to the
4582  // assumptions made here and necessary for correctness and
4583  // efficiency. Note also that this code might yield inefficient
4584  // behavior in the case of very large objects that span one or
4585  // more work chunks. Such objects would potentially be scanned
4586  // several times redundantly. Work on 4756801 should try and
4587  // address that performance anomaly if at all possible. XXX
4588  MemRegion  full_span  = _collector->_span;
4589  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4590  MarkFromDirtyCardsClosure
4591    greyRescanClosure(_collector, full_span, // entire span of interest
4592                      sp, bm, work_q, cl);
4593
4594  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4595  assert(pst->valid(), "Uninitialized use?");
4596  uint nth_task = 0;
4597  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4598  MemRegion span = sp->used_region();
4599  HeapWord* start_addr = span.start();
4600  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4601                                           alignment);
4602  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4603  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4604         start_addr, "Check alignment");
4605  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4606         chunk_size, "Check alignment");
4607
4608  while (!pst->is_task_claimed(/* reference */ nth_task)) {
4609    // Having claimed the nth_task, compute corresponding mem-region,
4610    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4611    // The alignment restriction ensures that we do not need any
4612    // synchronization with other gang-workers while setting or
4613    // clearing bits in this chunk of the MUT.
4614    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4615                                    start_addr + (nth_task+1)*chunk_size);
4616    // The last chunk's end might be way beyond end of the
4617    // used region. In that case pull back appropriately.
4618    if (this_span.end() > end_addr) {
4619      this_span.set_end(end_addr);
4620      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4621    }
4622    // Iterate over the dirty cards covering this chunk, marking them
4623    // precleaned, and setting the corresponding bits in the mod union
4624    // table. Since we have been careful to partition at Card and MUT-word
4625    // boundaries no synchronization is needed between parallel threads.
4626    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4627                                                 &modUnionClosure);
4628
4629    // Having transferred these marks into the modUnionTable,
4630    // rescan the marked objects on the dirty cards in the modUnionTable.
4631    // Even if this is at a synchronous collection, the initial marking
4632    // may have been done during an asynchronous collection so there
4633    // may be dirty bits in the mod-union table.
4634    _collector->_modUnionTable.dirty_range_iterate_clear(
4635                  this_span, &greyRescanClosure);
4636    _collector->_modUnionTable.verifyNoOneBitsInRange(
4637                                 this_span.start(),
4638                                 this_span.end());
4639  }
4640  pst->all_tasks_completed();  // declare that i am done
4641}
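
// A sketch of the chunk partitioning above, excluded from the build with
// #if 0: task n claims [start + n*chunk, start + (n+1)*chunk), clipped to
// the aligned-up end of the used region. Plain size_t values stand in for
// HeapWord* addresses and the helper name is hypothetical.
#if 0
#include <cstddef>
#include <utility>
// start_addr is assumed to be aligned to (card_size * BitsPerWord) and
// chunk_size is assumed to be a multiple of that alignment, mirroring the
// asserts in the code above; that is what makes lock-free parallel
// setting/clearing of MUT bits safe.
static std::pair<size_t, size_t> nth_rescan_chunk(size_t start_addr, size_t end_addr,
                                                  size_t chunk_size, size_t n) {
  size_t lo = start_addr + n * chunk_size;
  size_t hi = start_addr + (n + 1) * chunk_size;
  if (hi > end_addr) hi = end_addr;   // the last chunk may be pulled back
  return std::make_pair(lo, hi);
}
#endif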
4642
4643// . see if we can share work_queues with ParNew? XXX
4644void
4645CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
4646                                int* seed) {
4647  OopTaskQueue* work_q = work_queue(i);
4648  NOT_PRODUCT(int num_steals = 0;)
4649  oop obj_to_scan;
4650  CMSBitMap* bm = &(_collector->_markBitMap);
4651
4652  while (true) {
4653    // Completely finish any left over work from (an) earlier round(s)
4654    cl->trim_queue(0);
4655    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4656                                         (size_t)ParGCDesiredObjsFromOverflowList);
4657    // Now check if there's any work in the overflow list
4658    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4659    // only affects the number of attempts made to get work from the
4660    // overflow list and does not affect the number of workers.  Just
4661    // pass ParallelGCThreads so this behavior is unchanged.
4662    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4663                                                work_q,
4664                                                ParallelGCThreads)) {
4665      // found something in global overflow list;
4666      // not yet ready to go stealing work from others.
4667      // We'd like to assert(work_q->size() != 0, ...)
4668      // because we just took work from the overflow list,
4669      // but of course we can't since all of that could have
4670      // been already stolen from us.
4671      // "He giveth and He taketh away."
4672      continue;
4673    }
4674    // Verify that we have no work before we resort to stealing
4675    assert(work_q->size() == 0, "Have work, shouldn't steal");
4676    // Try to steal from other queues that have work
4677    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4678      NOT_PRODUCT(num_steals++;)
4679      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4680      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4681      // Do scanning work
4682      obj_to_scan->oop_iterate(cl);
4683      // Loop around, finish this work, and try to steal some more
4684    } else if (terminator()->offer_termination()) {
4685        break;  // nirvana from the infinite cycle
4686    }
4687  }
4688  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4689  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4690         "Else our work is not yet done");
4691}
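
// A compact sketch of the drain / overflow / steal / terminate cycle
// implemented above, kept out of the build with #if 0. Every type and
// member function here is a hypothetical minimal stand-in, not HotSpot API.
#if 0
template <typename Queue, typename Overflow, typename Stealer,
          typename Terminator, typename Scan>
void work_steal_sketch(Queue& local, Overflow& overflow, Stealer& stealer,
                       Terminator& term, Scan scan) {
  for (;;) {
    while (!local.empty()) scan(local.pop());       // 1. drain local work completely
    if (overflow.take_some_into(local)) continue;   // 2. refill from the global overflow list
    if (stealer.try_steal_into(local))  continue;   // 3. steal from another worker's queue
    if (term.offer_termination())       break;      // 4. every worker is idle -> done
  }
}
#endif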
4692
4693// Record object boundaries in _eden_chunk_array by sampling the eden
4694// top in the slow-path eden object allocation code path, if
4695// CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
4696// false, we instead rely on the asynchronous sampling in
4697// sample_eden(), which is active during part of the
4698// preclean phase.
4699void CMSCollector::sample_eden_chunk() {
4700  if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4701    if (_eden_chunk_lock->try_lock()) {
4702      // Record a sample. This is the critical section. The contents
4703      // of the _eden_chunk_array have to be non-decreasing in the
4704      // address order.
4705      _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4706      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4707             "Unexpected state of Eden");
4708      if (_eden_chunk_index == 0 ||
4709          ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4710           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4711                          _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4712        _eden_chunk_index++;  // commit sample
4713      }
4714      _eden_chunk_lock->unlock();
4715    }
4716  }
4717}
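
// A small sketch of the sample-commit rule used above, excluded from the
// build with #if 0: a new eden-top sample is kept only if it lies strictly
// above the previous sample and at least the sampling grain beyond it.
// The raw size_t array and the helper name are assumptions.
#if 0
#include <cstddef>
static bool commit_sample(const size_t* chunk_array, size_t idx,
                          size_t new_top, size_t sampling_grain) {
  if (idx == 0) return true;            // the first sample is always kept
  const size_t prev = chunk_array[idx - 1];
  return new_top > prev && (new_top - prev) >= sampling_grain;
}
#endif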
4718
4719// Return a thread-local PLAB recording array, as appropriate.
4720void* CMSCollector::get_data_recorder(int thr_num) {
4721  if (_survivor_plab_array != NULL &&
4722      (CMSPLABRecordAlways ||
4723       (_collectorState > Marking && _collectorState < FinalMarking))) {
4724    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4725    ChunkArray* ca = &_survivor_plab_array[thr_num];
4726    ca->reset();   // clear it so that fresh data is recorded
4727    return (void*) ca;
4728  } else {
4729    return NULL;
4730  }
4731}
4732
4733// Reset all the thread-local PLAB recording arrays
4734void CMSCollector::reset_survivor_plab_arrays() {
4735  for (uint i = 0; i < ParallelGCThreads; i++) {
4736    _survivor_plab_array[i].reset();
4737  }
4738}
4739
4740// Merge the per-thread plab arrays into the global survivor chunk
4741// array which will provide the partitioning of the survivor space
4742// for CMS initial scan and rescan.
4743void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4744                                              int no_of_gc_threads) {
4745  assert(_survivor_plab_array  != NULL, "Error");
4746  assert(_survivor_chunk_array != NULL, "Error");
4747  assert(_collectorState == FinalMarking ||
4748         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4749  for (int j = 0; j < no_of_gc_threads; j++) {
4750    _cursor[j] = 0;
4751  }
4752  HeapWord* top = surv->top();
4753  size_t i;
4754  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4755    HeapWord* min_val = top;          // Higher than any PLAB address
4756    uint      min_tid = 0;            // position of min_val this round
4757    for (int j = 0; j < no_of_gc_threads; j++) {
4758      ChunkArray* cur_sca = &_survivor_plab_array[j];
4759      if (_cursor[j] == cur_sca->end()) {
4760        continue;
4761      }
4762      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4763      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4764      assert(surv->used_region().contains(cur_val), "Out of bounds value");
4765      if (cur_val < min_val) {
4766        min_tid = j;
4767        min_val = cur_val;
4768      } else {
4769        assert(cur_val < top, "All recorded addresses should be less");
4770      }
4771    }
4772    // At this point min_val and min_tid are respectively
4773    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4774    // and the thread (j) that witnesses that address.
4775    // We record this address in the _survivor_chunk_array[i]
4776    // and increment _cursor[min_tid] prior to the next round i.
4777    if (min_val == top) {
4778      break;
4779    }
4780    _survivor_chunk_array[i] = min_val;
4781    _cursor[min_tid]++;
4782  }
4783  // We are all done; record the size of the _survivor_chunk_array
4784  _survivor_chunk_index = i; // exclusive: [0, i)
4785  log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4786  // Verify that we used up all the recorded entries
4787  #ifdef ASSERT
4788    size_t total = 0;
4789    for (int j = 0; j < no_of_gc_threads; j++) {
4790      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4791      total += _cursor[j];
4792    }
4793    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4794    // Check that the merged array is in sorted order
4795    if (total > 0) {
4796      for (size_t i = 0; i < total - 1; i++) {
4797        log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4798                                     i, p2i(_survivor_chunk_array[i]));
4799        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4800               "Not sorted");
4801      }
4802    }
4803  #endif // ASSERT
4804}
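
// A self-contained sketch of the cursor-based k-way merge performed above,
// kept out of the build with #if 0. Sorted std::vectors of size_t stand in
// for the per-thread ChunkArrays and HeapWord* addresses; the function
// name is hypothetical.
#if 0
#include <cstddef>
#include <vector>
static std::vector<size_t> k_way_merge(const std::vector<std::vector<size_t> >& per_thread) {
  std::vector<size_t> cursor(per_thread.size(), 0);   // analogue of _cursor[]
  std::vector<size_t> merged;                         // analogue of _survivor_chunk_array
  for (;;) {
    bool   found   = false;
    size_t min_val = 0;
    size_t min_tid = 0;
    for (size_t j = 0; j < per_thread.size(); j++) {
      if (cursor[j] == per_thread[j].size()) continue;   // this thread is exhausted
      const size_t cur = per_thread[j][cursor[j]];
      if (!found || cur < min_val) { found = true; min_val = cur; min_tid = j; }
    }
    if (!found) break;          // all cursors exhausted (analogue of min_val == top)
    merged.push_back(min_val);  // commit the smallest remaining address
    cursor[min_tid]++;          // and advance only that thread's cursor
  }
  return merged;
}
#endif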
4805
4806// Set up the space's par_seq_tasks structure for work claiming
4807// for parallel initial scan and rescan of young gen.
4808// See ParRescanTask where this is currently used.
4809void
4810CMSCollector::
4811initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4812  assert(n_threads > 0, "Unexpected n_threads argument");
4813
4814  // Eden space
4815  if (!_young_gen->eden()->is_empty()) {
4816    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4817    assert(!pst->valid(), "Clobbering existing data?");
4818    // Each valid entry in [0, _eden_chunk_index) represents a task.
4819    size_t n_tasks = _eden_chunk_index + 1;
4820    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4821    // Sets the condition for completion of the subtask (how many threads
4822    // need to finish in order to be done).
4823    pst->set_n_threads(n_threads);
4824    pst->set_n_tasks((int)n_tasks);
4825  }
4826
4827  // Merge the survivor plab arrays into _survivor_chunk_array
4828  if (_survivor_plab_array != NULL) {
4829    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
4830  } else {
4831    assert(_survivor_chunk_index == 0, "Error");
4832  }
4833
4834  // To space
4835  {
4836    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
4837    assert(!pst->valid(), "Clobbering existing data?");
4838    // Sets the condition for completion of the subtask (how many threads
4839    // need to finish in order to be done).
4840    pst->set_n_threads(n_threads);
4841    pst->set_n_tasks(1);
4842    assert(pst->valid(), "Error");
4843  }
4844
4845  // From space
4846  {
4847    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
4848    assert(!pst->valid(), "Clobbering existing data?");
4849    size_t n_tasks = _survivor_chunk_index + 1;
4850    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
4851    // Sets the condition for completion of the subtask (how many threads
4852    // need to finish in order to be done).
4853    pst->set_n_threads(n_threads);
4854    pst->set_n_tasks((int)n_tasks);
4855    assert(pst->valid(), "Error");
4856  }
4857}
4858
4859// Parallel version of remark
4860void CMSCollector::do_remark_parallel() {
4861  GenCollectedHeap* gch = GenCollectedHeap::heap();
4862  WorkGang* workers = gch->workers();
4863  assert(workers != NULL, "Need parallel worker threads.");
4864  // Choose to use the number of GC workers most recently set
4865  // into "active_workers".
4866  uint n_workers = workers->active_workers();
4867
4868  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4869
4870  StrongRootsScope srs(n_workers);
4871
4872  CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
4873
4874  // We won't be iterating over the cards in the card table updating
4875  // the younger_gen cards, so we shouldn't call the following else
4876  // the verification code as well as subsequent younger_refs_iterate
4877  // code would get confused. XXX
4878  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
4879
4880  // The young gen rescan work will not be done as part of
4881  // process_roots (which currently doesn't know how to
4882  // parallelize such a scan), but rather will be broken up into
4883  // a set of parallel tasks (via the sampling that the [abortable]
4884  // preclean phase did of eden, plus the [two] tasks of
4885  // scanning the [two] survivor spaces). Further fine-grain
4886  // parallelization of the scanning of the survivor spaces
4887  // themselves, and of precleaning of the young gen itself
4888  // is deferred to the future.
4889  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
4890
4891  // The dirty card rescan work is broken up into a "sequence"
4892  // of parallel tasks (per constituent space) that are dynamically
4893  // claimed by the parallel threads.
4894  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
4895
4896  // It turns out that even when we're using 1 thread, doing the work in a
4897  // separate thread causes wide variance in run times.  We can't help this
4898  // in the multi-threaded case, but we special-case n=1 here to get
4899  // repeatable measurements of the 1-thread overhead of the parallel code.
4900  if (n_workers > 1) {
4901    // Make refs discovery MT-safe, if it isn't already: it may not
4902    // necessarily be so, since it's possible that we are doing
4903    // ST marking.
4904    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
4905    workers->run_task(&tsk);
4906  } else {
4907    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4908    tsk.work(0);
4909  }
4910
4911  // restore, single-threaded for now, any preserved marks
4912  // as a result of work_q overflow
4913  restore_preserved_marks_if_any();
4914}
4915
4916// Non-parallel version of remark
4917void CMSCollector::do_remark_non_parallel() {
4918  ResourceMark rm;
4919  HandleMark   hm;
4920  GenCollectedHeap* gch = GenCollectedHeap::heap();
4921  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4922
4923  MarkRefsIntoAndScanClosure
4924    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4925             &_markStack, this,
4926             false /* should_yield */, false /* not precleaning */);
4927  MarkFromDirtyCardsClosure
4928    markFromDirtyCardsClosure(this, _span,
4929                              NULL,  // space is set further below
4930                              &_markBitMap, &_markStack, &mrias_cl);
4931  {
4932    GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
4933    // Iterate over the dirty cards, setting the corresponding bits in the
4934    // mod union table.
4935    {
4936      ModUnionClosure modUnionClosure(&_modUnionTable);
4937      _ct->ct_bs()->dirty_card_iterate(
4938                      _cmsGen->used_region(),
4939                      &modUnionClosure);
4940    }
4941    // Having transferred these marks into the modUnionTable, we just need
4942    // to rescan the marked objects on the dirty cards in the modUnionTable.
4943    // The initial marking may have been done during an asynchronous
4944    // collection so there may be dirty bits in the mod-union table.
4945    const int alignment =
4946      CardTableModRefBS::card_size * BitsPerWord;
4947    {
4948      // ... First handle dirty cards in CMS gen
4949      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4950      MemRegion ur = _cmsGen->used_region();
4951      HeapWord* lb = ur.start();
4952      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
4953      MemRegion cms_span(lb, ub);
4954      _modUnionTable.dirty_range_iterate_clear(cms_span,
4955                                               &markFromDirtyCardsClosure);
4956      verify_work_stacks_empty();
4957      log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
4958    }
4959  }
4960  if (VerifyDuringGC &&
4961      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4962    HandleMark hm;  // Discard invalid handles created during verification
4963    Universe::verify();
4964  }
4965  {
4966    GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
4967
4968    verify_work_stacks_empty();
4969
4970    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
4971    StrongRootsScope srs(1);
4972
4973    gch->gen_process_roots(&srs,
4974                           GenCollectedHeap::OldGen,
4975                           true,  // young gen as roots
4976                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
4977                           should_unload_classes(),
4978                           &mrias_cl,
4979                           NULL,
4980                           NULL); // The dirty klasses will be handled below
4981
4982    assert(should_unload_classes()
4983           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4984           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4985  }
4986
4987  {
4988    GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
4989
4990    verify_work_stacks_empty();
4991
4992    // Scan all class loader data objects that might have been introduced
4993    // during concurrent marking.
4994    ResourceMark rm;
4995    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4996    for (int i = 0; i < array->length(); i++) {
4997      mrias_cl.do_cld_nv(array->at(i));
4998    }
4999
5000    // We don't need to keep track of new CLDs anymore.
5001    ClassLoaderDataGraph::remember_new_clds(false);
5002
5003    verify_work_stacks_empty();
5004  }
5005
5006  {
5007    GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
5008
5009    verify_work_stacks_empty();
5010
5011    RemarkKlassClosure remark_klass_closure(&mrias_cl);
5012    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5013
5014    verify_work_stacks_empty();
5015  }
5016
5017  // We might have added oops to ClassLoaderData::_handles during the
5018  // concurrent marking phase. These oops point to newly allocated objects
5019  // that are guaranteed to be kept alive, either by the direct allocation
5020  // code or when the young collector processes the roots. Hence,
5021  // we don't have to revisit the _handles block during the remark phase.
5022
5023  verify_work_stacks_empty();
5024  // Restore evacuated mark words, if any, used for overflow list links
5025  restore_preserved_marks_if_any();
5026
5027  verify_overflow_empty();
5028}
5029
5030////////////////////////////////////////////////////////
5031// Parallel Reference Processing Task Proxy Class
5032////////////////////////////////////////////////////////
5033class AbstractGangTaskWOopQueues : public AbstractGangTask {
5034  OopTaskQueueSet*       _queues;
5035  ParallelTaskTerminator _terminator;
5036 public:
5037  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5038    AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5039  ParallelTaskTerminator* terminator() { return &_terminator; }
5040  OopTaskQueueSet* queues() { return _queues; }
5041};
5042
5043class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5044  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5045  CMSCollector*          _collector;
5046  CMSBitMap*             _mark_bit_map;
5047  const MemRegion        _span;
5048  ProcessTask&           _task;
5049
5050public:
5051  CMSRefProcTaskProxy(ProcessTask&     task,
5052                      CMSCollector*    collector,
5053                      const MemRegion& span,
5054                      CMSBitMap*       mark_bit_map,
5055                      AbstractWorkGang* workers,
5056                      OopTaskQueueSet* task_queues):
5057    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5058      task_queues,
5059      workers->active_workers()),
5060    _task(task),
5061    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5062  {
5063    assert(_collector->_span.equals(_span) && !_span.is_empty(),
5064           "Inconsistency in _span");
5065  }
5066
5067  OopTaskQueueSet* task_queues() { return queues(); }
5068
5069  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5070
5071  void do_work_steal(int i,
5072                     CMSParDrainMarkingStackClosure* drain,
5073                     CMSParKeepAliveClosure* keep_alive,
5074                     int* seed);
5075
5076  virtual void work(uint worker_id);
5077};
5078
5079void CMSRefProcTaskProxy::work(uint worker_id) {
5080  ResourceMark rm;
5081  HandleMark hm;
5082  assert(_collector->_span.equals(_span), "Inconsistency in _span");
5083  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5084                                        _mark_bit_map,
5085                                        work_queue(worker_id));
5086  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5087                                                 _mark_bit_map,
5088                                                 work_queue(worker_id));
5089  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5090  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5091  if (_task.marks_oops_alive()) {
5092    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5093                  _collector->hash_seed(worker_id));
5094  }
5095  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5096  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5097}
5098
5099class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5100  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5101  EnqueueTask& _task;
5102
5103public:
5104  CMSRefEnqueueTaskProxy(EnqueueTask& task)
5105    : AbstractGangTask("Enqueue reference objects in parallel"),
5106      _task(task)
5107  { }
5108
5109  virtual void work(uint worker_id)
5110  {
5111    _task.work(worker_id);
5112  }
5113};
5114
5115CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5116  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5117   _span(span),
5118   _bit_map(bit_map),
5119   _work_queue(work_queue),
5120   _mark_and_push(collector, span, bit_map, work_queue),
5121   _low_water_mark(MIN2((work_queue->max_elems()/4),
5122                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5123{ }
5124
5125// . see if we can share work_queues with ParNew? XXX
5126void CMSRefProcTaskProxy::do_work_steal(int i,
5127  CMSParDrainMarkingStackClosure* drain,
5128  CMSParKeepAliveClosure* keep_alive,
5129  int* seed) {
5130  OopTaskQueue* work_q = work_queue(i);
5131  NOT_PRODUCT(int num_steals = 0;)
5132  oop obj_to_scan;
5133
5134  while (true) {
5135    // Completely finish any left over work from (an) earlier round(s)
5136    drain->trim_queue(0);
5137    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5138                                         (size_t)ParGCDesiredObjsFromOverflowList);
5139    // Now check if there's any work in the overflow list
5140    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5141    // only affects the number of attempts made to get work from the
5142    // overflow list and does not affect the number of workers.  Just
5143    // pass ParallelGCThreads so this behavior is unchanged.
5144    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5145                                                work_q,
5146                                                ParallelGCThreads)) {
5147      // Found something in global overflow list;
5148      // not yet ready to go stealing work from others.
5149      // We'd like to assert(work_q->size() != 0, ...)
5150      // because we just took work from the overflow list,
5151      // but of course we can't, since all of that might have
5152      // been already stolen from us.
5153      continue;
5154    }
5155    // Verify that we have no work before we resort to stealing
5156    assert(work_q->size() == 0, "Have work, shouldn't steal");
5157    // Try to steal from other queues that have work
5158    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5159      NOT_PRODUCT(num_steals++;)
5160      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5161      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5162      // Do scanning work
5163      obj_to_scan->oop_iterate(keep_alive);
5164      // Loop around, finish this work, and try to steal some more
5165    } else if (terminator()->offer_termination()) {
5166      break;  // nirvana from the infinite cycle
5167    }
5168  }
5169  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5170}
5171
5172void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5173{
5174  GenCollectedHeap* gch = GenCollectedHeap::heap();
5175  WorkGang* workers = gch->workers();
5176  assert(workers != NULL, "Need parallel worker threads.");
5177  CMSRefProcTaskProxy rp_task(task, &_collector,
5178                              _collector.ref_processor()->span(),
5179                              _collector.markBitMap(),
5180                              workers, _collector.task_queues());
5181  workers->run_task(&rp_task);
5182}
5183
5184void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5185{
5186
5187  GenCollectedHeap* gch = GenCollectedHeap::heap();
5188  WorkGang* workers = gch->workers();
5189  assert(workers != NULL, "Need parallel worker threads.");
5190  CMSRefEnqueueTaskProxy enq_task(task);
5191  workers->run_task(&enq_task);
5192}
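// Illustrative sketch (hypothetical names, not part of the build) of the
// executor/proxy pattern used by both execute() overloads above: the
// ReferenceProcessor hands us a task, and we wrap it in an AbstractGangTask
// so that every worker in the WorkGang calls back into it with its worker id.
//
//   class MyRefTaskProxy : public AbstractGangTask {
//     SomeRefProcTask& _task;   // e.g. a ProcessTask or an EnqueueTask
//    public:
//     MyRefTaskProxy(SomeRefProcTask& task)
//       : AbstractGangTask("Proxy for reference processing"), _task(task) { }
//     virtual void work(uint worker_id) {
//       _task.work(worker_id /* plus closures in the ProcessTask case */);
//     }
//   };
//
//   MyRefTaskProxy proxy(task);
//   workers->run_task(&proxy);   // each active worker runs proxy.work(id)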
5193
5194void CMSCollector::refProcessingWork() {
5195  ResourceMark rm;
5196  HandleMark   hm;
5197
5198  ReferenceProcessor* rp = ref_processor();
5199  assert(rp->span().equals(_span), "Spans should be equal");
5200  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5201  // Process weak references.
5202  rp->setup_policy(false);
5203  verify_work_stacks_empty();
5204
5205  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5206                                          &_markStack, false /* !preclean */);
5207  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5208                                _span, &_markBitMap, &_markStack,
5209                                &cmsKeepAliveClosure, false /* !preclean */);
5210  {
5211    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
5212
5213    ReferenceProcessorStats stats;
5214    if (rp->processing_is_mt()) {
5215      // Set the degree of MT here.  If the discovery is done MT, there
5216      // may have been a different number of threads doing the discovery
5217      // and a different number of discovered lists may have Ref objects.
5218      // That is OK as long as the Reference lists are balanced (see
5219      // balance_all_queues() and balance_queues()).
5220      GenCollectedHeap* gch = GenCollectedHeap::heap();
5221      uint active_workers = ParallelGCThreads;
5222      WorkGang* workers = gch->workers();
5223      if (workers != NULL) {
5224        active_workers = workers->active_workers();
5225        // The expectation is that active_workers will have already
5226        // been set to a reasonable value.  If it has not been set,
5227        // investigate.
5228        assert(active_workers > 0, "Should have been set during scavenge");
5229      }
5230      rp->set_active_mt_degree(active_workers);
5231      CMSRefProcTaskExecutor task_executor(*this);
5232      stats = rp->process_discovered_references(&_is_alive_closure,
5233                                        &cmsKeepAliveClosure,
5234                                        &cmsDrainMarkingStackClosure,
5235                                        &task_executor,
5236                                        _gc_timer_cm);
5237    } else {
5238      stats = rp->process_discovered_references(&_is_alive_closure,
5239                                        &cmsKeepAliveClosure,
5240                                        &cmsDrainMarkingStackClosure,
5241                                        NULL,
5242                                        _gc_timer_cm);
5243    }
5244    _gc_tracer_cm->report_gc_reference_stats(stats);
5245
5246  }
5247
5248  // This is the point where the entire marking should have completed.
5249  verify_work_stacks_empty();
5250
5251  if (should_unload_classes()) {
5252    {
5253      GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
5254
5255      // Unload classes and purge the SystemDictionary.
5256      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5257
5258      // Unload nmethods.
5259      CodeCache::do_unloading(&_is_alive_closure, purged_class);
5260
5261      // Prune dead klasses from subklass/sibling/implementor lists.
5262      Klass::clean_weak_klass_links(&_is_alive_closure);
5263    }
5264
5265    {
5266      GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm);
5267      // Clean up unreferenced symbols in symbol table.
5268      SymbolTable::unlink();
5269    }
5270
5271    {
5272      GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm);
5273      // Delete entries for dead interned strings.
5274      StringTable::unlink(&_is_alive_closure);
5275    }
5276  }
5277
5278
5279  // Restore any preserved marks as a result of mark stack or
5280  // work queue overflow
5281  restore_preserved_marks_if_any();  // done single-threaded for now
5282
5283  rp->set_enqueuing_is_done(true);
5284  if (rp->processing_is_mt()) {
5285    rp->balance_all_queues();
5286    CMSRefProcTaskExecutor task_executor(*this);
5287    rp->enqueue_discovered_references(&task_executor);
5288  } else {
5289    rp->enqueue_discovered_references(NULL);
5290  }
5291  rp->verify_no_references_recorded();
5292  assert(!rp->discovery_enabled(), "should have been disabled");
5293}
5294
5295#ifndef PRODUCT
5296void CMSCollector::check_correct_thread_executing() {
5297  Thread* t = Thread::current();
5298  // Only the VM thread or the CMS thread should be here.
5299  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5300         "Unexpected thread type");
5301  // If this is the vm thread, the foreground process
5302  // should not be waiting.  Note that _foregroundGCIsActive is
5303  // true while the foreground collector is waiting.
5304  if (_foregroundGCShouldWait) {
5305    // We cannot be the VM thread
5306    assert(t->is_ConcurrentGC_thread(),
5307           "Should be CMS thread");
5308  } else {
5309    // We can be the CMS thread only if we are in a stop-world
5310    // phase of CMS collection.
5311    if (t->is_ConcurrentGC_thread()) {
5312      assert(_collectorState == InitialMarking ||
5313             _collectorState == FinalMarking,
5314             "Should be a stop-world phase");
5315      // The CMS thread should be holding the CMS_token.
5316      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5317             "Potential interference with concurrently "
5318             "executing VM thread");
5319    }
5320  }
5321}
5322#endif
5323
5324void CMSCollector::sweep() {
5325  assert(_collectorState == Sweeping, "just checking");
5326  check_correct_thread_executing();
5327  verify_work_stacks_empty();
5328  verify_overflow_empty();
5329  increment_sweep_count();
5330  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5331
5332  _inter_sweep_timer.stop();
5333  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5334
5335  assert(!_intra_sweep_timer.is_active(), "Should not be active");
5336  _intra_sweep_timer.reset();
5337  _intra_sweep_timer.start();
5338  {
5339    GCTraceCPUTime tcpu;
5340    CMSPhaseAccounting pa(this, "Concurrent Sweep");
5341    // First sweep the old gen
5342    {
5343      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5344                               bitMapLock());
5345      sweepWork(_cmsGen);
5346    }
5347
5348    // Update Universe::_heap_*_at_gc figures.
5349    // We need all the free list locks to make the abstract state
5350    // transition from Sweeping to Resetting. See detailed note
5351    // further below.
5352    {
5353      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5354      // Update heap occupancy information which is used as
5355      // input to soft ref clearing policy at the next gc.
5356      Universe::update_heap_info_at_gc();
5357      _collectorState = Resizing;
5358    }
5359  }
5360  verify_work_stacks_empty();
5361  verify_overflow_empty();
5362
5363  if (should_unload_classes()) {
5364    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5365    // requires that the virtual spaces are stable and not deleted.
5366    ClassLoaderDataGraph::set_should_purge(true);
5367  }
5368
5369  _intra_sweep_timer.stop();
5370  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5371
5372  _inter_sweep_timer.reset();
5373  _inter_sweep_timer.start();
5374
5375  // We need to use a monotonically non-decreasing time in ms
5376  // because os::javaTimeMillis() does not guarantee monotonicity
5377  // and we would otherwise see time-warp warnings.
5378  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5379  update_time_of_last_gc(now);
5380
5381  // NOTE on abstract state transitions:
5382  // Mutators allocate-live and/or mark the mod-union table dirty
5383  // based on the state of the collection.  The former is done in
5384  // the interval [Marking, Sweeping] and the latter in the interval
5385  // [Marking, Sweeping).  Thus the transitions into the Marking state
5386  // and out of the Sweeping state must be synchronously visible
5387  // globally to the mutators.
5388  // The transition into the Marking state happens with the world
5389  // stopped so the mutators will globally see it.  Sweeping is
5390  // done asynchronously by the background collector so the transition
5391  // from the Sweeping state to the Resizing state must be done
5392  // under the freelistLock (as is the check for whether to
5393  // allocate-live and whether to dirty the mod-union table).
5394  assert(_collectorState == Resizing, "Change of collector state to"
5395    " Resizing must be done under the freelistLocks (plural)");
5396
5397  // Now that sweeping has been completed, we clear
5398  // the incremental_collection_failed flag,
5399  // thus inviting a younger gen collection to promote into
5400  // this generation. If such a promotion still fails,
5401  // the flag will be set again when a young collection is
5402  // attempted.
5403  GenCollectedHeap* gch = GenCollectedHeap::heap();
5404  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5405  gch->update_full_collections_completed(_collection_count_start);
5406}
5407
5408// FIX ME!!! Looks like this belongs in CFLSpace, with
5409// CMSGen merely delegating to it.
5410void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5411  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5412  HeapWord*  minAddr        = _cmsSpace->bottom();
5413  HeapWord*  largestAddr    =
5414    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5415  if (largestAddr == NULL) {
5416    // The dictionary appears to be empty.  In this case
5417    // try to coalesce at the end of the heap.
5418    largestAddr = _cmsSpace->end();
5419  }
5420  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5421  size_t nearLargestOffset =
5422    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5423  log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5424                          p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5425  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5426}
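// Worked example of the computation above (illustrative values only),
// taking nearLargestPercent to be 0.99 and supposing the largest free
// chunk was found 1,000,000 heap words past bottom():
//   largestOffset     = 1,000,000
//   nearLargestOffset = (size_t)(1,000,000 * 0.99) - MinChunkSize
//                     = 990,000 - MinChunkSize
// so "near the largest chunk" begins a little (MinChunkSize words) before
// the 99% point of the way to the current largest block.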
5427
5428bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5429  return addr >= _cmsSpace->nearLargestChunk();
5430}
5431
5432FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5433  return _cmsSpace->find_chunk_at_end();
5434}
5435
5436void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5437                                                    bool full) {
5438  // If the young generation has been collected, gather any statistics
5439  // that are of interest at this point.
5440  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5441  if (!full && current_is_young) {
5442    // Gather statistics on the young generation collection.
5443    collector()->stats().record_gc0_end(used());
5444  }
5445}
5446
5447void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5448  // We iterate over the space(s) underlying this generation,
5449  // checking the mark bit map to see if the bits corresponding
5450  // to specific blocks are marked or not. Blocks that are
5451  // marked are live and are not swept up. All remaining blocks
5452  // are swept up, with coalescing on-the-fly as we sweep up
5453  // contiguous free and/or garbage blocks:
5454  // We need to ensure that the sweeper synchronizes with allocators
5455  // and stop-the-world collectors. In particular, the following
5456  // locks are used:
5457  // . CMS token: if this is held, a stop the world collection cannot occur
5458  // . freelistLock: if this is held no allocation can occur from this
5459  //                 generation by another thread
5460  // . bitMapLock: if this is held, no other thread can access or update
5461  //               the marking bit map
5462
5463  // Note that we need to hold the freelistLock if we use
5464  // block iterate below; else the iterator might go awry if
5465  // a mutator (or promotion) causes block contents to change
5466  // (for instance if the allocator divvies up a block).
5467  // If we hold the free list lock, for all practical purposes
5468  // young generation GC's can't occur (they'll usually need to
5469  // promote), so we might as well prevent all young generation
5470  // GC's while we do a sweeping step. For the same reason, we might
5471  // as well take the bit map lock for the entire duration of the sweep.
5472
5473  // check that we hold the requisite locks
5474  assert(have_cms_token(), "Should hold cms token");
5475  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5476  assert_lock_strong(old_gen->freelistLock());
5477  assert_lock_strong(bitMapLock());
5478
5479  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5480  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5481  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5482                                          _inter_sweep_estimate.padded_average(),
5483                                          _intra_sweep_estimate.padded_average());
5484  old_gen->setNearLargestChunk();
5485
5486  {
5487    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5488    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5489    // We need to free-up/coalesce garbage/blocks from a
5490    // co-terminal free run. This is done in the SweepClosure
5491    // destructor; so, do not remove this scope, else the
5492    // end-of-sweep-census below will be off by a little bit.
5493  }
5494  old_gen->cmsSpace()->sweep_completed();
5495  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5496  if (should_unload_classes()) {                // unloaded classes this cycle,
5497    _concurrent_cycles_since_last_unload = 0;   // ... reset count
5498  } else {                                      // did not unload classes,
5499    _concurrent_cycles_since_last_unload++;     // ... increment count
5500  }
5501}
5502
5503// Reset CMS data structures (for now just the marking bit map)
5504// preparatory for the next cycle.
5505void CMSCollector::reset_concurrent() {
5506  CMSTokenSyncWithLocks ts(true, bitMapLock());
5507
5508  // If the state is not "Resetting", the foreground thread
5509  // has already done the collection and the resetting.
5510  if (_collectorState != Resetting) {
5511    assert(_collectorState == Idling, "The state should only change"
5512      " because the foreground collector has finished the collection");
5513    return;
5514  }
5515
5516  {
5517    // Clear the mark bitmap (no grey objects to start with)
5518    // for the next cycle.
5519    GCTraceCPUTime tcpu;
5520    CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5521
5522    HeapWord* curAddr = _markBitMap.startWord();
5523    while (curAddr < _markBitMap.endWord()) {
5524      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5525      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5526      _markBitMap.clear_large_range(chunk);
5527      if (ConcurrentMarkSweepThread::should_yield() &&
5528          !foregroundGCIsActive() &&
5529          CMSYield) {
5530        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5531               "CMS thread should hold CMS token");
5532        assert_lock_strong(bitMapLock());
5533        bitMapLock()->unlock();
5534        ConcurrentMarkSweepThread::desynchronize(true);
5535        stopTimer();
5536        incrementYields();
5537
5538        // See the comment in coordinator_yield()
5539        for (unsigned i = 0; i < CMSYieldSleepCount &&
5540                         ConcurrentMarkSweepThread::should_yield() &&
5541                         !CMSCollector::foregroundGCIsActive(); ++i) {
5542          os::sleep(Thread::current(), 1, false);
5543        }
5544
5545        ConcurrentMarkSweepThread::synchronize(true);
5546        bitMapLock()->lock_without_safepoint_check();
5547        startTimer();
5548      }
5549      curAddr = chunk.end();
5550    }
5551    // A successful mostly concurrent collection has been done.
5552    // Because only the full (i.e., concurrent mode failure) collections
5553    // are being measured for gc overhead limits, clear the "near" flag
5554    // and count.
5555    size_policy()->reset_gc_overhead_limit_count();
5556    _collectorState = Idling;
5557  }
5558
5559  register_gc_end();
5560}
5561
5562// Same as above but for STW paths
5563void CMSCollector::reset_stw() {
5564  // already have the lock
5565  assert(_collectorState == Resetting, "just checking");
5566  assert_lock_strong(bitMapLock());
5567  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5568  _markBitMap.clear_all();
5569  _collectorState = Idling;
5570  register_gc_end();
5571}
5572
5573void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5574  GCTraceCPUTime tcpu;
5575  TraceCollectorStats tcs(counters());
5576
5577  switch (op) {
5578    case CMS_op_checkpointRootsInitial: {
5579      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5580      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5581      checkpointRootsInitial();
5582      break;
5583    }
5584    case CMS_op_checkpointRootsFinal: {
5585      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5586      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5587      checkpointRootsFinal();
5588      break;
5589    }
5590    default:
5591      fatal("No such CMS_op");
5592  }
5593}
5594
5595#ifndef PRODUCT
5596size_t const CMSCollector::skip_header_HeapWords() {
5597  return FreeChunk::header_size();
5598}
5599
5600// Try to collect here the conditions that should hold when
5601// the CMS thread is exiting. The idea is that the foreground GC
5602// thread should not be blocked if it wants to terminate
5603// the CMS thread and yet continue to run the VM for a while
5604// after that.
5605void CMSCollector::verify_ok_to_terminate() const {
5606  assert(Thread::current()->is_ConcurrentGC_thread(),
5607         "should be called by CMS thread");
5608  assert(!_foregroundGCShouldWait, "should be false");
5609  // We could check here that all the various low-level locks
5610  // are not held by the CMS thread, but that is overkill; see
5611  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5612  // is checked.
5613}
5614#endif
5615
5616size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5617   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5618          "missing Printezis mark?");
5619  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5620  size_t size = pointer_delta(nextOneAddr + 1, addr);
5621  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5622         "alignment problem");
5623  assert(size >= 3, "Necessary for Printezis marks to work");
5624  return size;
5625}
5626
5627// A variant of the above (block_size_using_printezis_bits()) except
5628// that we return 0 if the P-bits are not yet set.
5629size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5630  if (_markBitMap.isMarked(addr + 1)) {
5631    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5632    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5633    size_t size = pointer_delta(nextOneAddr + 1, addr);
5634    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5635           "alignment problem");
5636    assert(size >= 3, "Necessary for Printezis marks to work");
5637    return size;
5638  }
5639  return 0;
5640}
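// Illustrative picture of the Printezis ("P-bit") encoding that the two
// functions above decode (one mark bit per heap word in this picture):
//
//   word:   addr  addr+1  addr+2 ...  addr+size-1
//   bit:     1      1       0    ...       1
//            ^      ^                      ^
//            |      |                      +-- bit marking the last word
//            |      +-- second ("P") bit: size is recorded via the bit map
//            +-- ordinary mark bit for the (possibly uninitialized) object
//
//   size = (getNextMarkedWordAddress(addr + 2) + 1) - addr
//
// which is why at least 3 words are needed for the scheme to work.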
5641
5642HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5643  size_t sz = 0;
5644  oop p = (oop)addr;
5645  if (p->klass_or_null() != NULL) {
5646    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5647  } else {
5648    sz = block_size_using_printezis_bits(addr);
5649  }
5650  assert(sz > 0, "size must be nonzero");
5651  HeapWord* next_block = addr + sz;
5652  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5653                                             CardTableModRefBS::card_size);
5654  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5655         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5656         "must be different cards");
5657  return next_card;
5658}
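// Worked example for next_card_start_after_block() (illustrative addresses,
// assuming the usual 512-byte cards and 8-byte heap words):
//   addr       = 0x1000, block size sz = 20 words = 160 bytes
//   next_block = 0x10a0
//   next_card  = round_to(0x10a0, 512) = 0x1200
// and indeed round_down(0x1000, 512) == 0x1000 < 0x1200, i.e. the returned
// address lies on a strictly later card than the block's start.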
5659
5660
5661// CMS Bit Map Wrapper /////////////////////////////////////////
5662
5663// Construct a CMS bit map infrastructure, but don't create the
5664// bit vector itself. That is done by a separate call to CMSBitMap::allocate()
5665// further below.
5666CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5667  _bm(),
5668  _shifter(shifter),
5669  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5670                                    Monitor::_safepoint_check_sometimes) : NULL)
5671{
5672  _bmStartWord = 0;
5673  _bmWordSize  = 0;
5674}
5675
5676bool CMSBitMap::allocate(MemRegion mr) {
5677  _bmStartWord = mr.start();
5678  _bmWordSize  = mr.word_size();
5679  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5680                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5681  if (!brs.is_reserved()) {
5682    log_warning(gc)("CMS bit map allocation failure");
5683    return false;
5684  }
5685  // For now we'll just commit all of the bit map up front.
5686  // Later on we'll try to be more parsimonious with swap.
5687  if (!_virtual_space.initialize(brs, brs.size())) {
5688    log_warning(gc)("CMS bit map backing store failure");
5689    return false;
5690  }
5691  assert(_virtual_space.committed_size() == brs.size(),
5692         "didn't reserve backing store for all of CMS bit map?");
5693  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5694         _bmWordSize, "inconsistency in bit map sizing");
5695  _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
5696
5697  // bm.clear(); // can we rely on getting zero'd memory? verify below
5698  assert(isAllClear(),
5699         "Expected zero'd memory from ReservedSpace constructor");
5700  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5701         "consistency check");
5702  return true;
5703}
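// Illustrative sizing for the reservation above: with _shifter == 0 (one
// bit per heap word), LogBitsPerByte == 3 and 8-byte heap words, a 1 GB
// covered region is 2^27 heap words, so the bit map needs
//   (2^27 >> (0 + 3)) + 1 bytes  =  16 MB + 1 byte
// (then rounded up to the allocation alignment), i.e. roughly 1/64 of the
// space it covers.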
5704
5705void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5706  HeapWord *next_addr, *end_addr, *last_addr;
5707  assert_locked();
5708  assert(covers(mr), "out-of-range error");
5709  // XXX assert that start and end are appropriately aligned
5710  for (next_addr = mr.start(), end_addr = mr.end();
5711       next_addr < end_addr; next_addr = last_addr) {
5712    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5713    last_addr = dirty_region.end();
5714    if (!dirty_region.is_empty()) {
5715      cl->do_MemRegion(dirty_region);
5716    } else {
5717      assert(last_addr == end_addr, "program logic");
5718      return;
5719    }
5720  }
5721}
5722
5723void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5724  _bm.print_on_error(st, prefix);
5725}
5726
5727#ifndef PRODUCT
5728void CMSBitMap::assert_locked() const {
5729  CMSLockVerifier::assert_locked(lock());
5730}
5731
5732bool CMSBitMap::covers(MemRegion mr) const {
5733  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5734  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5735         "size inconsistency");
5736  return (mr.start() >= _bmStartWord) &&
5737         (mr.end()   <= endWord());
5738}
5739
5740bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5741    return (start >= _bmStartWord && (start + size) <= endWord());
5742}
5743
5744void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5745  // verify that there are no 1 bits in the interval [left, right)
5746  FalseBitMapClosure falseBitMapClosure;
5747  iterate(&falseBitMapClosure, left, right);
5748}
5749
5750void CMSBitMap::region_invariant(MemRegion mr)
5751{
5752  assert_locked();
5753  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5754  assert(!mr.is_empty(), "unexpected empty region");
5755  assert(covers(mr), "mr should be covered by bit map");
5756  // convert address range into offset range
5757  size_t start_ofs = heapWordToOffset(mr.start());
5758  // Make sure that end() is appropriately aligned
5759  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5760                        (1 << (_shifter+LogHeapWordSize))),
5761         "Misaligned mr.end()");
5762  size_t end_ofs   = heapWordToOffset(mr.end());
5763  assert(end_ofs > start_ofs, "Should mark at least one bit");
5764}
5765
5766#endif
5767
5768bool CMSMarkStack::allocate(size_t size) {
5769  // allocate a stack of the requisite depth
5770  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5771                   size * sizeof(oop)));
5772  if (!rs.is_reserved()) {
5773    log_warning(gc)("CMSMarkStack allocation failure");
5774    return false;
5775  }
5776  if (!_virtual_space.initialize(rs, rs.size())) {
5777    log_warning(gc)("CMSMarkStack backing store failure");
5778    return false;
5779  }
5780  assert(_virtual_space.committed_size() == rs.size(),
5781         "didn't reserve backing store for all of CMS stack?");
5782  _base = (oop*)(_virtual_space.low());
5783  _index = 0;
5784  _capacity = size;
5785  NOT_PRODUCT(_max_depth = 0);
5786  return true;
5787}
5788
5789// XXX FIX ME !!! In the MT case we come in here holding a
5790// leaf lock. For printing we need to take a further lock
5791// which has lower rank. We need to recalibrate the two
5792// lock-ranks involved in order to be able to print the
5793// messages below. (Or defer the printing to the caller.
5794// For now we take the expedient path of just disabling the
5795// messages for the problematic case.)
5796void CMSMarkStack::expand() {
5797  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5798  if (_capacity == MarkStackSizeMax) {
5799    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5800      // We print a warning message only once per CMS cycle.
5801      log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5802    }
5803    return;
5804  }
5805  // Double capacity if possible
5806  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5807  // Do not give up the existing stack until we have managed to
5808  // get the doubled capacity that we desired.
5809  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5810                   new_capacity * sizeof(oop)));
5811  if (rs.is_reserved()) {
5812    // Release the backing store associated with old stack
5813    _virtual_space.release();
5814    // Reinitialize virtual space for new stack
5815    if (!_virtual_space.initialize(rs, rs.size())) {
5816      fatal("Not enough swap for expanded marking stack");
5817    }
5818    _base = (oop*)(_virtual_space.low());
5819    _index = 0;
5820    _capacity = new_capacity;
5821  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5822    // Failed to double capacity, continue;
5823    // we print a detail message only once per CMS cycle.
5824    log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5825                        _capacity / K, new_capacity / K);
5826  }
5827}
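// Illustrative shape of the growth policy above (hypothetical helper names,
// not part of the build):
//
//   size_t new_capacity = MIN2(2 * _capacity, MarkStackSizeMax);
//   if (reserve(new_capacity)) {       // reserve the larger store first ...
//     release_old_and_commit_new();    // ... only then let go of the old one
//     _index = 0;                      // the index is reset, so any prior
//     _capacity = new_capacity;        // contents are dropped; callers must
//   } else {                           // have drained the stack already
//     // keep the old capacity and log the failure once per CMS cycle
//   }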
5828
5829
5830// Closures
5831// XXX: there seems to be a lot of code duplication here;
5832// should refactor and consolidate common code.
5833
5834// This closure is used to mark refs into the CMS generation in
5835// the CMS bit map. Called at the first checkpoint. This closure
5836// assumes that we do not need to re-mark dirty cards; if the CMS
5837// generation on which this is used is not the oldest
5838// generation, then this will lose younger_gen cards!
5839
5840MarkRefsIntoClosure::MarkRefsIntoClosure(
5841  MemRegion span, CMSBitMap* bitMap):
5842    _span(span),
5843    _bitMap(bitMap)
5844{
5845  assert(ref_processor() == NULL, "deliberately left NULL");
5846  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5847}
5848
5849void MarkRefsIntoClosure::do_oop(oop obj) {
5850  // if p points into _span, then mark corresponding bit in _markBitMap
5851  assert(obj->is_oop(), "expected an oop");
5852  HeapWord* addr = (HeapWord*)obj;
5853  if (_span.contains(addr)) {
5854    // this should be made more efficient
5855    _bitMap->mark(addr);
5856  }
5857}
5858
5859void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
5860void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5861
5862ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5863  MemRegion span, CMSBitMap* bitMap):
5864    _span(span),
5865    _bitMap(bitMap)
5866{
5867  assert(ref_processor() == NULL, "deliberately left NULL");
5868  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5869}
5870
5871void ParMarkRefsIntoClosure::do_oop(oop obj) {
5872  // if p points into _span, then mark corresponding bit in _markBitMap
5873  assert(obj->is_oop(), "expected an oop");
5874  HeapWord* addr = (HeapWord*)obj;
5875  if (_span.contains(addr)) {
5876    // this should be made more efficient
5877    _bitMap->par_mark(addr);
5878  }
5879}
5880
5881void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
5882void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5883
5884// A variant of the above, used for CMS marking verification.
5885MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5886  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5887    _span(span),
5888    _verification_bm(verification_bm),
5889    _cms_bm(cms_bm)
5890{
5891  assert(ref_processor() == NULL, "deliberately left NULL");
5892  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5893}
5894
5895void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5896  // if p points into _span, then mark corresponding bit in _markBitMap
5897  assert(obj->is_oop(), "expected an oop");
5898  HeapWord* addr = (HeapWord*)obj;
5899  if (_span.contains(addr)) {
5900    _verification_bm->mark(addr);
5901    if (!_cms_bm->isMarked(addr)) {
5902      Log(gc, verify) log;
5903      ResourceMark rm;
5904      oop(addr)->print_on(log.error_stream());
5905      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5906      fatal("... aborting");
5907    }
5908  }
5909}
5910
5911void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5912void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5913
5914//////////////////////////////////////////////////
5915// MarkRefsIntoAndScanClosure
5916//////////////////////////////////////////////////
5917
5918MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5919                                                       ReferenceProcessor* rp,
5920                                                       CMSBitMap* bit_map,
5921                                                       CMSBitMap* mod_union_table,
5922                                                       CMSMarkStack*  mark_stack,
5923                                                       CMSCollector* collector,
5924                                                       bool should_yield,
5925                                                       bool concurrent_precleaning):
5926  _collector(collector),
5927  _span(span),
5928  _bit_map(bit_map),
5929  _mark_stack(mark_stack),
5930  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5931                      mark_stack, concurrent_precleaning),
5932  _yield(should_yield),
5933  _concurrent_precleaning(concurrent_precleaning),
5934  _freelistLock(NULL)
5935{
5936  // FIXME: Should initialize in base class constructor.
5937  assert(rp != NULL, "ref_processor shouldn't be NULL");
5938  set_ref_processor_internal(rp);
5939}
5940
5941// This closure is used to mark refs into the CMS generation at the
5942// second (final) checkpoint, and to scan and transitively follow
5943// the unmarked oops. It is also used during the concurrent precleaning
5944// phase while scanning objects on dirty cards in the CMS generation.
5945// The marks are made in the marking bit map and the marking stack is
5946// used for keeping the (newly) grey objects during the scan.
5947// The parallel version (ParMarkRefsIntoAndScanClosure) appears further below.
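// Tri-color view of the loop below (illustrative, not authoritative):
//   white = not marked in the bit map
//   grey  = marked, but its references not yet scanned (on the mark stack)
//   black = marked and fully scanned
//
//   mark(addr); push(obj);                 // obj becomes grey
//   while (!stack.isEmpty()) {
//     oop grey = stack.pop();
//     grey->oop_iterate(&push_and_mark);   // blacken grey; grey its white referents
//   }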
5948void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5949  if (obj != NULL) {
5950    assert(obj->is_oop(), "expected an oop");
5951    HeapWord* addr = (HeapWord*)obj;
5952    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5953    assert(_collector->overflow_list_is_empty(),
5954           "overflow list should be empty");
5955    if (_span.contains(addr) &&
5956        !_bit_map->isMarked(addr)) {
5957      // mark bit map (object is now grey)
5958      _bit_map->mark(addr);
5959      // push on marking stack (stack should be empty), and drain the
5960      // stack by applying this closure to the oops in the oops popped
5961      // from the stack (i.e. blacken the grey objects)
5962      bool res = _mark_stack->push(obj);
5963      assert(res, "Should have space to push on empty stack");
5964      do {
5965        oop new_oop = _mark_stack->pop();
5966        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
5967        assert(_bit_map->isMarked((HeapWord*)new_oop),
5968               "only grey objects on this stack");
5969        // iterate over the oops in this oop, marking and pushing
5970        // the ones in CMS heap (i.e. in _span).
5971        new_oop->oop_iterate(&_pushAndMarkClosure);
5972        // check if it's time to yield
5973        do_yield_check();
5974      } while (!_mark_stack->isEmpty() ||
5975               (!_concurrent_precleaning && take_from_overflow_list()));
5976        // if marking stack is empty, and we are not doing this
5977        // during precleaning, then check the overflow list
5978    }
5979    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5980    assert(_collector->overflow_list_is_empty(),
5981           "overflow list was drained above");
5982
5983    assert(_collector->no_preserved_marks(),
5984           "All preserved marks should have been restored above");
5985  }
5986}
5987
5988void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5989void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5990
5991void MarkRefsIntoAndScanClosure::do_yield_work() {
5992  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5993         "CMS thread should hold CMS token");
5994  assert_lock_strong(_freelistLock);
5995  assert_lock_strong(_bit_map->lock());
5996  // relinquish the free_list_lock and bitMaplock()
5997  _bit_map->lock()->unlock();
5998  _freelistLock->unlock();
5999  ConcurrentMarkSweepThread::desynchronize(true);
6000  _collector->stopTimer();
6001  _collector->incrementYields();
6002
6003  // See the comment in coordinator_yield()
6004  for (unsigned i = 0;
6005       i < CMSYieldSleepCount &&
6006       ConcurrentMarkSweepThread::should_yield() &&
6007       !CMSCollector::foregroundGCIsActive();
6008       ++i) {
6009    os::sleep(Thread::current(), 1, false);
6010  }
6011
6012  ConcurrentMarkSweepThread::synchronize(true);
6013  _freelistLock->lock_without_safepoint_check();
6014  _bit_map->lock()->lock_without_safepoint_check();
6015  _collector->startTimer();
6016}
6017
6018///////////////////////////////////////////////////////////
6019// ParMarkRefsIntoAndScanClosure: a parallel version of
6020//                                MarkRefsIntoAndScanClosure
6021///////////////////////////////////////////////////////////
6022ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
6023  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6024  CMSBitMap* bit_map, OopTaskQueue* work_queue):
6025  _span(span),
6026  _bit_map(bit_map),
6027  _work_queue(work_queue),
6028  _low_water_mark(MIN2((work_queue->max_elems()/4),
6029                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6030  _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6031{
6032  // FIXME: Should initialize in base class constructor.
6033  assert(rp != NULL, "ref_processor shouldn't be NULL");
6034  set_ref_processor_internal(rp);
6035}
6036
6037// This closure is used to mark refs into the CMS generation at the
6038// second (final) checkpoint, and to scan and transitively follow
6039// the unmarked oops. The marks are made in the marking bit map and
6040// the work_queue is used for keeping the (newly) grey objects during
6041// the scan phase whence they are also available for stealing by parallel
6042// threads. Since the marking bit map is shared, updates are
6043// synchronized (via CAS).
6044void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6045  if (obj != NULL) {
6046    // Ignore mark word because this could be an already marked oop
6047    // that may be chained at the end of the overflow list.
6048    assert(obj->is_oop(true), "expected an oop");
6049    HeapWord* addr = (HeapWord*)obj;
6050    if (_span.contains(addr) &&
6051        !_bit_map->isMarked(addr)) {
6052      // mark bit map (object will become grey):
6053      // It is possible for several threads to be
6054      // trying to "claim" this object concurrently;
6055      // the unique thread that succeeds in marking the
6056      // object first will do the subsequent push on
6057      // to the work queue (or overflow list).
6058      if (_bit_map->par_mark(addr)) {
6059        // push on work_queue (which may not be empty), and trim the
6060        // queue to an appropriate length by applying this closure to
6061        // the oops in the oops popped from the stack (i.e. blacken the
6062        // grey objects)
6063        bool res = _work_queue->push(obj);
6064        assert(res, "Low water mark should be less than capacity?");
6065        trim_queue(_low_water_mark);
6066      } // Else, another thread claimed the object
6067    }
6068  }
6069}
6070
6071void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6072void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6073
6074// This closure is used to rescan the marked objects on the dirty cards
6075// in the mod union table and the card table proper.
6076size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6077  oop p, MemRegion mr) {
6078
6079  size_t size = 0;
6080  HeapWord* addr = (HeapWord*)p;
6081  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6082  assert(_span.contains(addr), "we are scanning the CMS generation");
6083  // check if it's time to yield
6084  if (do_yield_check()) {
6085    // We yielded for some foreground stop-world work,
6086    // and we have been asked to abort this ongoing preclean cycle.
6087    return 0;
6088  }
6089  if (_bitMap->isMarked(addr)) {
6090    // it's marked; is it potentially uninitialized?
6091    if (p->klass_or_null() != NULL) {
6092        // an initialized object; ignore mark word in verification below
6093        // since we are running concurrently with mutators
6094        assert(p->is_oop(true), "should be an oop");
6095        if (p->is_objArray()) {
6096          // objArrays are precisely marked; restrict scanning
6097          // to dirty cards only.
6098          size = CompactibleFreeListSpace::adjustObjectSize(
6099                   p->oop_iterate_size(_scanningClosure, mr));
6100        } else {
6101          // A non-array may have been imprecisely marked; we need
6102          // to scan the object in its entirety.
6103          size = CompactibleFreeListSpace::adjustObjectSize(
6104                   p->oop_iterate_size(_scanningClosure));
6105        }
6106      #ifdef ASSERT
6107        size_t direct_size =
6108          CompactibleFreeListSpace::adjustObjectSize(p->size());
6109        assert(size == direct_size, "Inconsistency in size");
6110        assert(size >= 3, "Necessary for Printezis marks to work");
6111        HeapWord* start_pbit = addr + 1;
6112        HeapWord* end_pbit = addr + size - 1;
6113        assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
6114               "inconsistent Printezis mark");
6115        // Verify inner mark bits (between Printezis bits) are clear,
6116        // but don't repeat if there are multiple dirty regions for
6117        // the same object, to avoid potential O(N^2) performance.
6118        if (addr != _last_scanned_object) {
6119          _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
6120          _last_scanned_object = addr;
6121        }
6122      #endif // ASSERT
6123    } else {
6124      // An uninitialized object.
6125      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6126      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6127      size = pointer_delta(nextOneAddr + 1, addr);
6128      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6129             "alignment problem");
6130      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6131      // will dirty the card when the klass pointer is installed in the
6132      // object (signaling the completion of initialization).
6133    }
6134  } else {
6135    // Either a not yet marked object or an uninitialized object
6136    if (p->klass_or_null() == NULL) {
6137      // An uninitialized object, skip to the next card, since
6138      // we may not be able to read its P-bits yet.
6139      assert(size == 0, "Initial value");
6140    } else {
6141      // An object not (yet) reached by marking: we merely need to
6142      // compute its size so as to go look at the next block.
6143      assert(p->is_oop(true), "should be an oop");
6144      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6145    }
6146  }
6147  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6148  return size;
6149}
6150
6151void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6152  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6153         "CMS thread should hold CMS token");
6154  assert_lock_strong(_freelistLock);
6155  assert_lock_strong(_bitMap->lock());
6156  // relinquish the free_list_lock and bitMaplock()
6157  _bitMap->lock()->unlock();
6158  _freelistLock->unlock();
6159  ConcurrentMarkSweepThread::desynchronize(true);
6160  _collector->stopTimer();
6161  _collector->incrementYields();
6162
6163  // See the comment in coordinator_yield()
6164  for (unsigned i = 0; i < CMSYieldSleepCount &&
6165                   ConcurrentMarkSweepThread::should_yield() &&
6166                   !CMSCollector::foregroundGCIsActive(); ++i) {
6167    os::sleep(Thread::current(), 1, false);
6168  }
6169
6170  ConcurrentMarkSweepThread::synchronize(true);
6171  _freelistLock->lock_without_safepoint_check();
6172  _bitMap->lock()->lock_without_safepoint_check();
6173  _collector->startTimer();
6174}
6175
6176
6177//////////////////////////////////////////////////////////////////
6178// SurvivorSpacePrecleanClosure
6179//////////////////////////////////////////////////////////////////
6180// This (single-threaded) closure is used to preclean the oops in
6181// the survivor spaces.
6182size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6183
6184  HeapWord* addr = (HeapWord*)p;
6185  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6186  assert(!_span.contains(addr), "we are scanning the survivor spaces");
6187  assert(p->klass_or_null() != NULL, "object should be initialized");
6188  // an initialized object; ignore mark word in verification below
6189  // since we are running concurrently with mutators
6190  assert(p->is_oop(true), "should be an oop");
6191  // Note that we do not yield while we iterate over
6192  // the interior oops of p, pushing the relevant ones
6193  // on our marking stack.
6194  size_t size = p->oop_iterate_size(_scanning_closure);
6195  do_yield_check();
6196  // Observe that below, we do not abandon the preclean
6197  // phase as soon as we should; rather we empty the
6198  // marking stack before returning. This is to satisfy
6199  // some existing assertions. In general, it may be a
6200  // good idea to abort immediately and complete the marking
6201  // from the grey objects at a later time.
6202  while (!_mark_stack->isEmpty()) {
6203    oop new_oop = _mark_stack->pop();
6204    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6205    assert(_bit_map->isMarked((HeapWord*)new_oop),
6206           "only grey objects on this stack");
6207    // iterate over the oops in this oop, marking and pushing
6208    // the ones in CMS heap (i.e. in _span).
6209    new_oop->oop_iterate(_scanning_closure);
6210    // check if it's time to yield
6211    do_yield_check();
6212  }
6213  unsigned int after_count =
6214    GenCollectedHeap::heap()->total_collections();
6215  bool abort = (_before_count != after_count) ||
6216               _collector->should_abort_preclean();
6217  return abort ? 0 : size;
6218}
6219
6220void SurvivorSpacePrecleanClosure::do_yield_work() {
6221  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6222         "CMS thread should hold CMS token");
6223  assert_lock_strong(_bit_map->lock());
6224  // Relinquish the bit map lock
6225  _bit_map->lock()->unlock();
6226  ConcurrentMarkSweepThread::desynchronize(true);
6227  _collector->stopTimer();
6228  _collector->incrementYields();
6229
6230  // See the comment in coordinator_yield()
6231  for (unsigned i = 0; i < CMSYieldSleepCount &&
6232                       ConcurrentMarkSweepThread::should_yield() &&
6233                       !CMSCollector::foregroundGCIsActive(); ++i) {
6234    os::sleep(Thread::current(), 1, false);
6235  }
6236
6237  ConcurrentMarkSweepThread::synchronize(true);
6238  _bit_map->lock()->lock_without_safepoint_check();
6239  _collector->startTimer();
6240}
6241
6242// This closure is used to rescan the marked objects on the dirty cards
6243// in the mod union table and the card table proper. In the parallel
6244// case, although the bitMap is shared, we do a single read so the
6245// isMarked() query is "safe".
6246bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6247  // Ignore mark word because we are running concurrently with mutators
6248  assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6249  HeapWord* addr = (HeapWord*)p;
6250  assert(_span.contains(addr), "we are scanning the CMS generation");
6251  bool is_obj_array = false;
6252  #ifdef ASSERT
6253    if (!_parallel) {
6254      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6255      assert(_collector->overflow_list_is_empty(),
6256             "overflow list should be empty");
6257
6258    }
6259  #endif // ASSERT
6260  if (_bit_map->isMarked(addr)) {
6261    // Obj arrays are precisely marked, non-arrays are not;
6262    // so we scan objArrays precisely and non-arrays in their
6263    // entirety.
6264    if (p->is_objArray()) {
6265      is_obj_array = true;
6266      if (_parallel) {
6267        p->oop_iterate(_par_scan_closure, mr);
6268      } else {
6269        p->oop_iterate(_scan_closure, mr);
6270      }
6271    } else {
6272      if (_parallel) {
6273        p->oop_iterate(_par_scan_closure);
6274      } else {
6275        p->oop_iterate(_scan_closure);
6276      }
6277    }
6278  }
6279  #ifdef ASSERT
6280    if (!_parallel) {
6281      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6282      assert(_collector->overflow_list_is_empty(),
6283             "overflow list should be empty");
6284
6285    }
6286  #endif // ASSERT
6287  return is_obj_array;
6288}
6289
6290MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6291                        MemRegion span,
6292                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
6293                        bool should_yield, bool verifying):
6294  _collector(collector),
6295  _span(span),
6296  _bitMap(bitMap),
6297  _mut(&collector->_modUnionTable),
6298  _markStack(markStack),
6299  _yield(should_yield),
6300  _skipBits(0)
6301{
6302  assert(_markStack->isEmpty(), "stack should be empty");
6303  _finger = _bitMap->startWord();
6304  _threshold = _finger;
6305  assert(_collector->_restart_addr == NULL, "Sanity check");
6306  assert(_span.contains(_finger), "Out of bounds _finger?");
6307  DEBUG_ONLY(_verifying = verifying;)
6308}
6309
6310void MarkFromRootsClosure::reset(HeapWord* addr) {
6311  assert(_markStack->isEmpty(), "would cause duplicates on stack");
6312  assert(_span.contains(addr), "Out of bounds _finger?");
6313  _finger = addr;
6314  _threshold = (HeapWord*)round_to(
6315                 (intptr_t)_finger, CardTableModRefBS::card_size);
6316}
6317
6318// Should revisit to see if this should be restructured for
6319// greater efficiency.
6320bool MarkFromRootsClosure::do_bit(size_t offset) {
6321  if (_skipBits > 0) {
6322    _skipBits--;
6323    return true;
6324  }
6325  // convert offset into a HeapWord*
6326  HeapWord* addr = _bitMap->startWord() + offset;
6327  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6328         "address out of range");
6329  assert(_bitMap->isMarked(addr), "tautology");
6330  if (_bitMap->isMarked(addr+1)) {
6331    // this is an allocated but not yet initialized object
6332    assert(_skipBits == 0, "tautology");
6333    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6334    oop p = oop(addr);
6335    if (p->klass_or_null() == NULL) {
6336      DEBUG_ONLY(if (!_verifying) {)
6337        // We re-dirty the cards on which this object lies and increase
6338        // the _threshold so that we'll come back to scan this object
6339        // during the preclean or remark phase. (CMSCleanOnEnter)
6340        if (CMSCleanOnEnter) {
6341          size_t sz = _collector->block_size_using_printezis_bits(addr);
6342          HeapWord* end_card_addr   = (HeapWord*)round_to(
6343                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6344          MemRegion redirty_range = MemRegion(addr, end_card_addr);
6345          assert(!redirty_range.is_empty(), "Arithmetical tautology");
6346          // Bump _threshold to end_card_addr; note that
6347          // _threshold cannot possibly exceed end_card_addr, anyhow.
6348          // This prevents future clearing of the card as the scan proceeds
6349          // to the right.
6350          assert(_threshold <= end_card_addr,
6351                 "Because we are just scanning into this object");
6352          if (_threshold < end_card_addr) {
6353            _threshold = end_card_addr;
6354          }
6355          if (p->klass_or_null() != NULL) {
6356            // Redirty the range of cards...
6357            _mut->mark_range(redirty_range);
6358          } // ...else the setting of klass will dirty the card anyway.
6359        }
6360      DEBUG_ONLY(})
6361      return true;
6362    }
6363  }
6364  scanOopsInOop(addr);
6365  return true;
6366}
6367
6368// We take a break if we've been at this for a while,
6369// so as to avoid monopolizing the locks involved.
6370void MarkFromRootsClosure::do_yield_work() {
6371  // First give up the locks, then yield, then re-lock
6372  // We should probably use a constructor/destructor idiom to
6373  // do this unlock/lock or modify the MutexUnlocker class to
6374  // serve our purpose. XXX
6375  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6376         "CMS thread should hold CMS token");
6377  assert_lock_strong(_bitMap->lock());
6378  _bitMap->lock()->unlock();
6379  ConcurrentMarkSweepThread::desynchronize(true);
6380  _collector->stopTimer();
6381  _collector->incrementYields();
6382
6383  // See the comment in coordinator_yield()
6384  for (unsigned i = 0; i < CMSYieldSleepCount &&
6385                       ConcurrentMarkSweepThread::should_yield() &&
6386                       !CMSCollector::foregroundGCIsActive(); ++i) {
6387    os::sleep(Thread::current(), 1, false);
6388  }
6389
6390  ConcurrentMarkSweepThread::synchronize(true);
6391  _bitMap->lock()->lock_without_safepoint_check();
6392  _collector->startTimer();
6393}
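// A minimal sketch of the constructor/destructor idiom the XXX comment above
// alludes to (hypothetical class, not part of the build): the hand-written
// unlock/relock pair would become scope-bound.
//
//   class ReverseLocker : public StackObj {
//     Mutex* _m;
//    public:
//     ReverseLocker(Mutex* m) : _m(m) { _m->unlock(); }
//     ~ReverseLocker()                { _m->lock_without_safepoint_check(); }
//   };
//
//   // usage inside a yield routine:
//   //   { ReverseLocker rl(_bitMap->lock());
//   //     ... desynchronize, sleep, synchronize ...
//   //   } // bit map lock re-taken here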
6394
6395void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6396  assert(_bitMap->isMarked(ptr), "expected bit to be set");
6397  assert(_markStack->isEmpty(),
6398         "should drain stack to limit stack usage");
6399  // convert ptr to an oop preparatory to scanning
6400  oop obj = oop(ptr);
6401  // Ignore mark word in verification below, since we
6402  // may be running concurrently with mutators.
6403  assert(obj->is_oop(true), "should be an oop");
6404  assert(_finger <= ptr, "_finger runneth ahead");
6405  // advance the finger to right end of this object
6406  _finger = ptr + obj->size();
6407  assert(_finger > ptr, "we just incremented it above");
6408  // On large heaps, it may take us some time to get through
6409  // the marking phase. During
6410  // this time it's possible that a lot of mutations have
6411  // accumulated in the card table and the mod union table --
6412  // these mutation records are redundant until we have
6413  // actually traced into the corresponding card.
6414  // Here, we check whether advancing the finger would make
6415  // us cross into a new card, and if so clear corresponding
6416  // cards in the MUT (preclean them in the card-table in the
6417  // future).
6418
6419  DEBUG_ONLY(if (!_verifying) {)
6420    // The clean-on-enter optimization is disabled by default,
6421    // until we fix 6178663.
6422    if (CMSCleanOnEnter && (_finger > _threshold)) {
6423      // [_threshold, _finger) represents the interval
6424      // of cards to be cleared in MUT (or precleaned in card table).
6425      // The set of cards to be cleared is all those that overlap
6426      // with the interval [_threshold, _finger); note that
6427      // _threshold is always kept card-aligned but _finger isn't
6428      // always card-aligned.
6429      HeapWord* old_threshold = _threshold;
6430      assert(old_threshold == (HeapWord*)round_to(
6431              (intptr_t)old_threshold, CardTableModRefBS::card_size),
6432             "_threshold should always be card-aligned");
6433      _threshold = (HeapWord*)round_to(
6434                     (intptr_t)_finger, CardTableModRefBS::card_size);
6435      MemRegion mr(old_threshold, _threshold);
6436      assert(!mr.is_empty(), "Control point invariant");
6437      assert(_span.contains(mr), "Should clear within span");
6438      _mut->clear_range(mr);
6439    }
6440  DEBUG_ONLY(})
6441  // Note: the finger doesn't advance while we drain
6442  // the stack below.
6443  PushOrMarkClosure pushOrMarkClosure(_collector,
6444                                      _span, _bitMap, _markStack,
6445                                      _finger, this);
6446  bool res = _markStack->push(obj);
6447  assert(res, "Empty non-zero size stack should have space for single push");
6448  while (!_markStack->isEmpty()) {
6449    oop new_oop = _markStack->pop();
6450    // Skip verifying header mark word below because we are
6451    // running concurrently with mutators.
6452    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6453    // now scan this oop's oops
6454    new_oop->oop_iterate(&pushOrMarkClosure);
6455    do_yield_check();
6456  }
6457  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6458}
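// Worked example for the [_threshold, _finger) clearing above (illustrative
// addresses, assuming 512-byte cards): suppose _threshold == 0x4000 (card
// aligned) and the finger has just advanced to 0x43a8. Then
//   new _threshold = round_to(0x43a8, 512) = 0x4400
//   mr             = [0x4000, 0x4400)     // two whole cards
// and, when CMSCleanOnEnter is enabled, those two cards are cleared in the
// mod union table, matching the comment above that mutation records for
// cards the marking finger has traced into are redundant.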
6459
6460ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6461                       CMSCollector* collector, MemRegion span,
6462                       CMSBitMap* bit_map,
6463                       OopTaskQueue* work_queue,
6464                       CMSMarkStack*  overflow_stack):
6465  _collector(collector),
6466  _whole_span(collector->_span),
6467  _span(span),
6468  _bit_map(bit_map),
6469  _mut(&collector->_modUnionTable),
6470  _work_queue(work_queue),
6471  _overflow_stack(overflow_stack),
6472  _skip_bits(0),
6473  _task(task)
6474{
6475  assert(_work_queue->size() == 0, "work_queue should be empty");
6476  _finger = span.start();
6477  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6478  assert(_span.contains(_finger), "Out of bounds _finger?");
6479}
6480
6481// Should revisit to see if this should be restructured for
6482// greater efficiency.
6483bool ParMarkFromRootsClosure::do_bit(size_t offset) {
6484  if (_skip_bits > 0) {
6485    _skip_bits--;
6486    return true;
6487  }
6488  // convert offset into a HeapWord*
6489  HeapWord* addr = _bit_map->startWord() + offset;
6490  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6491         "address out of range");
6492  assert(_bit_map->isMarked(addr), "tautology");
6493  if (_bit_map->isMarked(addr+1)) {
6494    // this is an allocated object that might not yet be initialized
6495    assert(_skip_bits == 0, "tautology");
6496    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
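        // (See the block comment preceding SweepClosure::do_blk_careful,
        // further below, for how these "Printezis marks" encode the size
        // of a block whose header has not yet been installed.)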
6497    oop p = oop(addr);
6498    if (p->klass_or_null() == NULL) {
6499      // In the case of the Clean-on-Enter optimization, redirty the card
6500      // and avoid clearing the card by increasing the threshold.
6501      return true;
6502    }
6503  }
6504  scan_oops_in_oop(addr);
6505  return true;
6506}
6507
6508void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6509  assert(_bit_map->isMarked(ptr), "expected bit to be set");
6510  // Should we assert that our work queue is empty or
6511  // below some drain limit?
6512  assert(_work_queue->size() == 0,
6513         "should drain stack to limit stack usage");
6514  // convert ptr to an oop preparatory to scanning
6515  oop obj = oop(ptr);
6516  // Ignore mark word in verification below, since we
6517  // may be running concurrently with mutators.
6518  assert(obj->is_oop(true), "should be an oop");
6519  assert(_finger <= ptr, "_finger runneth ahead");
6520  // advance the finger to right end of this object
6521  _finger = ptr + obj->size();
6522  assert(_finger > ptr, "we just incremented it above");
6523  // On large heaps, it may take us some time to get through
6524  // the marking phase. During
6525  // this time it's possible that a lot of mutations have
6526  // accumulated in the card table and the mod union table --
6527  // these mutation records are redundant until we have
6528  // actually traced into the corresponding card.
6529  // Here, we check whether advancing the finger would make
6530  // us cross into a new card, and if so clear corresponding
6531  // cards in the MUT (preclean them in the card-table in the
6532  // future).
6533
6534  // The clean-on-enter optimization is disabled by default,
6535  // until we fix 6178663.
6536  if (CMSCleanOnEnter && (_finger > _threshold)) {
6537    // [_threshold, _finger) represents the interval
6538    // of cards to be cleared  in MUT (or precleaned in card table).
6539    // The set of cards to be cleared is all those that overlap
6540    // with the interval [_threshold, _finger); note that
6541    // _threshold is always kept card-aligned but _finger isn't
6542    // always card-aligned.
6543    HeapWord* old_threshold = _threshold;
6544    assert(old_threshold == (HeapWord*)round_to(
6545            (intptr_t)old_threshold, CardTableModRefBS::card_size),
6546           "_threshold should always be card-aligned");
6547    _threshold = (HeapWord*)round_to(
6548                   (intptr_t)_finger, CardTableModRefBS::card_size);
6549    MemRegion mr(old_threshold, _threshold);
6550    assert(!mr.is_empty(), "Control point invariant");
6551    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6552    _mut->clear_range(mr);
6553  }
6554
6555  // Note: the local finger doesn't advance while we drain
6556  // the stack below, but the global finger sure can and will.
6557  HeapWord** gfa = _task->global_finger_addr();
6558  ParPushOrMarkClosure pushOrMarkClosure(_collector,
6559                                         _span, _bit_map,
6560                                         _work_queue,
6561                                         _overflow_stack,
6562                                         _finger,
6563                                         gfa, this);
6564  bool res = _work_queue->push(obj);   // overflow could occur here
6565  assert(res, "Will hold once we use workqueues");
6566  while (true) {
6567    oop new_oop;
6568    if (!_work_queue->pop_local(new_oop)) {
6569      // We emptied our work_queue; check if there's stuff that can
6570      // be gotten from the overflow stack.
6571      if (CMSConcMarkingTask::get_work_from_overflow_stack(
6572            _overflow_stack, _work_queue)) {
6573        do_yield_check();
6574        continue;
6575      } else {  // done
6576        break;
6577      }
6578    }
6579    // Skip verifying header mark word below because we are
6580    // running concurrently with mutators.
6581    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6582    // now scan this oop's oops
6583    new_oop->oop_iterate(&pushOrMarkClosure);
6584    do_yield_check();
6585  }
6586  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6587}
6588
6589// Yield in response to a request from VM Thread or
6590// from mutators.
6591void ParMarkFromRootsClosure::do_yield_work() {
6592  assert(_task != NULL, "sanity");
6593  _task->yield();
6594}
6595
6596// A variant of the above used for verifying CMS marking work.
6597MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6598                        MemRegion span,
6599                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6600                        CMSMarkStack*  mark_stack):
6601  _collector(collector),
6602  _span(span),
6603  _verification_bm(verification_bm),
6604  _cms_bm(cms_bm),
6605  _mark_stack(mark_stack),
6606  _pam_verify_closure(collector, span, verification_bm, cms_bm,
6607                      mark_stack)
6608{
6609  assert(_mark_stack->isEmpty(), "stack should be empty");
6610  _finger = _verification_bm->startWord();
6611  assert(_collector->_restart_addr == NULL, "Sanity check");
6612  assert(_span.contains(_finger), "Out of bounds _finger?");
6613}
6614
6615void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6616  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6617  assert(_span.contains(addr), "Out of bounds _finger?");
6618  _finger = addr;
6619}
6620
6621// Should revisit to see if this should be restructured for
6622// greater efficiency.
6623bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6624  // convert offset into a HeapWord*
6625  HeapWord* addr = _verification_bm->startWord() + offset;
6626  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6627         "address out of range");
6628  assert(_verification_bm->isMarked(addr), "tautology");
6629  assert(_cms_bm->isMarked(addr), "tautology");
6630
6631  assert(_mark_stack->isEmpty(),
6632         "should drain stack to limit stack usage");
6633  // convert addr to an oop preparatory to scanning
6634  oop obj = oop(addr);
6635  assert(obj->is_oop(), "should be an oop");
6636  assert(_finger <= addr, "_finger runneth ahead");
6637  // advance the finger to right end of this object
6638  _finger = addr + obj->size();
6639  assert(_finger > addr, "we just incremented it above");
6640  // Note: the finger doesn't advance while we drain
6641  // the stack below.
6642  bool res = _mark_stack->push(obj);
6643  assert(res, "Empty non-zero size stack should have space for single push");
6644  while (!_mark_stack->isEmpty()) {
6645    oop new_oop = _mark_stack->pop();
6646    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6647    // now scan this oop's oops
6648    new_oop->oop_iterate(&_pam_verify_closure);
6649  }
6650  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6651  return true;
6652}
6653
6654PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6655  CMSCollector* collector, MemRegion span,
6656  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6657  CMSMarkStack*  mark_stack):
6658  MetadataAwareOopClosure(collector->ref_processor()),
6659  _collector(collector),
6660  _span(span),
6661  _verification_bm(verification_bm),
6662  _cms_bm(cms_bm),
6663  _mark_stack(mark_stack)
6664{ }
6665
6666void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6667void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6668
6669// Upon stack overflow, we discard (part of) the stack,
6670// remembering the least address amongst those discarded
6671// in CMSCollector's _restart_address.
6672void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6673  // Remember the least grey address discarded
6674  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6675  _collector->lower_restart_addr(ra);
6676  _mark_stack->reset();  // discard stack contents
6677  _mark_stack->expand(); // expand the stack if possible
6678}
6679
6680void PushAndMarkVerifyClosure::do_oop(oop obj) {
6681  assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6682  HeapWord* addr = (HeapWord*)obj;
6683  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6684    // Oop lies in _span and isn't yet grey or black
6685    _verification_bm->mark(addr);            // now grey
6686    if (!_cms_bm->isMarked(addr)) {
6687      Log(gc, verify) log;
6688      ResourceMark rm;
6689      oop(addr)->print_on(log.error_stream());
6690      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6691      fatal("... aborting");
6692    }
6693
6694    if (!_mark_stack->push(obj)) { // stack overflow
6695      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6696      assert(_mark_stack->isFull(), "Else push should have succeeded");
6697      handle_stack_overflow(addr);
6698    }
6699    // anything including and to the right of _finger
6700    // will be scanned as we iterate over the remainder of the
6701    // bit map
6702  }
6703}
6704
6705PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6706                     MemRegion span,
6707                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
6708                     HeapWord* finger, MarkFromRootsClosure* parent) :
6709  MetadataAwareOopClosure(collector->ref_processor()),
6710  _collector(collector),
6711  _span(span),
6712  _bitMap(bitMap),
6713  _markStack(markStack),
6714  _finger(finger),
6715  _parent(parent)
6716{ }
6717
6718ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6719                                           MemRegion span,
6720                                           CMSBitMap* bit_map,
6721                                           OopTaskQueue* work_queue,
6722                                           CMSMarkStack*  overflow_stack,
6723                                           HeapWord* finger,
6724                                           HeapWord** global_finger_addr,
6725                                           ParMarkFromRootsClosure* parent) :
6726  MetadataAwareOopClosure(collector->ref_processor()),
6727  _collector(collector),
6728  _whole_span(collector->_span),
6729  _span(span),
6730  _bit_map(bit_map),
6731  _work_queue(work_queue),
6732  _overflow_stack(overflow_stack),
6733  _finger(finger),
6734  _global_finger_addr(global_finger_addr),
6735  _parent(parent)
6736{ }
6737
6738// Assumes thread-safe access by callers, who are
6739// responsible for mutual exclusion.
6740void CMSCollector::lower_restart_addr(HeapWord* low) {
6741  assert(_span.contains(low), "Out of bounds addr");
6742  if (_restart_addr == NULL) {
6743    _restart_addr = low;
6744  } else {
6745    _restart_addr = MIN2(_restart_addr, low);
6746  }
6747}
6748
6749// Upon stack overflow, we discard (part of) the stack,
6750// remembering the least address amongst those discarded
6751// in CMSCollector's _restart_address.
6752void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6753  // Remember the least grey address discarded
6754  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6755  _collector->lower_restart_addr(ra);
6756  _markStack->reset();  // discard stack contents
6757  _markStack->expand(); // expand the stack if possible
6758}
6759
6760// Upon stack overflow, we discard (part of) the stack,
6761// remembering the least address amongst those discarded
6762// in CMSCollector's _restart_address.
6763void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6764  // We need to do this under a mutex to prevent other
6765  // workers from interfering with the work done below.
6766  MutexLockerEx ml(_overflow_stack->par_lock(),
6767                   Mutex::_no_safepoint_check_flag);
6768  // Remember the least grey address discarded
6769  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6770  _collector->lower_restart_addr(ra);
6771  _overflow_stack->reset();  // discard stack contents
6772  _overflow_stack->expand(); // expand the stack if possible
6773}
6774
6775void PushOrMarkClosure::do_oop(oop obj) {
6776  // Ignore mark word because we are running concurrently with mutators.
6777  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6778  HeapWord* addr = (HeapWord*)obj;
6779  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6780    // Oop lies in _span and isn't yet grey or black
6781    _bitMap->mark(addr);            // now grey
6782    if (addr < _finger) {
6783      // the bit map iteration has already either passed, or
6784      // sampled, this bit in the bit map; we'll need to
6785      // use the marking stack to scan this oop's oops.
6786      bool simulate_overflow = false;
6787      NOT_PRODUCT(
6788        if (CMSMarkStackOverflowALot &&
6789            _collector->simulate_overflow()) {
6790          // simulate a stack overflow
6791          simulate_overflow = true;
6792        }
6793      )
6794      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6795        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6796        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6797        handle_stack_overflow(addr);
6798      }
6799    }
6800    // anything including and to the right of _finger
6801    // will be scanned as we iterate over the remainder of the
6802    // bit map
6803    do_yield_check();
6804  }
6805}
6806
6807void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
6808void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6809
6810void ParPushOrMarkClosure::do_oop(oop obj) {
6811  // Ignore mark word because we are running concurrently with mutators.
6812  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6813  HeapWord* addr = (HeapWord*)obj;
6814  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6815    // Oop lies in _span and isn't yet grey or black
6816    // We read the global_finger (volatile read) strictly after marking oop
6817    bool res = _bit_map->par_mark(addr);    // now grey
6818    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6819    // Should we push this marked oop on our stack?
6820    // -- if someone else marked it, nothing to do
6821    // -- if target oop is above global finger nothing to do
6822    // -- if target oop is in chunk and above local finger
6823    //      then nothing to do
6824    // -- else push on work queue
6825    if (   !res       // someone else marked it, they will deal with it
6826        || (addr >= *gfa)  // will be scanned in a later task
6827        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6828      return;
6829    }
6830    // the bit map iteration has already either passed, or
6831    // sampled, this bit in the bit map; we'll need to
6832    // use the marking stack to scan this oop's oops.
6833    bool simulate_overflow = false;
6834    NOT_PRODUCT(
6835      if (CMSMarkStackOverflowALot &&
6836          _collector->simulate_overflow()) {
6837        // simulate a stack overflow
6838        simulate_overflow = true;
6839      }
6840    )
6841    if (simulate_overflow ||
6842        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6843      // stack overflow
6844      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6845      // We cannot assert that the overflow stack is full because
6846      // it may have been emptied since.
6847      assert(simulate_overflow ||
6848             _work_queue->size() == _work_queue->max_elems(),
6849            "Else push should have succeeded");
6850      handle_stack_overflow(addr);
6851    }
6852    do_yield_check();
6853  }
6854}
6855
6856void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
6857void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
6858
6859PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6860                                       MemRegion span,
6861                                       ReferenceProcessor* rp,
6862                                       CMSBitMap* bit_map,
6863                                       CMSBitMap* mod_union_table,
6864                                       CMSMarkStack*  mark_stack,
6865                                       bool           concurrent_precleaning):
6866  MetadataAwareOopClosure(rp),
6867  _collector(collector),
6868  _span(span),
6869  _bit_map(bit_map),
6870  _mod_union_table(mod_union_table),
6871  _mark_stack(mark_stack),
6872  _concurrent_precleaning(concurrent_precleaning)
6873{
6874  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6875}
6876
6877// Grey object rescan during pre-cleaning and second checkpoint phases --
6878// the non-parallel version (the parallel version appears further below.)
6879void PushAndMarkClosure::do_oop(oop obj) {
6880  // Ignore mark word verification. If during concurrent precleaning,
6881  // the object monitor may be locked. If during the checkpoint
6882  // phases, the object may already have been reached by a different
6883  // path and may be at the end of the global overflow list (so
6884  // the mark word may be NULL).
6885  assert(obj->is_oop_or_null(true /* ignore mark word */),
6886         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6887  HeapWord* addr = (HeapWord*)obj;
6888  // Check if oop points into the CMS generation
6889  // and is not marked
6890  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6891    // a white object ...
6892    _bit_map->mark(addr);         // ... now grey
6893    // push on the marking stack (grey set)
6894    bool simulate_overflow = false;
6895    NOT_PRODUCT(
6896      if (CMSMarkStackOverflowALot &&
6897          _collector->simulate_overflow()) {
6898        // simulate a stack overflow
6899        simulate_overflow = true;
6900      }
6901    )
6902    if (simulate_overflow || !_mark_stack->push(obj)) {
6903      if (_concurrent_precleaning) {
6904         // During precleaning we can just dirty the appropriate card(s)
6905         // in the mod union table, thus ensuring that the object remains
6906         // in the grey set and continue. In the case of object arrays
6907         // we need to dirty all of the cards that the object spans,
6908         // since the rescan of object arrays will be limited to the
6909         // dirty cards.
6910         // Note that no one can be interfering with us in this action
6911         // of dirtying the mod union table, so no locking or atomics
6912         // are required.
6913         if (obj->is_objArray()) {
6914           size_t sz = obj->size();
6915           HeapWord* end_card_addr = (HeapWord*)round_to(
6916                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6917           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6918           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6919           _mod_union_table->mark_range(redirty_range);
6920         } else {
6921           _mod_union_table->mark(addr);
6922         }
6923         _collector->_ser_pmc_preclean_ovflw++;
6924      } else {
6925         // During the remark phase, we need to remember this oop
6926         // in the overflow list.
6927         _collector->push_on_overflow_list(obj);
6928         _collector->_ser_pmc_remark_ovflw++;
6929      }
6930    }
6931  }
6932}
6933
6934ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6935                                             MemRegion span,
6936                                             ReferenceProcessor* rp,
6937                                             CMSBitMap* bit_map,
6938                                             OopTaskQueue* work_queue):
6939  MetadataAwareOopClosure(rp),
6940  _collector(collector),
6941  _span(span),
6942  _bit_map(bit_map),
6943  _work_queue(work_queue)
6944{
6945  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6946}
6947
6948void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
6949void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
6950
6951// Grey object rescan during second checkpoint phase --
6952// the parallel version.
6953void ParPushAndMarkClosure::do_oop(oop obj) {
6954  // In the assert below, we ignore the mark word because
6955  // this oop may point to an already visited object that is
6956  // on the overflow stack (in which case the mark word has
6957  // been hijacked for chaining into the overflow stack --
6958  // if this is the last object in the overflow stack then
6959  // its mark word will be NULL). Because this object may
6960  // have been subsequently popped off the global overflow
6961  // stack, and the mark word possibly restored to the prototypical
6962  // value, by the time we get to examine this failing assert in
6963  // the debugger, is_oop_or_null(false) may subsequently start
6964  // to hold.
6965  assert(obj->is_oop_or_null(true),
6966         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6967  HeapWord* addr = (HeapWord*)obj;
6968  // Check if oop points into the CMS generation
6969  // and is not marked
6970  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6971    // a white object ...
6972    // If we manage to "claim" the object, by being the
6973    // first thread to mark it, then we push it on our
6974    // marking stack
6975    if (_bit_map->par_mark(addr)) {     // ... now grey
6976      // push on work queue (grey set)
6977      bool simulate_overflow = false;
6978      NOT_PRODUCT(
6979        if (CMSMarkStackOverflowALot &&
6980            _collector->par_simulate_overflow()) {
6981          // simulate a stack overflow
6982          simulate_overflow = true;
6983        }
6984      )
6985      if (simulate_overflow || !_work_queue->push(obj)) {
6986        _collector->par_push_on_overflow_list(obj);
6987        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
6988      }
6989    } // Else, some other thread got there first
6990  }
6991}
6992
6993void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
6994void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
6995
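    // Give up the bit-map lock and the CMS token so that the VM thread
    // (or a foreground collection) can make progress, sleep briefly while
    // a yield is still being requested, then reacquire them and resume.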
6996void CMSPrecleanRefsYieldClosure::do_yield_work() {
6997  Mutex* bml = _collector->bitMapLock();
6998  assert_lock_strong(bml);
6999  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7000         "CMS thread should hold CMS token");
7001
7002  bml->unlock();
7003  ConcurrentMarkSweepThread::desynchronize(true);
7004
7005  _collector->stopTimer();
7006  _collector->incrementYields();
7007
7008  // See the comment in coordinator_yield()
7009  for (unsigned i = 0; i < CMSYieldSleepCount &&
7010                       ConcurrentMarkSweepThread::should_yield() &&
7011                       !CMSCollector::foregroundGCIsActive(); ++i) {
7012    os::sleep(Thread::current(), 1, false);
7013  }
7014
7015  ConcurrentMarkSweepThread::synchronize(true);
7016  bml->lock();
7017
7018  _collector->startTimer();
7019}
7020
7021bool CMSPrecleanRefsYieldClosure::should_return() {
7022  if (ConcurrentMarkSweepThread::should_yield()) {
7023    do_yield_work();
7024  }
7025  return _collector->foregroundGCIsActive();
7026}
7027
7028void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7029  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7030         "mr should be aligned to start at a card boundary");
7031  // We'd like to assert:
7032  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7033  //        "mr should be a range of cards");
7034  // However, that would be too strong in one case -- the last
7035  // partition ends at _unallocated_block which, in general, can be
7036  // an arbitrary boundary, not necessarily card aligned.
7037  _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
7038  _space->object_iterate_mem(mr, &_scan_cl);
7039}
7040
7041SweepClosure::SweepClosure(CMSCollector* collector,
7042                           ConcurrentMarkSweepGeneration* g,
7043                           CMSBitMap* bitMap, bool should_yield) :
7044  _collector(collector),
7045  _g(g),
7046  _sp(g->cmsSpace()),
7047  _limit(_sp->sweep_limit()),
7048  _freelistLock(_sp->freelistLock()),
7049  _bitMap(bitMap),
7050  _yield(should_yield),
7051  _inFreeRange(false),           // No free range at beginning of sweep
7052  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7053  _lastFreeRangeCoalesced(false),
7054  _freeFinger(g->used_region().start())
7055{
7056  NOT_PRODUCT(
7057    _numObjectsFreed = 0;
7058    _numWordsFreed   = 0;
7059    _numObjectsLive = 0;
7060    _numWordsLive = 0;
7061    _numObjectsAlreadyFree = 0;
7062    _numWordsAlreadyFree = 0;
7063    _last_fc = NULL;
7064
7065    _sp->initializeIndexedFreeListArrayReturnedBytes();
7066    _sp->dictionary()->initialize_dict_returned_bytes();
7067  )
7068  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7069         "sweep _limit out of bounds");
7070  log_develop_trace(gc, sweep)("====================");
7071  log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
7072}
7073
7074void SweepClosure::print_on(outputStream* st) const {
7075  st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7076               p2i(_sp->bottom()), p2i(_sp->end()));
7077  st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7078  st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7079  NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7080  st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7081               _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7082}
7083
7084#ifndef PRODUCT
7085// Assertion checking only:  no useful work in product mode --
7086// however, if any of the flags below become product flags,
7087// you may need to review this code to see if it needs to be
7088// enabled in product mode.
7089SweepClosure::~SweepClosure() {
7090  assert_lock_strong(_freelistLock);
7091  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7092         "sweep _limit out of bounds");
7093  if (inFreeRange()) {
7094    Log(gc, sweep) log;
7095    log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7096    ResourceMark rm;
7097    print_on(log.error_stream());
7098    ShouldNotReachHere();
7099  }
7100
7101  if (log_is_enabled(Debug, gc, sweep)) {
7102    log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7103                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7104    log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7105                         _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7106    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7107    log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7108  }
7109
7110  if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7111    size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7112    size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7113    size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7114    log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
7115                         returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7116  }
7117  log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7118  log_develop_trace(gc, sweep)("================");
7119}
7120#endif  // PRODUCT
7121
7122void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7123    bool freeRangeInFreeLists) {
7124  log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
7125                               p2i(freeFinger), freeRangeInFreeLists);
7126  assert(!inFreeRange(), "Trampling existing free range");
7127  set_inFreeRange(true);
7128  set_lastFreeRangeCoalesced(false);
7129
7130  set_freeFinger(freeFinger);
7131  set_freeRangeInFreeLists(freeRangeInFreeLists);
7132  if (CMSTestInFreeList) {
7133    if (freeRangeInFreeLists) {
7134      FreeChunk* fc = (FreeChunk*) freeFinger;
7135      assert(fc->is_free(), "A chunk on the free list should be free.");
7136      assert(fc->size() > 0, "Free range should have a size");
7137      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7138    }
7139  }
7140}
7141
7142// Note that the sweeper runs concurrently with mutators. Thus,
7143// it is possible for direct allocation in this generation to happen
7144// in the middle of the sweep. Note that the sweeper also coalesces
7145// contiguous free blocks. Thus, unless the sweeper and the allocator
7146// synchronize appropriately, freshly allocated blocks may get swept up.
7147// This is accomplished by the sweeper locking the free lists while
7148// it is sweeping. Thus blocks that are determined to be free are
7149// indeed free. There is however one additional complication:
7150// blocks that have been allocated since the final checkpoint and
7151// mark, will not have been marked and so would be treated as
7152// unreachable and swept up. To prevent this, the allocator marks
7153// the bit map when allocating during the sweep phase. This leads,
7154// however, to a further complication -- objects may have been allocated
7155// but not yet initialized -- in the sense that the header isn't yet
7156// installed. The sweeper cannot then determine the size of the block
7157// in order to skip over it. To deal with this case, we use a technique
7158// (due to Printezis) to encode such uninitialized block sizes in the
7159// bit map. Since the bit map uses a bit for every HeapWord and the
7160// CMS generation has a minimum object size of 3 HeapWords, it follows
7161// that "normal marks" won't be adjacent in the bit map (there will
7162// always be at least two 0 bits between successive 1 bits). We make use
7163// of these "unused" bits to represent uninitialized blocks -- the bit
7164// corresponding to the start of the uninitialized object and the next
7165// bit are both set. Finally, a 1 bit marks the end of the object that
7166// started with the two consecutive 1 bits to indicate its potentially
7167// uninitialized state.
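    // Schematically: an uninitialized block of N HeapWords starting at
    // bit b has bits b, b+1 and b+N-1 set. Since N >= 3, the adjacent
    // pair at b and b+1 can never be mistaken for two "normal" marks,
    // and do_live_chunk() below recovers N from the bit at b+N-1.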
7168
7169size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7170  FreeChunk* fc = (FreeChunk*)addr;
7171  size_t res;
7172
7173  // Check if we are done sweeping. Below we check "addr >= _limit" rather
7174  // than "addr == _limit" because although _limit was a block boundary when
7175  // we started the sweep, it may no longer be one because heap expansion
7176  // may have caused us to coalesce the block ending at the address _limit
7177  // with a newly expanded chunk (this happens when _limit was set to the
7178  // previous _end of the space), so we may have stepped past _limit:
7179  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7180  if (addr >= _limit) { // we have swept up to or past the limit: finish up
7181    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7182           "sweep _limit out of bounds");
7183    assert(addr < _sp->end(), "addr out of bounds");
7184    // Flush any free range we might be holding as a single
7185    // coalesced chunk to the appropriate free list.
7186    if (inFreeRange()) {
7187      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7188             "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7189      flush_cur_free_chunk(freeFinger(),
7190                           pointer_delta(addr, freeFinger()));
7191      log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
7192                                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7193                                   lastFreeRangeCoalesced() ? 1 : 0);
7194    }
7195
7196    // help the iterator loop finish
7197    return pointer_delta(_sp->end(), addr);
7198  }
7199
7200  assert(addr < _limit, "sweep invariant");
7201  // check if we should yield
7202  do_yield_check(addr);
7203  if (fc->is_free()) {
7204    // Chunk that is already free
7205    res = fc->size();
7206    do_already_free_chunk(fc);
7207    debug_only(_sp->verifyFreeLists());
7208    // If we flush the chunk at hand in lookahead_and_flush()
7209    // and it's coalesced with a preceding chunk, then the
7210    // process of "mangling" the payload of the coalesced block
7211    // will cause erasure of the size information from the
7212    // (erstwhile) header of all the coalesced blocks but the
7213    // first, so the first disjunct in the assert will not hold
7214    // in that specific case (in which case the second disjunct
7215    // will hold).
7216    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7217           "Otherwise the size info doesn't change at this step");
7218    NOT_PRODUCT(
7219      _numObjectsAlreadyFree++;
7220      _numWordsAlreadyFree += res;
7221    )
7222    NOT_PRODUCT(_last_fc = fc;)
7223  } else if (!_bitMap->isMarked(addr)) {
7224    // Chunk is fresh garbage
7225    res = do_garbage_chunk(fc);
7226    debug_only(_sp->verifyFreeLists());
7227    NOT_PRODUCT(
7228      _numObjectsFreed++;
7229      _numWordsFreed += res;
7230    )
7231  } else {
7232    // Chunk that is alive.
7233    res = do_live_chunk(fc);
7234    debug_only(_sp->verifyFreeLists());
7235    NOT_PRODUCT(
7236        _numObjectsLive++;
7237        _numWordsLive += res;
7238    )
7239  }
7240  return res;
7241}
7242
7243// For the smart allocation, record the following:
7244//  split deaths - a free chunk is removed from its free list because
7245//      it is being split into two or more chunks.
7246//  split birth - a free chunk is being added to its free list because
7247//      a larger free chunk has been split and resulted in this free chunk.
7248//  coal death - a free chunk is being removed from its free list because
7249//      it is being coalesced into a large free chunk.
7250//  coal birth - a free chunk is being added to its free list because
7251//      it was created when two or more free chunks were coalesced into
7252//      this free chunk.
7253//
7254// These statistics are used to determine the desired number of free
7255// chunks of a given size.  The desired number is chosen to be relative
7256// to the end of a CMS sweep.  The desired number at the end of a sweep
7257// is the
7258//      count-at-end-of-previous-sweep (an amount that was enough)
7259//              - count-at-beginning-of-current-sweep  (the excess)
7260//              + split-births  (gains in this size during interval)
7261//              - split-deaths  (demands on this size during interval)
7262// where the interval is from the end of one sweep to the end of the
7263// next.
7264//
7265// When sweeping, the sweeper maintains an accumulated chunk which is
7266// the chunk that is made up of chunks that have been coalesced.  That
7267// will be termed the left-hand chunk.  A new chunk of garbage that
7268// is being considered for coalescing will be referred to as the
7269// right-hand chunk.
7270//
7271// When making a decision on whether to coalesce a right-hand chunk with
7272// the current left-hand chunk, the current count vs. the desired count
7273// of the left-hand chunk is considered.  Also if the right-hand chunk
7274// is near the large chunk at the end of the heap (see
7275// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7276// right-hand chunk is coalesced with the left-hand chunk.
7277//
7278// When making a decision about whether to split a chunk, the desired count
7279// vs. the current count of the candidate to be split is also considered.
7280// If the candidate is underpopulated (currently fewer chunks than desired),
7281// a chunk of an overpopulated (currently more chunks than desired) size may
7282// be chosen.  The "hint" associated with a free list, if non-null, points
7283// to a free list which may be overpopulated.
7284//
7285
7286void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7287  const size_t size = fc->size();
7288  // Chunks that cannot be coalesced are not in the
7289  // free lists.
7290  if (CMSTestInFreeList && !fc->cantCoalesce()) {
7291    assert(_sp->verify_chunk_in_free_list(fc),
7292           "free chunk should be in free lists");
7293  }
7294  // a chunk that is already free, should not have been
7295  // marked in the bit map
7296  HeapWord* const addr = (HeapWord*) fc;
7297  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7298  // Verify that the bit map has no bits marked between
7299  // addr and purported end of this block.
7300  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7301
7302  // Some chunks cannot be coalesced under any circumstances.
7303  // See the definition of cantCoalesce().
7304  if (!fc->cantCoalesce()) {
7305    // This chunk can potentially be coalesced.
7306    // All the work is done in
7307    do_post_free_or_garbage_chunk(fc, size);
7308    // Note that if the chunk is not coalescable (the else arm
7309    // below), we unconditionally flush, without needing to do
7310    // a "lookahead," as we do below.
7311    if (inFreeRange()) lookahead_and_flush(fc, size);
7312  } else {
7313    // Code path common to both original and adaptive free lists.
7314
7315    // can't coalesce with the previous block; this should be treated
7316    // as the end of a free run if any
7317    if (inFreeRange()) {
7318      // we kicked some butt; time to pick up the garbage
7319      assert(freeFinger() < addr, "freeFinger points too high");
7320      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7321    }
7322    // else, nothing to do, just continue
7323  }
7324}
7325
7326size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7327  // This is a chunk of garbage.  It is not in any free list.
7328  // Add it to a free list or let it possibly be coalesced into
7329  // a larger chunk.
7330  HeapWord* const addr = (HeapWord*) fc;
7331  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7332
7333  // Verify that the bit map has no bits marked between
7334  // addr and purported end of just dead object.
7335  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7336  do_post_free_or_garbage_chunk(fc, size);
7337
7338  assert(_limit >= addr + size,
7339         "A freshly garbage chunk can't possibly straddle over _limit");
7340  if (inFreeRange()) lookahead_and_flush(fc, size);
7341  return size;
7342}
7343
7344size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7345  HeapWord* addr = (HeapWord*) fc;
7346  // The sweeper has just found a live object. Return any accumulated
7347  // left hand chunk to the free lists.
7348  if (inFreeRange()) {
7349    assert(freeFinger() < addr, "freeFinger points too high");
7350    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7351  }
7352
7353  // This object is live: we'd normally expect this to be
7354  // an oop, and like to assert the following:
7355  // assert(oop(addr)->is_oop(), "live block should be an oop");
7356  // However, as we commented above, this may be an object whose
7357  // header hasn't yet been initialized.
7358  size_t size;
7359  assert(_bitMap->isMarked(addr), "Tautology for this control point");
7360  if (_bitMap->isMarked(addr + 1)) {
7361    // Determine the size from the bit map, rather than trying to
7362    // compute it from the object header.
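        // (Printezis-mark case: the bit at addr + 1 is also set, and the
        // next set bit at or beyond addr + 2 marks the last word of the
        // block, so the block spans [addr, nextOneAddr + 1).)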
7363    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7364    size = pointer_delta(nextOneAddr + 1, addr);
7365    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7366           "alignment problem");
7367
7368#ifdef ASSERT
7369      if (oop(addr)->klass_or_null() != NULL) {
7370        // Ignore mark word because we are running concurrently with mutators
7371        assert(oop(addr)->is_oop(true), "live block should be an oop");
7372        assert(size ==
7373               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7374               "P-mark and computed size do not agree");
7375      }
7376#endif
7377
7378  } else {
7379    // This should be an initialized object that's alive.
7380    assert(oop(addr)->klass_or_null() != NULL,
7381           "Should be an initialized object");
7382    // Ignore mark word because we are running concurrently with mutators
7383    assert(oop(addr)->is_oop(true), "live block should be an oop");
7384    // Verify that the bit map has no bits marked between
7385    // addr and purported end of this block.
7386    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7387    assert(size >= 3, "Necessary for Printezis marks to work");
7388    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7389    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7390  }
7391  return size;
7392}
7393
7394void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7395                                                 size_t chunkSize) {
7396  // do_post_free_or_garbage_chunk() should only be called in the case
7397  // of the adaptive free list allocator.
7398  const bool fcInFreeLists = fc->is_free();
7399  assert((HeapWord*)fc <= _limit, "sweep invariant");
7400  if (CMSTestInFreeList && fcInFreeLists) {
7401    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7402  }
7403
7404  log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7405
7406  HeapWord* const fc_addr = (HeapWord*) fc;
7407
7408  bool coalesce = false;
7409  const size_t left  = pointer_delta(fc_addr, freeFinger());
7410  const size_t right = chunkSize;
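      // When a free range is being tracked, 'left' is the word size of the
      // range accumulated so far (the left-hand chunk) and 'right' is the
      // word size of the chunk now being considered (the right-hand chunk).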
7411  switch (FLSCoalescePolicy) {
7412    // numeric value forms a coalescing aggressiveness metric
7413    case 0:  { // never coalesce
7414      coalesce = false;
7415      break;
7416    }
7417    case 1: { // coalesce if left & right chunks on overpopulated lists
7418      coalesce = _sp->coalOverPopulated(left) &&
7419                 _sp->coalOverPopulated(right);
7420      break;
7421    }
7422    case 2: { // coalesce if left chunk on overpopulated list (default)
7423      coalesce = _sp->coalOverPopulated(left);
7424      break;
7425    }
7426    case 3: { // coalesce if left OR right chunk on overpopulated list
7427      coalesce = _sp->coalOverPopulated(left) ||
7428                 _sp->coalOverPopulated(right);
7429      break;
7430    }
7431    case 4: { // always coalesce
7432      coalesce = true;
7433      break;
7434    }
7435    default:
7436     ShouldNotReachHere();
7437  }
7438
7439  // Should the current free range be coalesced?
7440  // If the chunk is in a free range and either we decided to coalesce above
7441  // or the chunk is near the large block at the end of the heap
7442  // (isNearLargestChunk() returns true), then coalesce this chunk.
7443  const bool doCoalesce = inFreeRange()
7444                          && (coalesce || _g->isNearLargestChunk(fc_addr));
7445  if (doCoalesce) {
7446    // Coalesce the current free range on the left with the new
7447    // chunk on the right.  If either is on a free list,
7448    // it must be removed from the list and stashed in the closure.
7449    if (freeRangeInFreeLists()) {
7450      FreeChunk* const ffc = (FreeChunk*)freeFinger();
7451      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7452             "Size of free range is inconsistent with chunk size.");
7453      if (CMSTestInFreeList) {
7454        assert(_sp->verify_chunk_in_free_list(ffc),
7455               "Chunk is not in free lists");
7456      }
7457      _sp->coalDeath(ffc->size());
7458      _sp->removeFreeChunkFromFreeLists(ffc);
7459      set_freeRangeInFreeLists(false);
7460    }
7461    if (fcInFreeLists) {
7462      _sp->coalDeath(chunkSize);
7463      assert(fc->size() == chunkSize,
7464        "The chunk has the wrong size or is not in the free lists");
7465      _sp->removeFreeChunkFromFreeLists(fc);
7466    }
7467    set_lastFreeRangeCoalesced(true);
7468    print_free_block_coalesced(fc);
7469  } else {  // not in a free range and/or should not coalesce
7470    // Return the current free range and start a new one.
7471    if (inFreeRange()) {
7472      // In a free range but cannot coalesce with the right hand chunk.
7473      // Put the current free range into the free lists.
7474      flush_cur_free_chunk(freeFinger(),
7475                           pointer_delta(fc_addr, freeFinger()));
7476    }
7477    // Set up for new free range.  Pass along whether the right hand
7478    // chunk is in the free lists.
7479    initialize_free_range((HeapWord*)fc, fcInFreeLists);
7480  }
7481}
7482
7483// Lookahead flush:
7484// If we are tracking a free range, and this is the last chunk that
7485// we'll look at because its end crosses past _limit, we'll preemptively
7486// flush it along with any free range we may be holding on to. Note that
7487// this can be the case only for an already free or freshly garbage
7488// chunk. If this block is an object, it can never straddle
7489// over _limit. The "straddling" occurs when _limit is set at
7490// the previous end of the space when this cycle started, and
7491// a subsequent heap expansion caused the previously co-terminal
7492// free block to be coalesced with the newly expanded portion,
7493// thus rendering _limit a non-block-boundary making it dangerous
7494// for the sweeper to step over and examine.
7495void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7496  assert(inFreeRange(), "Should only be called if currently in a free range.");
7497  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7498  assert(_sp->used_region().contains(eob - 1),
7499         "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7500         " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7501         " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7502         p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7503  if (eob >= _limit) {
7504    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7505    log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
7506                                 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7507                                 "[" PTR_FORMAT "," PTR_FORMAT ")",
7508                                 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7509    // Return the storage we are tracking back into the free lists.
7510    log_develop_trace(gc, sweep)("Flushing ... ");
7511    assert(freeFinger() < eob, "Error");
7512    flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
7513  }
7514}
7515
7516void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7517  assert(inFreeRange(), "Should only be called if currently in a free range.");
7518  assert(size > 0,
7519    "A zero sized chunk cannot be added to the free lists.");
7520  if (!freeRangeInFreeLists()) {
7521    if (CMSTestInFreeList) {
7522      FreeChunk* fc = (FreeChunk*) chunk;
7523      fc->set_size(size);
7524      assert(!_sp->verify_chunk_in_free_list(fc),
7525             "chunk should not be in free lists yet");
7526    }
7527    log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
7528    // A new free range is going to be starting.  The current
7529    // free range has not been added to the free lists yet or
7530    // was removed so add it back.
7531    // If the current free range was coalesced, then the death
7532    // of the free range was recorded.  Record a birth now.
7533    if (lastFreeRangeCoalesced()) {
7534      _sp->coalBirth(size);
7535    }
7536    _sp->addChunkAndRepairOffsetTable(chunk, size,
7537            lastFreeRangeCoalesced());
7538  } else {
7539    log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
7540  }
7541  set_inFreeRange(false);
7542  set_freeRangeInFreeLists(false);
7543}
7544
7545// We take a break if we've been at this for a while,
7546// so as to avoid monopolizing the locks involved.
7547void SweepClosure::do_yield_work(HeapWord* addr) {
7548  // Return current free chunk being used for coalescing (if any)
7549  // to the appropriate freelist.  After yielding, the next
7550  // free block encountered will start a coalescing range of
7551  // free blocks.  If the next free block is adjacent to the
7552  // chunk just flushed, they will need to wait for the next
7553  // sweep to be coalesced.
7554  if (inFreeRange()) {
7555    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7556  }
7557
7558  // First give up the locks, then yield, then re-lock.
7559  // We should probably use a constructor/destructor idiom to
7560  // do this unlock/lock or modify the MutexUnlocker class to
7561  // serve our purpose. XXX
7562  assert_lock_strong(_bitMap->lock());
7563  assert_lock_strong(_freelistLock);
7564  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7565         "CMS thread should hold CMS token");
7566  _bitMap->lock()->unlock();
7567  _freelistLock->unlock();
7568  ConcurrentMarkSweepThread::desynchronize(true);
7569  _collector->stopTimer();
7570  _collector->incrementYields();
7571
7572  // See the comment in coordinator_yield()
7573  for (unsigned i = 0; i < CMSYieldSleepCount &&
7574                       ConcurrentMarkSweepThread::should_yield() &&
7575                       !CMSCollector::foregroundGCIsActive(); ++i) {
7576    os::sleep(Thread::current(), 1, false);
7577  }
7578
7579  ConcurrentMarkSweepThread::synchronize(true);
7580  _freelistLock->lock();
7581  _bitMap->lock()->lock_without_safepoint_check();
7582  _collector->startTimer();
7583}
7584
7585#ifndef PRODUCT
7586// This is actually very useful in a product build if it can
7587// be called from the debugger.  Compile it into the product
7588// as needed.
7589bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7590  return debug_cms_space->verify_chunk_in_free_list(fc);
7591}
7592#endif
7593
7594void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7595  log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7596                               p2i(fc), fc->size());
7597}
7598
7599// CMSIsAliveClosure
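    // An object is deemed alive if it lies outside the span being collected
    // (and is therefore not subject to this collection) or if it has been
    // marked in the CMS bit map.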
7600bool CMSIsAliveClosure::do_object_b(oop obj) {
7601  HeapWord* addr = (HeapWord*)obj;
7602  return addr != NULL &&
7603         (!_span.contains(addr) || _bit_map->isMarked(addr));
7604}
7605
7606
7607CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
7608                      MemRegion span,
7609                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7610                      bool cpc):
7611  _collector(collector),
7612  _span(span),
7613  _bit_map(bit_map),
7614  _mark_stack(mark_stack),
7615  _concurrent_precleaning(cpc) {
7616  assert(!_span.is_empty(), "Empty span could spell trouble");
7617}
7618
7619
7620// CMSKeepAliveClosure: the serial version
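    // Marks the object (if it lies in the span and is not yet marked) and
    // pushes it on the mark stack for further tracing; if the push fails,
    // the object is either redirtied in the mod union table (during
    // precleaning) or put on the overflow list (during remark).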
7621void CMSKeepAliveClosure::do_oop(oop obj) {
7622  HeapWord* addr = (HeapWord*)obj;
7623  if (_span.contains(addr) &&
7624      !_bit_map->isMarked(addr)) {
7625    _bit_map->mark(addr);
7626    bool simulate_overflow = false;
7627    NOT_PRODUCT(
7628      if (CMSMarkStackOverflowALot &&
7629          _collector->simulate_overflow()) {
7630        // simulate a stack overflow
7631        simulate_overflow = true;
7632      }
7633    )
7634    if (simulate_overflow || !_mark_stack->push(obj)) {
7635      if (_concurrent_precleaning) {
7636        // We dirty the overflown object and let the remark
7637        // phase deal with it.
7638        assert(_collector->overflow_list_is_empty(), "Error");
7639        // In the case of object arrays, we need to dirty all of
7640        // the cards that the object spans. No locking or atomics
7641        // are needed since no one else can be mutating the mod union
7642        // table.
7643        if (obj->is_objArray()) {
7644          size_t sz = obj->size();
7645          HeapWord* end_card_addr =
7646            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7647          MemRegion redirty_range = MemRegion(addr, end_card_addr);
7648          assert(!redirty_range.is_empty(), "Arithmetical tautology");
7649          _collector->_modUnionTable.mark_range(redirty_range);
7650        } else {
7651          _collector->_modUnionTable.mark(addr);
7652        }
7653        _collector->_ser_kac_preclean_ovflw++;
7654      } else {
7655        _collector->push_on_overflow_list(obj);
7656        _collector->_ser_kac_ovflw++;
7657      }
7658    }
7659  }
7660}
7661
7662void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
7663void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
7664
7665// CMSParKeepAliveClosure: a parallel version of the above.
7666// The work queues are private to each closure (thread),
7667// but (may be) available for stealing by other threads.
7668void CMSParKeepAliveClosure::do_oop(oop obj) {
7669  HeapWord* addr = (HeapWord*)obj;
7670  if (_span.contains(addr) &&
7671      !_bit_map->isMarked(addr)) {
7672    // In general, during recursive tracing, several threads
7673    // may be concurrently getting here; the first one to
7674    // "tag" it, claims it.
7675    if (_bit_map->par_mark(addr)) {
7676      bool res = _work_queue->push(obj);
7677      assert(res, "Low water mark should be much less than capacity");
7678      // Do a recursive trim in the hope that this will keep
7679      // stack usage lower, but leave some oops for potential stealers
7680      trim_queue(_low_water_mark);
7681    } // Else, another thread got there first
7682  }
7683}
7684
7685void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
7686void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7687
7688void CMSParKeepAliveClosure::trim_queue(uint max) {
7689  while (_work_queue->size() > max) {
7690    oop new_oop;
7691    if (_work_queue->pop_local(new_oop)) {
7692      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7693      assert(_bit_map->isMarked((HeapWord*)new_oop),
7694             "no white objects on this stack!");
7695      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7696      // iterate over the oops in this oop, marking and pushing
7697      // the ones in CMS heap (i.e. in _span).
7698      new_oop->oop_iterate(&_mark_and_push);
7699    }
7700  }
7701}
7702
7703CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7704                                CMSCollector* collector,
7705                                MemRegion span, CMSBitMap* bit_map,
7706                                OopTaskQueue* work_queue):
7707  _collector(collector),
7708  _span(span),
7709  _bit_map(bit_map),
7710  _work_queue(work_queue) { }
7711
7712void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7713  HeapWord* addr = (HeapWord*)obj;
7714  if (_span.contains(addr) &&
7715      !_bit_map->isMarked(addr)) {
7716    if (_bit_map->par_mark(addr)) {
7717      bool simulate_overflow = false;
7718      NOT_PRODUCT(
7719        if (CMSMarkStackOverflowALot &&
7720            _collector->par_simulate_overflow()) {
7721          // simulate a stack overflow
7722          simulate_overflow = true;
7723        }
7724      )
7725      if (simulate_overflow || !_work_queue->push(obj)) {
7726        _collector->par_push_on_overflow_list(obj);
7727        _collector->_par_kac_ovflw++;
7728      }
7729    } // Else another thread got there already
7730  }
7731}
7732
7733void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7734void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7735
7736//////////////////////////////////////////////////////////////////
7737//  CMSExpansionCause                /////////////////////////////
7738//////////////////////////////////////////////////////////////////
7739const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7740  switch (cause) {
7741    case _no_expansion:
7742      return "No expansion";
7743    case _satisfy_free_ratio:
7744      return "Free ratio";
7745    case _satisfy_promotion:
7746      return "Satisfy promotion";
7747    case _satisfy_allocation:
7748      return "Allocation";
7749    case _allocate_par_lab:
7750      return "Par LAB";
7751    case _allocate_par_spooling_space:
7752      return "Par Spooling Space";
7753    case _adaptive_size_policy:
7754      return "Ergonomics";
7755    default:
7756      return "unknown";
7757  }
7758}
7759
7760void CMSDrainMarkingStackClosure::do_void() {
7761  // The maximum number of oops to take from the overflow list at a time.
7762  const size_t num = _mark_stack->capacity()/4;
7763  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7764         "Overflow list should be NULL during concurrent phases");
7765  while (!_mark_stack->isEmpty() ||
7766         // if stack is empty, check the overflow list
7767         _collector->take_from_overflow_list(num, _mark_stack)) {
7768    oop obj = _mark_stack->pop();
7769    HeapWord* addr = (HeapWord*)obj;
7770    assert(_span.contains(addr), "Should be within span");
7771    assert(_bit_map->isMarked(addr), "Should be marked");
7772    assert(obj->is_oop(), "Should be an oop");
7773    obj->oop_iterate(_keep_alive);
7774  }
7775}
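// Note: the loop condition above refills the mark stack from the overflow
// list only once the stack has drained (the || short-circuits), so the
// drain terminates only when both the mark stack and the overflow list
// are empty.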
7776
7777void CMSParDrainMarkingStackClosure::do_void() {
7778  // drain queue
7779  trim_queue(0);
7780}
7781
7782// Trim our work_queue so that its length does not exceed max on return.
7783void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7784  while (_work_queue->size() > max) {
7785    oop new_oop;
7786    if (_work_queue->pop_local(new_oop)) {
7787      assert(new_oop->is_oop(), "Expected an oop");
7788      assert(_bit_map->isMarked((HeapWord*)new_oop),
7789             "no white objects on this stack!");
7790      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7791      // iterate over the oops in this oop, marking and pushing
7792      // the ones in the CMS heap (i.e., in _span).
7793      new_oop->oop_iterate(&_mark_and_push);
7794    }
7795  }
7796}
7797
7798////////////////////////////////////////////////////////////////////
7799// Support for Marking Stack Overflow list handling and related code
7800////////////////////////////////////////////////////////////////////
7801// Much of the following code is similar in shape and spirit to the
7802// code used in ParNewGC. We should try to share that code
7803// as much as possible in the future.
7804
7805#ifndef PRODUCT
7806// Debugging support for CMSMarkStackOverflowALot
7807
7808// It's OK to call this multi-threaded; the worst that
7809// can happen is that we'll get a bunch of closely
7810// spaced simulated overflows, which is in fact
7811// probably good, since it exercises the overflow
7812// code under contention.
7813bool CMSCollector::simulate_overflow() {
7814  if (_overflow_counter-- <= 0) { // just being defensive
7815    _overflow_counter = CMSMarkStackOverflowInterval;
7816    return true;
7817  } else {
7818    return false;
7819  }
7820}
7821
7822bool CMSCollector::par_simulate_overflow() {
7823  return simulate_overflow();
7824}
7825#endif
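// With the counter logic above, roughly one in every
// CMSMarkStackOverflowInterval calls reports a simulated overflow. Under
// concurrent callers the interval is only approximate, since the decrement
// is not atomic, but that is acceptable for a debugging aid.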
7826
7827// Single-threaded
7828bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7829  assert(stack->isEmpty(), "Expected precondition");
7830  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
7831  size_t i = num;
7832  oop  cur = _overflow_list;
7833  const markOop proto = markOopDesc::prototype();
7834  NOT_PRODUCT(ssize_t n = 0;)
7835  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7836    next = oop(cur->mark());
7837    cur->set_mark(proto);   // until proven otherwise
7838    assert(cur->is_oop(), "Should be an oop");
7839    bool res = stack->push(cur);
7840    assert(res, "Bit off more than can chew?");
7841    NOT_PRODUCT(n++;)
7842  }
7843  _overflow_list = cur;
7844#ifndef PRODUCT
7845  assert(_num_par_pushes >= n, "Too many pops?");
7846  _num_par_pushes -= n;
7847#endif
7848  return !stack->isEmpty();
7849}
7850
7851#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
7852// (MT-safe) Get a prefix of at most "num" from the list.
7853// The overflow list is chained through the mark word of
7854// each object in the list. We fetch the entire list,
7855// break off a prefix of the right size and return the
7856// remainder. If other threads try to take objects from
7857// the overflow list at that time, they will wait for
7858// some time to see if data becomes available. If (and
7859// only if) another thread places one or more object(s)
7860// on the global list before we have returned the suffix
7861// to the global list, we will walk down our local list
7862// to find its end and append the global list to
7863// our suffix before returning it. This suffix walk can
7864// prove to be expensive (quadratic in the amount of traffic)
7865// when there are many objects in the overflow list and
7866// there is much producer-consumer contention on the list.
7867// *NOTE*: The overflow list manipulation code here and
7868// in ParNewGeneration:: are very similar in shape,
7869// except that in the ParNew case we use the old (from/eden)
7870// copy of the object to thread the list via its klass word.
7871// Because of the common code, if you make any changes in
7872// the code below, please check the ParNew version to see if
7873// similar changes might be needed.
7874// CR 6797058 has been filed to consolidate the common code.
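// Illustrative example (not part of the algorithm): suppose the overflow
// list currently holds objects A, B and C chained through their mark
// words, i.e. _overflow_list == A, A->mark() == (markOop)B,
// B->mark() == (markOop)C and C->mark() == NULL. A request for a prefix
// of size 2 exchanges BUSY into _overflow_list, walks A -> B, breaks the
// chain after B, publishes C back as the new global list (splicing in
// anything other threads prepended in the meantime), and finally pushes
// A and B on the caller's work queue with their mark words reset to the
// prototype value.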
7875bool CMSCollector::par_take_from_overflow_list(size_t num,
7876                                               OopTaskQueue* work_q,
7877                                               int no_of_gc_threads) {
7878  assert(work_q->size() == 0, "First empty local work queue");
7879  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7880  if (_overflow_list == NULL) {
7881    return false;
7882  }
7883  // Grab the entire list; we'll put back a suffix
7884  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7885  Thread* tid = Thread::current();
7886  // Before "no_of_gc_threads" was introduced, CMSOverflowSpinCount was
7887  // set to ParallelGCThreads.
7888  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
7889  size_t sleep_time_millis = MAX2((size_t)1, num/100);
7890  // If the list is busy, we spin for a short while,
7891  // sleeping between attempts to get the list.
7892  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7893    os::sleep(tid, sleep_time_millis, false);
7894    if (_overflow_list == NULL) {
7895      // Nothing left to take
7896      return false;
7897    } else if (_overflow_list != BUSY) {
7898      // Try and grab the prefix
7899      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7900    }
7901  }
7902  // If the list was found to be empty, or we spun long
7903  // enough, we give up and return empty-handed. If we leave
7904  // the list in the BUSY state below, it must be the case that
7905  // some other thread holds the overflow list and will set it
7906  // to a non-BUSY state in the future.
7907  if (prefix == NULL || prefix == BUSY) {
7908     // Nothing to take or waited long enough
7909     if (prefix == NULL) {
7910       // Write back the NULL in case we overwrote it with BUSY above
7911       // and it is still the same value.
7912       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7913     }
7914     return false;
7915  }
7916  assert(prefix != NULL && prefix != BUSY, "Error");
7917  size_t i = num;
7918  oop cur = prefix;
7919  // Walk down the first "num" objects, unless we reach the end.
7920  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
7921  if (cur->mark() == NULL) {
7922    // We have "num" or fewer elements in the list, so there
7923    // is nothing to return to the global list.
7924    // Write back the NULL in lieu of the BUSY we wrote
7925    // above, if it is still the same value.
7926    if (_overflow_list == BUSY) {
7927      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7928    }
7929  } else {
7930    // Chop off the suffix and return it to the global list.
7931    assert(cur->mark() != BUSY, "Error");
7932    oop suffix_head = cur->mark(); // suffix will be put back on global list
7933    cur->set_mark(NULL);           // break off suffix
7934    // It's possible that the list is still in the empty (BUSY) state
7935    // we left it in a short while ago; in that case we may be
7936    // able to place back the suffix without incurring the cost
7937    // of a walk down the list.
7938    oop observed_overflow_list = _overflow_list;
7939    oop cur_overflow_list = observed_overflow_list;
7940    bool attached = false;
7941    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7942      observed_overflow_list =
7943        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7944      if (cur_overflow_list == observed_overflow_list) {
7945        attached = true;
7946        break;
7947      } else cur_overflow_list = observed_overflow_list;
7948    }
7949    if (!attached) {
7950      // Too bad, someone else sneaked in (at least) an element; we'll need
7951      // to do a splice. Find tail of suffix so we can prepend suffix to global
7952      // list.
7953      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
7954      oop suffix_tail = cur;
7955      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
7956             "Tautology");
7957      observed_overflow_list = _overflow_list;
7958      do {
7959        cur_overflow_list = observed_overflow_list;
7960        if (cur_overflow_list != BUSY) {
7961          // Do the splice ...
7962          suffix_tail->set_mark(markOop(cur_overflow_list));
7963        } else { // cur_overflow_list == BUSY
7964          suffix_tail->set_mark(NULL);
7965        }
7966        // ... and try to place spliced list back on overflow_list ...
7967        observed_overflow_list =
7968          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7969      } while (cur_overflow_list != observed_overflow_list);
7970      // ... until we have succeeded in doing so.
7971    }
7972  }
7973
7974  // Push the prefix elements on work_q
7975  assert(prefix != NULL, "control point invariant");
7976  const markOop proto = markOopDesc::prototype();
7977  oop next;
7978  NOT_PRODUCT(ssize_t n = 0;)
7979  for (cur = prefix; cur != NULL; cur = next) {
7980    next = oop(cur->mark());
7981    cur->set_mark(proto);   // until proven otherwise
7982    assert(cur->is_oop(), "Should be an oop");
7983    bool res = work_q->push(cur);
7984    assert(res, "Bit off more than we can chew?");
7985    NOT_PRODUCT(n++;)
7986  }
7987#ifndef PRODUCT
7988  assert(_num_par_pushes >= n, "Too many pops?");
7989  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
7990#endif
7991  return true;
7992}
7993
7994// Single-threaded
7995void CMSCollector::push_on_overflow_list(oop p) {
7996  NOT_PRODUCT(_num_par_pushes++;)
7997  assert(p->is_oop(), "Not an oop");
7998  preserve_mark_if_necessary(p);
7999  p->set_mark((markOop)_overflow_list);
8000  _overflow_list = p;
8001}
8002
8003// Multi-threaded; use CAS to prepend to overflow list
8004void CMSCollector::par_push_on_overflow_list(oop p) {
8005  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8006  assert(p->is_oop(), "Not an oop");
8007  par_preserve_mark_if_necessary(p);
8008  oop observed_overflow_list = _overflow_list;
8009  oop cur_overflow_list;
8010  do {
8011    cur_overflow_list = observed_overflow_list;
8012    if (cur_overflow_list != BUSY) {
8013      p->set_mark(markOop(cur_overflow_list));
8014    } else {
8015      p->set_mark(NULL);
8016    }
8017    observed_overflow_list =
8018      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8019  } while (cur_overflow_list != observed_overflow_list);
8020}
8021#undef BUSY
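// The loop in par_push_on_overflow_list() above is a classic lock-free
// prepend: the new element's mark word is pointed at the currently
// observed list head (or set to NULL if the observed head is the
// transient BUSY marker), and a CAS then installs the element as the new
// head; if the CAS loses a race, the freshly observed head is used and
// the attempt is retried.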
8022
8023// Single threaded
8024// General Note on GrowableArray: pushes may silently fail
8025// because we are (temporarily) out of C-heap for expanding
8026// the stack. The problem is quite ubiquitous and affects
8027// a lot of code in the JVM. The prudent thing for GrowableArray
8028// to do (for now) is to exit with an error. However, that may
8029// be too draconian in some cases because the caller may be
8030// able to recover without much harm. For such cases, we
8031// should probably introduce a "soft_push" method which returns
8032// an indication of success or failure with the assumption that
8033// the caller may be able to recover from a failure; code in
8034// the VM can then be changed, incrementally, to deal with such
8035// failures where possible, thereby incrementally hardening the VM
8036// in such low-resource situations.
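// For illustration only, one hypothetical shape such a "soft_push" could
// take; no such method exists in GrowableArray today, and the names and
// semantics below are assumptions, not an actual API:
//
//   // Returns false instead of aborting when the backing storage cannot
//   // be expanded, leaving recovery to the caller.
//   bool soft_push(const E& elem) {
//     if (is_full() && !try_expand()) {   // hypothetical helpers
//       return false;
//     }
//     append(elem);                       // expansion no longer needed here
//     return true;
//   }
//
// A caller that can tolerate failure would check the result and fall back
// to, e.g., an overflow list, much as the closures above do when a mark
// stack or work queue push fails.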
8037void CMSCollector::preserve_mark_work(oop p, markOop m) {
8038  _preserved_oop_stack.push(p);
8039  _preserved_mark_stack.push(m);
8040  assert(m == p->mark(), "Mark word changed");
8041  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8042         "bijection");
8043}
8044
8045// Single threaded
8046void CMSCollector::preserve_mark_if_necessary(oop p) {
8047  markOop m = p->mark();
8048  if (m->must_be_preserved(p)) {
8049    preserve_mark_work(p, m);
8050  }
8051}
8052
8053void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8054  markOop m = p->mark();
8055  if (m->must_be_preserved(p)) {
8056    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8057    // Even though we read the mark word without holding
8058    // the lock, we are assured that it will not change
8059    // because we "own" this oop, so no other thread can
8060    // be trying to push it on the overflow list; see
8061    // the assertion in preserve_mark_work() that checks
8062    // that m == p->mark().
8063    preserve_mark_work(p, m);
8064  }
8065}
8066
8067// We should be able to do this multi-threaded,
8068// with a chunk of the stack being a task (this is
8069// correct because each oop only ever appears
8070// once in the overflow list). However, it's
8071// not very easy to completely overlap this with
8072// other operations, so it will generally not be done
8073// until all work has been completed. Because we
8074// expect the preserved oop stack (set) to be small,
8075// it's probably fine to do this single-threaded.
8076// We can explore cleverer concurrent/overlapped/parallel
8077// processing of preserved marks if we feel the
8078// need for this in the future. Stack overflow should
8079// be so rare in practice and, when it happens, its
8080// effect on performance so great that this will
8081// likely just be in the noise anyway.
8082void CMSCollector::restore_preserved_marks_if_any() {
8083  assert(SafepointSynchronize::is_at_safepoint(),
8084         "world should be stopped");
8085  assert(Thread::current()->is_ConcurrentGC_thread() ||
8086         Thread::current()->is_VM_thread(),
8087         "should be single-threaded");
8088  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8089         "bijection");
8090
8091  while (!_preserved_oop_stack.is_empty()) {
8092    oop p = _preserved_oop_stack.pop();
8093    assert(p->is_oop(), "Should be an oop");
8094    assert(_span.contains(p), "oop should be in _span");
8095    assert(p->mark() == markOopDesc::prototype(),
8096           "Set when taken from overflow list");
8097    markOop m = _preserved_mark_stack.pop();
8098    p->set_mark(m);
8099  }
8100  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8101         "stacks were cleared above");
8102}
8103
8104#ifndef PRODUCT
8105bool CMSCollector::no_preserved_marks() const {
8106  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8107}
8108#endif
8109
8110// Transfer some number of objects from the overflow list to the usual
8111// marking stack. Return true if some objects were transferred.
8112bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8113  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8114                    (size_t)ParGCDesiredObjsFromOverflowList);
8115
8116  bool res = _collector->take_from_overflow_list(num, _mark_stack);
8117  assert(_collector->overflow_list_is_empty() || res,
8118         "If list is not empty, we should have taken something");
8119  assert(!res || !_mark_stack->isEmpty(),
8120         "If we took something, it should now be on our stack");
8121  return res;
8122}
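// Worked example with illustrative numbers only: with a mark stack
// capacity of 4096 entries of which 96 are in use, the first term is
// (4096 - 96) / 4 = 1000, so num becomes the smaller of 1000 and
// ParGCDesiredObjsFromOverflowList (assumed here to be at its usual small
// default), and at most that many objects are moved onto the mark stack.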
8123
8124size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8125  size_t res = _sp->block_size_no_stall(addr, _collector);
8126  if (_sp->block_is_obj(addr)) {
8127    if (_live_bit_map->isMarked(addr)) {
8128      // It can't have been dead in a previous cycle
8129      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8130    } else {
8131      _dead_bit_map->mark(addr);      // mark the dead object
8132    }
8133  }
8134  // Could be 0, if the block size could not be computed without stalling.
8135  return res;
8136}
8137
8138TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8139
8140  switch (phase) {
8141    case CMSCollector::InitialMarking:
8142      initialize(true  /* fullGC */ ,
8143                 cause /* cause of the GC */,
8144                 true  /* recordGCBeginTime */,
8145                 true  /* recordPreGCUsage */,
8146                 false /* recordPeakUsage */,
8147                 false /* recordPostGCUsage */,
8148                 true  /* recordAccumulatedGCTime */,
8149                 false /* recordGCEndTime */,
8150                 false /* countCollection */  );
8151      break;
8152
8153    case CMSCollector::FinalMarking:
8154      initialize(true  /* fullGC */ ,
8155                 cause /* cause of the GC */,
8156                 false /* recordGCBeginTime */,
8157                 false /* recordPreGCUsage */,
8158                 false /* recordPeakUsage */,
8159                 false /* recordPostGCUsage */,
8160                 true  /* recordAccumulatedGCTime */,
8161                 false /* recordGCEndTime */,
8162                 false /* countCollection */  );
8163      break;
8164
8165    case CMSCollector::Sweeping:
8166      initialize(true  /* fullGC */ ,
8167                 cause /* cause of the GC */,
8168                 false /* recordGCBeginTime */,
8169                 false /* recordPreGCUsage */,
8170                 true  /* recordPeakUsage */,
8171                 true  /* recordPostGCUsage */,
8172                 false /* recordAccumulatedGCTime */,
8173                 true  /* recordGCEndTime */,
8174                 true  /* countCollection */  );
8175      break;
8176
8177    default:
8178      ShouldNotReachHere();
8179  }
8180}
8181