1/*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/classLoaderData.hpp"
27#include "classfile/stringTable.hpp"
28#include "classfile/symbolTable.hpp"
29#include "classfile/systemDictionary.hpp"
30#include "code/codeCache.hpp"
31#include "gc/cms/cmsCollectorPolicy.hpp"
32#include "gc/cms/cmsOopClosures.inline.hpp"
33#include "gc/cms/compactibleFreeListSpace.hpp"
34#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
35#include "gc/cms/concurrentMarkSweepThread.hpp"
36#include "gc/cms/parNewGeneration.hpp"
37#include "gc/cms/vmCMSOperations.hpp"
38#include "gc/serial/genMarkSweep.hpp"
39#include "gc/serial/tenuredGeneration.hpp"
40#include "gc/shared/adaptiveSizePolicy.hpp"
41#include "gc/shared/cardGeneration.inline.hpp"
42#include "gc/shared/cardTableRS.hpp"
43#include "gc/shared/collectedHeap.inline.hpp"
44#include "gc/shared/collectorCounters.hpp"
45#include "gc/shared/collectorPolicy.hpp"
46#include "gc/shared/gcLocker.inline.hpp"
47#include "gc/shared/gcPolicyCounters.hpp"
48#include "gc/shared/gcTimer.hpp"
49#include "gc/shared/gcTrace.hpp"
50#include "gc/shared/gcTraceTime.inline.hpp"
51#include "gc/shared/genCollectedHeap.hpp"
52#include "gc/shared/genOopClosures.inline.hpp"
53#include "gc/shared/isGCActiveMark.hpp"
54#include "gc/shared/referencePolicy.hpp"
55#include "gc/shared/strongRootsScope.hpp"
56#include "gc/shared/taskqueue.inline.hpp"
57#include "logging/log.hpp"
58#include "memory/allocation.hpp"
59#include "memory/iterator.inline.hpp"
60#include "memory/padded.hpp"
61#include "memory/resourceArea.hpp"
62#include "oops/oop.inline.hpp"
63#include "prims/jvmtiExport.hpp"
64#include "runtime/atomic.inline.hpp"
65#include "runtime/globals_extension.hpp"
66#include "runtime/handles.inline.hpp"
67#include "runtime/java.hpp"
68#include "runtime/orderAccess.inline.hpp"
69#include "runtime/timer.hpp"
70#include "runtime/vmThread.hpp"
71#include "services/memoryService.hpp"
72#include "services/runtimeService.hpp"
73#include "utilities/stack.inline.hpp"
74
75// statics
76CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
77bool CMSCollector::_full_gc_requested = false;
78GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
79
80//////////////////////////////////////////////////////////////////
81// In support of CMS/VM thread synchronization
82//////////////////////////////////////////////////////////////////
83// We split use of the CGC_lock into 2 "levels".
84// The low-level locking is of the usual CGC_lock monitor. We introduce
85// a higher level "token" (hereafter "CMS token") built on top of the
86// low level monitor (hereafter "CGC lock").
87// The token-passing protocol gives priority to the VM thread. The
88// CMS-lock doesn't provide any fairness guarantees, but clients
89// should ensure that it is only held for very short, bounded
90// durations.
91//
92// When either of the CMS thread or the VM thread is involved in
93// collection operations during which it does not want the other
94// thread to interfere, it obtains the CMS token.
95//
96// If either thread tries to get the token while the other has
97// it, that thread waits. However, if the VM thread and CMS thread
98// both want the token, then the VM thread gets priority while the
99// CMS thread waits. This ensures, for instance, that the "concurrent"
100// phases of the CMS thread's work do not block out the VM thread
101// for long periods of time as the CMS thread continues to hog
102// the token. (See bug 4616232).
103//
104// The baton-passing functions are, however, controlled by the
105// flags _foregroundGCShouldWait and _foregroundGCIsActive,
106// and here the low-level CMS lock, not the high level token,
107// ensures mutual exclusion.
108//
109// Two important conditions that we have to satisfy:
110// 1. if a thread does a low-level wait on the CMS lock, then it
111//    relinquishes the CMS token if it were holding that token
112//    when it acquired the low-level CMS lock.
113// 2. any low-level notifications on the low-level lock
114//    should only be sent when a thread has relinquished the token.
115//
116// In the absence of either property, we'd have potential deadlock.
117//
118// We protect each of the CMS (concurrent and sequential) phases
119// with the CMS _token_, not the CMS _lock_.
120//
121// The only code protected by CMS lock is the token acquisition code
122// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
123// baton-passing code.
124//
125// Unfortunately, I couldn't come up with a good abstraction to factor and
126// hide the naked CGC_lock manipulation in the baton-passing code
127// further below. That's something we should try to do. Also, the proof
128// of correctness of this 2-level locking scheme is far from obvious,
129// and potentially quite slippery. We have an uneasy suspicion, for instance,
130// that there may be a theoretical possibility of delay/starvation in the
131// low-level lock/wait/notify scheme used for the baton-passing because of
132// potential interference with the priority scheme embodied in the
133// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
134// invocation further below and marked with "XXX 20011219YSR".
135// Indeed, as we note elsewhere, this may become yet more slippery
136// in the presence of multiple CMS and/or multiple VM threads. XXX
137
138class CMSTokenSync: public StackObj {
139 private:
140  bool _is_cms_thread;
141 public:
142  CMSTokenSync(bool is_cms_thread):
143    _is_cms_thread(is_cms_thread) {
144    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
145           "Incorrect argument to constructor");
146    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
147  }
148
149  ~CMSTokenSync() {
150    assert(_is_cms_thread ?
151             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
152             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
153          "Incorrect state");
154    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
155  }
156};
157
158// Convenience class that does a CMSTokenSync, and then acquires
159// up to three locks.
160class CMSTokenSyncWithLocks: public CMSTokenSync {
161 private:
162  // Note: locks are acquired in textual declaration order
163  // and released in the opposite order
164  MutexLockerEx _locker1, _locker2, _locker3;
165 public:
166  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
167                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
168    CMSTokenSync(is_cms_thread),
169    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
170    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
171    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
172  { }
173};
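// Usage sketch (hypothetical call site, not taken from this file): code on
// the CMS thread that must hold the CMS token plus the bit map lock while it
// touches shared marking state could write:
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     // ... manipulate the mark bit map / mod union table here ...
//   }
//
// Construction first acquires the token (CMSTokenSync base), then the locks
// in declaration order; destruction releases the locks in reverse order and
// then hands the token back via desynchronize().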
174
175
176//////////////////////////////////////////////////////////////////
177//  Concurrent Mark-Sweep Generation /////////////////////////////
178//////////////////////////////////////////////////////////////////
179
180NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
181
182// This struct contains per-thread things necessary to support parallel
183// young-gen collection.
184class CMSParGCThreadState: public CHeapObj<mtGC> {
185 public:
186  CompactibleFreeListSpaceLAB lab;
187  PromotionInfo promo;
188
189  // Constructor.
190  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
191    promo.setSpace(cfls);
192  }
193};
194
195ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
196     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
197  CardGeneration(rs, initial_byte_size, ct),
198  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
199  _did_compact(false)
200{
201  HeapWord* bottom = (HeapWord*) _virtual_space.low();
202  HeapWord* end    = (HeapWord*) _virtual_space.high();
203
204  _direct_allocated_words = 0;
205  NOT_PRODUCT(
206    _numObjectsPromoted = 0;
207    _numWordsPromoted = 0;
208    _numObjectsAllocated = 0;
209    _numWordsAllocated = 0;
210  )
211
212  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
213  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
214  _cmsSpace->_old_gen = this;
215
216  _gc_stats = new CMSGCStats();
217
218  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
219  // offsets match. The ability to tell free chunks from objects
220  // depends on this property.
221  debug_only(
222    FreeChunk* junk = NULL;
223    assert(UseCompressedClassPointers ||
224           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
225           "Offset of FreeChunk::_prev within FreeChunk must match"
226           "  that of OopDesc::_klass within OopDesc");
227  )
228
229  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
230  for (uint i = 0; i < ParallelGCThreads; i++) {
231    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
232  }
233
234  _incremental_collection_failed = false;
235  // The "dilatation_factor" is the expansion that can occur on
236  // account of the fact that the minimum object size in the CMS
237  // generation may be larger than that in, say, a contiguous young
238  //  generation.
239  // Ideally, in the calculation below, we'd compute the dilatation
240  // factor as: MinChunkSize/(promoting_gen's min object size)
241  // Since we do not have such a general query interface for the
242  // promoting generation, we'll instead just use the minimum
243  // object size (which today is a header's worth of space);
244  // note that all arithmetic is in units of HeapWords.
245  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
246  assert(_dilatation_factor >= 1.0, "from previous assert");
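  // Worked example (hypothetical numbers, purely illustrative): if
  // MinChunkSize were 4 HeapWords and CollectedHeap::min_fill_size() were
  // 2 HeapWords, _dilatation_factor would be 4.0 / 2.0 = 2.0, i.e. an object
  // that barely meets the young gen minimum could need up to twice as many
  // words once promoted into this generation.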
247}
248
249
250// The field "_initiating_occupancy" represents the occupancy percentage
251// at which we trigger a new collection cycle.  Unless explicitly specified
252// via CMSInitiatingOccupancyFraction (argument "io" below), it
253// is calculated by:
254//
255//   Let "f" be MinHeapFreeRatio in
256//
257//    _initiating_occupancy = 100-f +
258//                           f * (CMSTriggerRatio/100)
259//   where CMSTriggerRatio is the argument "tr" below.
260//
261// That is, if we assume the heap is at its desired maximum occupancy at the
262// end of a collection, we let CMSTriggerRatio of the (purported) free
263// space be allocated before initiating a new collection cycle.
264//
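// Worked example (hypothetical values): with MinHeapFreeRatio = 40,
// CMSTriggerRatio = 80 and no explicit CMSInitiatingOccupancyFraction,
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100
//                         = (60 + 32) / 100 = 0.92
// i.e. a cycle is considered once the generation is about 92% occupied.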
265void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
266  assert(io <= 100 && tr <= 100, "Check the arguments");
267  if (io >= 0) {
268    _initiating_occupancy = (double)io / 100.0;
269  } else {
270    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
271                             (double)(tr * MinHeapFreeRatio) / 100.0)
272                            / 100.0;
273  }
274}
275
276void ConcurrentMarkSweepGeneration::ref_processor_init() {
277  assert(collector() != NULL, "no collector");
278  collector()->ref_processor_init();
279}
280
281void CMSCollector::ref_processor_init() {
282  if (_ref_processor == NULL) {
283    // Allocate and initialize a reference processor
284    _ref_processor =
285      new ReferenceProcessor(_span,                               // span
286                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
287                             ParallelGCThreads,                   // mt processing degree
288                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
289                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
290                             _cmsGen->refs_discovery_is_atomic(), // atomic discovery? (false: CMS discovers refs concurrently)
291                             &_is_alive_closure);                 // closure for liveness info
292    // Initialize the _ref_processor field of CMSGen
293    _cmsGen->set_ref_processor(_ref_processor);
294
295  }
296}
297
298AdaptiveSizePolicy* CMSCollector::size_policy() {
299  GenCollectedHeap* gch = GenCollectedHeap::heap();
300  return gch->gen_policy()->size_policy();
301}
302
303void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
304
305  const char* gen_name = "old";
306  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
307  // Generation Counters - generation 1, 1 subspace
308  _gen_counters = new GenerationCounters(gen_name, 1, 1,
309      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
310
311  _space_counters = new GSpaceCounters(gen_name, 0,
312                                       _virtual_space.reserved_size(),
313                                       this, _gen_counters);
314}
315
316CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
317  _cms_gen(cms_gen)
318{
319  assert(alpha <= 100, "bad value");
320  _saved_alpha = alpha;
321
322  // Initialize the alphas to the bootstrap value of 100.
323  _gc0_alpha = _cms_alpha = 100;
324
325  _cms_begin_time.update();
326  _cms_end_time.update();
327
328  _gc0_duration = 0.0;
329  _gc0_period = 0.0;
330  _gc0_promoted = 0;
331
332  _cms_duration = 0.0;
333  _cms_period = 0.0;
334  _cms_allocated = 0;
335
336  _cms_used_at_gc0_begin = 0;
337  _cms_used_at_gc0_end = 0;
338  _allow_duty_cycle_reduction = false;
339  _valid_bits = 0;
340}
341
342double CMSStats::cms_free_adjustment_factor(size_t free) const {
343  // TBD: CR 6909490
344  return 1.0;
345}
346
347void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
348}
349
350// If promotion failure handling is on, use
351// the padded average size of promotions from each
352// young generation collection.
353double CMSStats::time_until_cms_gen_full() const {
354  size_t cms_free = _cms_gen->cmsSpace()->free();
355  GenCollectedHeap* gch = GenCollectedHeap::heap();
356  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
357                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
358  if (cms_free > expected_promotion) {
359    // Start a cms collection if there isn't enough space to promote
360    // for the next young collection.  Use the padded average as
361    // a safety factor.
362    cms_free -= expected_promotion;
363
364    // Adjust by the safety factor.
365    double cms_free_dbl = (double)cms_free;
366    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
367    // Apply a further correction factor which tries to adjust
368    // for recent occurrences of concurrent mode failures.
369    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
370    cms_free_dbl = cms_free_dbl * cms_adjustment;
371
372    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
373                  cms_free, expected_promotion);
374    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
375    // Add 1 in case the consumption rate goes to zero.
376    return cms_free_dbl / (cms_consumption_rate() + 1.0);
377  }
378  return 0.0;
379}
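// Worked example for the estimate above (hypothetical values): with
// cms_free = 300 MB, a padded average promotion of 50 MB and
// CMSIncrementalSafetyFactor = 10, the usable free space is
// (300 - 50) * 0.9 = 225 MB; at a cms_consumption_rate() of 20 MB/s the
// returned estimate is roughly 225 / (20 + 1), i.e. about 10.7 seconds.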
380
381// Compare the duration of the cms collection to the
382// time remaining before the cms generation is empty.
383// Note that the time from the start of the cms collection
384// to the start of the cms sweep (less than the total
385// duration of the cms collection) can be used.  This
386// has been tried and some applications experienced
387// promotion failures early in execution.  This was
388// possibly because the averages were not accurate
389// enough at the beginning.
390double CMSStats::time_until_cms_start() const {
391  // We add "gc0_period" to the "work" calculation
392  // below because this query is done (mostly) at the
393  // end of a scavenge, so we need to conservatively
394  // account for that much possible delay
395  // in the query so as to avoid concurrent mode failures
396  // due to starting the collection just a wee bit too
397  // late.
398  double work = cms_duration() + gc0_period();
399  double deadline = time_until_cms_gen_full();
400  // If a concurrent mode failure occurred recently, we want to be
401  // more conservative and halve our expected time_until_cms_gen_full()
402  if (work > deadline) {
403    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
404                          cms_duration(), gc0_period(), time_until_cms_gen_full());
405    return 0.0;
406  }
407  return deadline - work;
408}
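// Illustration (hypothetical values): with cms_duration() = 4.0 s and
// gc0_period() = 1.0 s, work = 5.0 s; if time_until_cms_gen_full() is
// 12.0 s there are about 7 seconds of headroom before a cycle must begin,
// whereas once work >= deadline the answer is 0.0 and
// shouldConcurrentCollect() will start a cycle immediately.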
409
410#ifndef PRODUCT
411void CMSStats::print_on(outputStream *st) const {
412  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
413  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
414               gc0_duration(), gc0_period(), gc0_promoted());
415  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
416            cms_duration(), cms_period(), cms_allocated());
417  st->print(",cms_since_beg=%g,cms_since_end=%g",
418            cms_time_since_begin(), cms_time_since_end());
419  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
420            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
421
422  if (valid()) {
423    st->print(",promo_rate=%g,cms_alloc_rate=%g",
424              promotion_rate(), cms_allocation_rate());
425    st->print(",cms_consumption_rate=%g,time_until_full=%g",
426              cms_consumption_rate(), time_until_cms_gen_full());
427  }
428  st->print(" ");
429}
430#endif // #ifndef PRODUCT
431
432CMSCollector::CollectorState CMSCollector::_collectorState =
433                             CMSCollector::Idling;
434bool CMSCollector::_foregroundGCIsActive = false;
435bool CMSCollector::_foregroundGCShouldWait = false;
436
437CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
438                           CardTableRS*                   ct,
439                           ConcurrentMarkSweepPolicy*     cp):
440  _cmsGen(cmsGen),
441  _ct(ct),
442  _ref_processor(NULL),    // will be set later
443  _conc_workers(NULL),     // may be set later
444  _abort_preclean(false),
445  _start_sampling(false),
446  _between_prologue_and_epilogue(false),
447  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
448  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
449                 -1 /* lock-free */, "No_lock" /* dummy */),
450  _modUnionClosurePar(&_modUnionTable),
451  // Adjust my span to cover old (cms) gen
452  _span(cmsGen->reserved()),
453  // Construct the is_alive_closure with _span & markBitMap
454  _is_alive_closure(_span, &_markBitMap),
455  _restart_addr(NULL),
456  _overflow_list(NULL),
457  _stats(cmsGen),
458  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
459                             // verify that this lock should be acquired with safepoint check.
460                             Monitor::_safepoint_check_sometimes)),
461  _eden_chunk_array(NULL),     // may be set in ctor body
462  _eden_chunk_capacity(0),     // -- ditto --
463  _eden_chunk_index(0),        // -- ditto --
464  _survivor_plab_array(NULL),  // -- ditto --
465  _survivor_chunk_array(NULL), // -- ditto --
466  _survivor_chunk_capacity(0), // -- ditto --
467  _survivor_chunk_index(0),    // -- ditto --
468  _ser_pmc_preclean_ovflw(0),
469  _ser_kac_preclean_ovflw(0),
470  _ser_pmc_remark_ovflw(0),
471  _par_pmc_remark_ovflw(0),
472  _ser_kac_ovflw(0),
473  _par_kac_ovflw(0),
474#ifndef PRODUCT
475  _num_par_pushes(0),
476#endif
477  _collection_count_start(0),
478  _verifying(false),
479  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
480  _completed_initialization(false),
481  _collector_policy(cp),
482  _should_unload_classes(CMSClassUnloadingEnabled),
483  _concurrent_cycles_since_last_unload(0),
484  _roots_scanning_options(GenCollectedHeap::SO_None),
485  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
486  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
487  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
488  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
489  _cms_start_registered(false)
490{
491  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
492    ExplicitGCInvokesConcurrent = true;
493  }
494  // Now expand the span and allocate the collection support structures
495  // (MUT, marking bit map etc.) to cover the generation subject to
496  // collection.
497
498  // For use by dirty card to oop closures.
499  _cmsGen->cmsSpace()->set_collector(this);
500
501  // Allocate MUT and marking bit map
502  {
503    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
504    if (!_markBitMap.allocate(_span)) {
505      warning("Failed to allocate CMS Bit Map");
506      return;
507    }
508    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
509  }
510  {
511    _modUnionTable.allocate(_span);
512    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
513  }
514
515  if (!_markStack.allocate(MarkStackSize)) {
516    warning("Failed to allocate CMS Marking Stack");
517    return;
518  }
519
520  // Support for multi-threaded concurrent phases
521  if (CMSConcurrentMTEnabled) {
522    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
523      // just for now
524      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
525    }
526    if (ConcGCThreads > 1) {
527      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
528                                 ConcGCThreads, true);
529      if (_conc_workers == NULL) {
530        warning("GC/CMS: _conc_workers allocation failure: "
531              "forcing -CMSConcurrentMTEnabled");
532        CMSConcurrentMTEnabled = false;
533      } else {
534        _conc_workers->initialize_workers();
535      }
536    } else {
537      CMSConcurrentMTEnabled = false;
538    }
539  }
540  if (!CMSConcurrentMTEnabled) {
541    ConcGCThreads = 0;
542  } else {
543    // Turn off CMSCleanOnEnter optimization temporarily for
544    // the MT case where it's not fixed yet; see 6178663.
545    CMSCleanOnEnter = false;
546  }
547  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
548         "Inconsistency");
549
550  // Parallel task queues; these are shared for the
551  // concurrent and stop-world phases of CMS, but
552  // are not shared with parallel scavenge (ParNew).
553  {
554    uint i;
555    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
556
557    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
558         || ParallelRefProcEnabled)
559        && num_queues > 0) {
560      _task_queues = new OopTaskQueueSet(num_queues);
561      if (_task_queues == NULL) {
562        warning("task_queues allocation failure.");
563        return;
564      }
565      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
566      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
567      for (i = 0; i < num_queues; i++) {
568        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
569        if (q == NULL) {
570          warning("work_queue allocation failure.");
571          return;
572        }
573        _task_queues->register_queue(i, q);
574      }
575      for (i = 0; i < num_queues; i++) {
576        _task_queues->queue(i)->initialize();
577        _hash_seed[i] = 17;  // copied from ParNew
578      }
579    }
580  }
581
582  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
583
584  // Clip CMSBootstrapOccupancy between 0 and 100.
585  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
586
587  // Now tell CMS generations the identity of their collector
588  ConcurrentMarkSweepGeneration::set_collector(this);
589
590  // Create & start a CMS thread for this CMS collector
591  _cmsThread = ConcurrentMarkSweepThread::start(this);
592  assert(cmsThread() != NULL, "CMS Thread should have been created");
593  assert(cmsThread()->collector() == this,
594         "CMS Thread should refer to this gen");
595  assert(CGC_lock != NULL, "Where's the CGC_lock?");
596
597  // Support for parallelizing young gen rescan
598  GenCollectedHeap* gch = GenCollectedHeap::heap();
599  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
600  _young_gen = (ParNewGeneration*)gch->young_gen();
601  if (gch->supports_inline_contig_alloc()) {
602    _top_addr = gch->top_addr();
603    _end_addr = gch->end_addr();
604    assert(_young_gen != NULL, "no _young_gen");
605    _eden_chunk_index = 0;
606    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
607    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
608  }
609
610  // Support for parallelizing survivor space rescan
611  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
612    const size_t max_plab_samples =
613      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
614
615    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
616    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
617    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
618    _survivor_chunk_capacity = max_plab_samples;
619    for (uint i = 0; i < ParallelGCThreads; i++) {
620      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
621      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
622      assert(cur->end() == 0, "Should be 0");
623      assert(cur->array() == vec, "Should be vec");
624      assert(cur->capacity() == max_plab_samples, "Error");
625    }
626  }
627
628  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
629  _gc_counters = new CollectorCounters("CMS", 1);
630  _completed_initialization = true;
631  _inter_sweep_timer.start();  // start of time
632}
633
634const char* ConcurrentMarkSweepGeneration::name() const {
635  return "concurrent mark-sweep generation";
636}
637void ConcurrentMarkSweepGeneration::update_counters() {
638  if (UsePerfData) {
639    _space_counters->update_all();
640    _gen_counters->update_all();
641  }
642}
643
644// This is an optimized version of update_counters(). It takes the
645// used value as a parameter rather than computing it.
646//
647void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
648  if (UsePerfData) {
649    _space_counters->update_used(used);
650    _space_counters->update_capacity();
651    _gen_counters->update_all();
652  }
653}
654
655void ConcurrentMarkSweepGeneration::print() const {
656  Generation::print();
657  cmsSpace()->print();
658}
659
660#ifndef PRODUCT
661void ConcurrentMarkSweepGeneration::print_statistics() {
662  cmsSpace()->printFLCensus(0);
663}
664#endif
665
666size_t
667ConcurrentMarkSweepGeneration::contiguous_available() const {
668  // dld proposes an improvement in precision here. If the committed
669  // part of the space ends in a free block we should add that to
670  // uncommitted size in the calculation below. Will make this
671  // change later, staying with the approximation below for the
672  // time being. -- ysr.
673  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
674}
675
676size_t
677ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
678  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
679}
680
681size_t ConcurrentMarkSweepGeneration::max_available() const {
682  return free() + _virtual_space.uncommitted_size();
683}
684
685bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
686  size_t available = max_available();
687  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
688  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
689  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
690                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
691  return res;
692}
693
694// At a promotion failure dump information on block layout in heap
695// (cms old generation).
696void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
697  LogHandle(gc, promotion) log;
698  if (log.is_trace()) {
699    ResourceMark rm;
700    cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
701  }
702}
703
704void ConcurrentMarkSweepGeneration::reset_after_compaction() {
705  // Clear the promotion information.  These pointers can be adjusted
706  // along with all the other pointers into the heap but
707  // compaction is expected to be a rare event with
708  // a heap using cms so don't do it without seeing the need.
709  for (uint i = 0; i < ParallelGCThreads; i++) {
710    _par_gc_thread_states[i]->promo.reset();
711  }
712}
713
714void ConcurrentMarkSweepGeneration::compute_new_size() {
715  assert_locked_or_safepoint(Heap_lock);
716
717  // If incremental collection failed, we just want to expand
718  // to the limit.
719  if (incremental_collection_failed()) {
720    clear_incremental_collection_failed();
721    grow_to_reserved();
722    return;
723  }
724
725  // The heap has been compacted but not reset yet.
726  // Any metric such as free() or used() will be incorrect.
727
728  CardGeneration::compute_new_size();
729
730  // Reset again after a possible resizing
731  if (did_compact()) {
732    cmsSpace()->reset_after_compaction();
733  }
734}
735
736void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
737  assert_locked_or_safepoint(Heap_lock);
738
739  // If incremental collection failed, we just want to expand
740  // to the limit.
741  if (incremental_collection_failed()) {
742    clear_incremental_collection_failed();
743    grow_to_reserved();
744    return;
745  }
746
747  double free_percentage = ((double) free()) / capacity();
748  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
749  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
750
751  // compute expansion delta needed for reaching desired free percentage
752  if (free_percentage < desired_free_percentage) {
753    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
754    assert(desired_capacity >= capacity(), "invalid expansion size");
755    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
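    // Worked example (hypothetical sizes): with used() = 600 MB and
    // capacity() = 800 MB the free fraction is 0.25; if MinHeapFreeRatio
    // is 40, desired_capacity = 600 / (1 - 0.40) = 1000 MB, so we ask to
    // expand by 200 MB (or by MinHeapDeltaBytes, whichever is larger).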
756    LogHandle(gc) log;
757    if (log.is_trace()) {
758      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
759      log.trace("From compute_new_size: ");
760      log.trace("  Free fraction %f", free_percentage);
761      log.trace("  Desired free fraction %f", desired_free_percentage);
762      log.trace("  Maximum free fraction %f", maximum_free_percentage);
763      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
764      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
765      GenCollectedHeap* gch = GenCollectedHeap::heap();
766      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
767      size_t young_size = gch->young_gen()->capacity();
768      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
769      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
770      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
771      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
772    }
773    // safe if expansion fails
774    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
775    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
776  } else {
777    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
778    assert(desired_capacity <= capacity(), "invalid expansion size");
779    size_t shrink_bytes = capacity() - desired_capacity;
780    // Don't shrink unless the delta is greater than the minimum shrink we want
781    if (shrink_bytes >= MinHeapDeltaBytes) {
782      shrink_free_list_by(shrink_bytes);
783    }
784  }
785}
786
787Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
788  return cmsSpace()->freelistLock();
789}
790
791HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
792  CMSSynchronousYieldRequest yr;
793  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
794  return have_lock_and_allocate(size, tlab);
795}
796
797HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
798                                                                bool   tlab /* ignored */) {
799  assert_lock_strong(freelistLock());
800  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
801  HeapWord* res = cmsSpace()->allocate(adjustedSize);
802  // Allocate the object live (grey) if the background collector has
803  // started marking. This is necessary because the marker may
804  // have passed this address and consequently this object will
805  // not otherwise be greyed and would be incorrectly swept up.
806  // Note that if this object contains references, the writing
807  // of those references will dirty the card containing this object
808  // allowing the object to be blackened (and its references scanned)
809  // either during a preclean phase or at the final checkpoint.
810  if (res != NULL) {
811    // We may block here with an uninitialized object with
812    // its mark-bit or P-bits not yet set. Such objects need
813    // to be safely navigable by block_start().
814    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
815    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
816    collector()->direct_allocated(res, adjustedSize);
817    _direct_allocated_words += adjustedSize;
818    // allocation counters
819    NOT_PRODUCT(
820      _numObjectsAllocated++;
821      _numWordsAllocated += (int)adjustedSize;
822    )
823  }
824  return res;
825}
826
827// In the case of direct allocation by mutators in a generation that
828// is being concurrently collected, the object must be allocated
829// live (grey) if the background collector has started marking.
830// This is necessary because the marker may
831// have passed this address and consequently this object will
832// not otherwise be greyed and would be incorrectly swept up.
833// Note that if this object contains references, the writing
834// of those references will dirty the card containing this object
835// allowing the object to be blackened (and its references scanned)
836// either during a preclean phase or at the final checkpoint.
837void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
838  assert(_markBitMap.covers(start, size), "Out of bounds");
839  if (_collectorState >= Marking) {
840    MutexLockerEx y(_markBitMap.lock(),
841                    Mutex::_no_safepoint_check_flag);
842    // [see comments preceding SweepClosure::do_blk() below for details]
843    //
844    // Can the P-bits be deleted now?  JJJ
845    //
846    // 1. need to mark the object as live so it isn't collected
847    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
848    // 3. need to mark the end of the object so marking, precleaning or sweeping
849    //    can skip over uninitialized or unparsable objects. An allocated
850    //    object is considered uninitialized for our purposes as long as
851    //    its klass word is NULL.  All old gen objects are parsable
852    //    as soon as they are initialized.
853    _markBitMap.mark(start);          // object is live
854    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
855    _markBitMap.mark(start + size - 1);
856                                      // mark end of object
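    // Illustration (assuming one bit map bit per HeapWord): for an
    // uninitialized block of 5 words starting at 's', bits are set at s,
    // s+1 and s+4. A scanner that finds both s and s+1 marked knows the
    // object may still be uninitialized (klass word NULL) and can use the
    // bit at s+4 to step over the block without parsing it.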
857  }
858  // check that oop looks uninitialized
859  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
860}
861
862void CMSCollector::promoted(bool par, HeapWord* start,
863                            bool is_obj_array, size_t obj_size) {
864  assert(_markBitMap.covers(start), "Out of bounds");
865  // See comment in direct_allocated() about when objects should
866  // be allocated live.
867  if (_collectorState >= Marking) {
868    // we already hold the marking bit map lock, taken in
869    // the prologue
870    if (par) {
871      _markBitMap.par_mark(start);
872    } else {
873      _markBitMap.mark(start);
874    }
875    // We don't need to mark the object as uninitialized (as
876    // in direct_allocated above) because this is being done with the
877    // world stopped and the object will be initialized by the
878    // time the marking, precleaning or sweeping get to look at it.
879    // But see the code for copying objects into the CMS generation,
880    // where we need to ensure that concurrent readers of the
881    // block offset table are able to safely navigate a block that
882    // is in flux from being free to being allocated (and in
883    // transition while being copied into) and subsequently
884    // becoming a bona-fide object when the copy/promotion is complete.
885    assert(SafepointSynchronize::is_at_safepoint(),
886           "expect promotion only at safepoints");
887
888    if (_collectorState < Sweeping) {
889      // Mark the appropriate cards in the modUnionTable, so that
890      // this object gets scanned before the sweep. If this is
891      // not done, CMS generation references in the object might
892      // not get marked.
893      // For the case of arrays, which are otherwise precisely
894      // marked, we need to dirty the entire array, not just its head.
895      if (is_obj_array) {
896        // The [par_]mark_range() method expects mr.end() below to
897        // be aligned to the granularity of a bit's representation
898        // in the heap. In the case of the MUT below, that's a
899        // card size.
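        // For example (hypothetical numbers): with 512-byte cards, i.e.
        // 64 words on a 64-bit VM, an array of 100 words starting exactly
        // at a card boundary has its end rounded up from start + 100 to
        // start + 128, so two full cards' worth of MUT bits get dirtied.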
900        MemRegion mr(start,
901                     (HeapWord*)round_to((intptr_t)(start + obj_size),
902                        CardTableModRefBS::card_size /* bytes */));
903        if (par) {
904          _modUnionTable.par_mark_range(mr);
905        } else {
906          _modUnionTable.mark_range(mr);
907        }
908      } else {  // not an obj array; we can just mark the head
909        if (par) {
910          _modUnionTable.par_mark(start);
911        } else {
912          _modUnionTable.mark(start);
913        }
914      }
915    }
916  }
917}
918
919oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
920  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
921  // allocate, copy and if necessary update promoinfo --
922  // delegate to underlying space.
923  assert_lock_strong(freelistLock());
924
925#ifndef PRODUCT
926  if (GenCollectedHeap::heap()->promotion_should_fail()) {
927    return NULL;
928  }
929#endif  // #ifndef PRODUCT
930
931  oop res = _cmsSpace->promote(obj, obj_size);
932  if (res == NULL) {
933    // expand and retry
934    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
935    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
936    // Since this is the old generation, we don't try to promote
937    // into a more senior generation.
938    res = _cmsSpace->promote(obj, obj_size);
939  }
940  if (res != NULL) {
941    // See comment in allocate() about when objects should
942    // be allocated live.
943    assert(obj->is_oop(), "Will dereference klass pointer below");
944    collector()->promoted(false,           // Not parallel
945                          (HeapWord*)res, obj->is_objArray(), obj_size);
946    // promotion counters
947    NOT_PRODUCT(
948      _numObjectsPromoted++;
949      _numWordsPromoted +=
950        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
951    )
952  }
953  return res;
954}
955
956
957// IMPORTANT: Notes on object size recognition in CMS.
958// ---------------------------------------------------
959// A block of storage in the CMS generation is always in
960// one of three states. A free block (FREE), an allocated
961// object (OBJECT) whose size() method reports the correct size,
962// and an intermediate state (TRANSIENT) in which its size cannot
963// be accurately determined.
964// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
965// -----------------------------------------------------
966// FREE:      klass_word & 1 == 1; mark_word holds block size
967//
968// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
969//            obj->size() computes correct size
970//
971// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
972//
973// STATE IDENTIFICATION: (64 bit+COOPS)
974// ------------------------------------
975// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
976//
977// OBJECT:    klass_word installed; klass_word != 0;
978//            obj->size() computes correct size
979//
980// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
981//
982//
983// STATE TRANSITION DIAGRAM
984//
985//        mut / parnew                     mut  /  parnew
986// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
987//  ^                                                                   |
988//  |------------------------ DEAD <------------------------------------|
989//         sweep                            mut
990//
991// While a block is in TRANSIENT state its size cannot be determined
992// so readers will either need to come back later or stall until
993// the size can be determined. Note that for the case of direct
994// allocation, P-bits, when available, may be used to determine the
995// size of an object that may not yet have been initialized.
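//
// Reader-side sketch of the scheme above (32 bit, or 64 bit w/o COOPS;
// illustrative only -- the real logic lives in CompactibleFreeListSpace's
// block_size() / block_is_obj() family):
//
//   if (klass_word & 1) {           // FREE: block size is in the mark word
//     size = ((FreeChunk*)p)->size();
//   } else if (klass_word != 0) {   // OBJECT: obj->size() is now reliable
//     size = oop(p)->size();
//   } else {                        // TRANSIENT: retry later, or consult
//     ...                           // P-bits for direct allocations
//   }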
996
997// Things to support parallel young-gen collection.
998oop
999ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1000                                           oop old, markOop m,
1001                                           size_t word_sz) {
1002#ifndef PRODUCT
1003  if (GenCollectedHeap::heap()->promotion_should_fail()) {
1004    return NULL;
1005  }
1006#endif  // #ifndef PRODUCT
1007
1008  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1009  PromotionInfo* promoInfo = &ps->promo;
1010  // if we are tracking promotions, then first ensure space for
1011  // promotion (including spooling space for saving header if necessary).
1012  // then allocate and copy, then track promoted info if needed.
1013  // When tracking (see PromotionInfo::track()), the mark word may
1014  // be displaced and in this case restoration of the mark word
1015  // occurs in the (oop_since_save_marks_)iterate phase.
1016  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1017    // Out of space for allocating spooling buffers;
1018    // try expanding and allocating spooling buffers.
1019    if (!expand_and_ensure_spooling_space(promoInfo)) {
1020      return NULL;
1021    }
1022  }
1023  assert(promoInfo->has_spooling_space(), "Control point invariant");
1024  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1025  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1026  if (obj_ptr == NULL) {
1027     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1028     if (obj_ptr == NULL) {
1029       return NULL;
1030     }
1031  }
1032  oop obj = oop(obj_ptr);
1033  OrderAccess::storestore();
1034  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1035  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1036  // IMPORTANT: See note on object initialization for CMS above.
1037  // Otherwise, copy the object.  Here we must be careful to insert the
1038  // klass pointer last, since this marks the block as an allocated object.
1039  // Except with compressed oops it's the mark word.
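  // Publication order used below (sketch): the mark word and the object
  // body are written first, each separated from the final klass-word store
  // by OrderAccess::storestore(), so a concurrent reader that observes a
  // non-NULL klass word is guaranteed to also see a fully copied object;
  // until then the block reads as TRANSIENT (see the state notes above).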
1040  HeapWord* old_ptr = (HeapWord*)old;
1041  // Restore the mark word copied above.
1042  obj->set_mark(m);
1043  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1044  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1045  OrderAccess::storestore();
1046
1047  if (UseCompressedClassPointers) {
1048    // Copy gap missed by (aligned) header size calculation below
1049    obj->set_klass_gap(old->klass_gap());
1050  }
1051  if (word_sz > (size_t)oopDesc::header_size()) {
1052    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1053                                 obj_ptr + oopDesc::header_size(),
1054                                 word_sz - oopDesc::header_size());
1055  }
1056
1057  // Now we can track the promoted object, if necessary.  We take care
1058  // to delay the transition from uninitialized to full object
1059  // (i.e., insertion of klass pointer) until after, so that it
1060  // atomically becomes a promoted object.
1061  if (promoInfo->tracking()) {
1062    promoInfo->track((PromotedObject*)obj, old->klass());
1063  }
1064  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1065  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1066  assert(old->is_oop(), "Will use and dereference old klass ptr below");
1067
1068  // Finally, install the klass pointer (this should be volatile).
1069  OrderAccess::storestore();
1070  obj->set_klass(old->klass());
1071  // We should now be able to calculate the right size for this object
1072  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1073
1074  collector()->promoted(true,          // parallel
1075                        obj_ptr, old->is_objArray(), word_sz);
1076
1077  NOT_PRODUCT(
1078    Atomic::inc_ptr(&_numObjectsPromoted);
1079    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1080  )
1081
1082  return obj;
1083}
1084
1085void
1086ConcurrentMarkSweepGeneration::
1087par_promote_alloc_done(int thread_num) {
1088  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1089  ps->lab.retire(thread_num);
1090}
1091
1092void
1093ConcurrentMarkSweepGeneration::
1094par_oop_since_save_marks_iterate_done(int thread_num) {
1095  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1096  ParScanWithoutBarrierClosure* dummy_cl = NULL;
1097  ps->promo.promoted_oops_iterate_nv(dummy_cl);
1098}
1099
1100bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1101                                                   size_t size,
1102                                                   bool   tlab)
1103{
1104  // We allow a STW collection only if a full
1105  // collection was requested.
1106  return full || should_allocate(size, tlab); // FIX ME !!!
1107  // This and promotion failure handling are connected at the
1108  // hip and should be fixed by untying them.
1109}
1110
1111bool CMSCollector::shouldConcurrentCollect() {
1112  if (_full_gc_requested) {
1113    log_trace(gc)("CMSCollector: collect because of explicit gc request (or GCLocker)");
1114    return true;
1115  }
1116
1117  FreelistLocker x(this);
1118  // ------------------------------------------------------------------
1119  // Print out lots of information which affects the initiation of
1120  // a collection.
1121  LogHandle(gc) log;
1122  if (log.is_trace() && stats().valid()) {
1123    log.trace("CMSCollector shouldConcurrentCollect: ");
1124    ResourceMark rm;
1125    stats().print_on(log.trace_stream());
1126    log.trace("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
1127    log.trace("free=" SIZE_FORMAT, _cmsGen->free());
1128    log.trace("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
1129    log.trace("promotion_rate=%g", stats().promotion_rate());
1130    log.trace("cms_allocation_rate=%g", stats().cms_allocation_rate());
1131    log.trace("occupancy=%3.7f", _cmsGen->occupancy());
1132    log.trace("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1133    log.trace("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1134    log.trace("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1135    log.trace("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
1136  }
1137  // ------------------------------------------------------------------
1138
1139  // If the estimated time to complete a cms collection (cms_duration())
1140  // is less than the estimated time remaining until the cms generation
1141  // is full, start a collection.
1142  if (!UseCMSInitiatingOccupancyOnly) {
1143    if (stats().valid()) {
1144      if (stats().time_until_cms_start() == 0.0) {
1145        return true;
1146      }
1147    } else {
1148      // We want to conservatively collect somewhat early in order
1149      // to try and "bootstrap" our CMS/promotion statistics;
1150      // this branch will not fire after the first successful CMS
1151      // collection because the stats should then be valid.
1152      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1153        log_trace(gc)(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
1154                      _cmsGen->occupancy(), _bootstrap_occupancy);
1155        return true;
1156      }
1157    }
1158  }
1159
1160  // Otherwise, we start a collection cycle if the
1161  // old gen wants a collection cycle started, using
1162  // an appropriate criterion for making this decision.
1163  // XXX We need to make sure that the gen expansion
1164  // criterion dovetails well with this. XXX NEED TO FIX THIS
1165  if (_cmsGen->should_concurrent_collect()) {
1166    log_trace(gc)("CMS old gen initiated");
1167    return true;
1168  }
1169
1170  // We start a collection if we believe an incremental collection may fail;
1171  // this is not likely to be productive in practice because it's probably too
1172  // late anyway.
1173  GenCollectedHeap* gch = GenCollectedHeap::heap();
1174  assert(gch->collector_policy()->is_generation_policy(),
1175         "You may want to check the correctness of the following");
1176  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1177    log_trace(gc)("CMSCollector: collect because incremental collection will fail ");
1178    return true;
1179  }
1180
1181  if (MetaspaceGC::should_concurrent_collect()) {
1182    log_trace(gc)("CMSCollector: collect for metadata allocation ");
1183    return true;
1184  }
1185
1186  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
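  // (The flag is in milliseconds; e.g. a hypothetical setting of
  // CMSTriggerInterval=300000 starts a cycle once cms_time_since_begin()
  // reaches 300 seconds, whether or not the statistics are valid yet.)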
1187  if (CMSTriggerInterval >= 0) {
1188    if (CMSTriggerInterval == 0) {
1189      // Trigger always
1190      return true;
1191    }
1192
1193    // Check the CMS time since begin (we do not check the stats validity
1194    // as we want to be able to trigger the first CMS cycle as well)
1195    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1196      if (stats().valid()) {
1197        log_trace(gc)("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1198                      stats().cms_time_since_begin());
1199      } else {
1200        log_trace(gc)("CMSCollector: collect because of trigger interval (first collection)");
1201      }
1202      return true;
1203    }
1204  }
1205
1206  return false;
1207}
1208
1209void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1210
1211// Clear _expansion_cause fields of constituent generations
1212void CMSCollector::clear_expansion_cause() {
1213  _cmsGen->clear_expansion_cause();
1214}
1215
1216// We should be conservative in starting a collection cycle.  Starting
1217// too eagerly runs the risk of collecting too often in the extreme.
1218// Collecting too rarely falls back on full collections, which works,
1219// even if it is not optimal in terms of concurrent work.
1220// As a workaround for collecting too eagerly, use the flag
1221// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1222// giving the user an easily understandable way of controlling the
1223// collections.
1224// We want to start a new collection cycle if any of the following
1225// conditions hold:
1226// . our current occupancy exceeds the configured initiating occupancy
1227//   for this generation, or
1228// . we recently needed to expand this space and have not, since that
1229//   expansion, done a collection of this generation, or
1230// . the underlying space believes that it may be a good idea to initiate
1231//   a concurrent collection (this may be based on criteria such as the
1232//   following: the space uses linear allocation and linear allocation is
1233//   going to fail, or there is believed to be excessive fragmentation in
1234//   the generation, etc... or ...
1235// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1236//   the case of the old generation; see CR 6543076):
1237//   we may be approaching a point at which allocation requests may fail because
1238//   we will be out of sufficient free space given allocation rate estimates.]
1239bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1240
1241  assert_lock_strong(freelistLock());
1242  if (occupancy() > initiating_occupancy()) {
1243    log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
1244                  short_name(), occupancy(), initiating_occupancy());
1245    return true;
1246  }
1247  if (UseCMSInitiatingOccupancyOnly) {
1248    return false;
1249  }
1250  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1251    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
1252    return true;
1253  }
1254  return false;
1255}
1256
1257void ConcurrentMarkSweepGeneration::collect(bool   full,
1258                                            bool   clear_all_soft_refs,
1259                                            size_t size,
1260                                            bool   tlab)
1261{
1262  collector()->collect(full, clear_all_soft_refs, size, tlab);
1263}
1264
1265void CMSCollector::collect(bool   full,
1266                           bool   clear_all_soft_refs,
1267                           size_t size,
1268                           bool   tlab)
1269{
1270  // The following "if" branch is present for defensive reasons.
1271  // In the current uses of this interface, it can be replaced with:
1272  // assert(!GCLocker::is_active(), "Can't be called otherwise");
1273  // But I am not placing that assert here to allow future
1274  // generality in invoking this interface.
1275  if (GCLocker::is_active()) {
1276    // A consistency test for GCLocker
1277    assert(GCLocker::needs_gc(), "Should have been set already");
1278    // Skip this foreground collection, instead
1279    // expanding the heap if necessary.
1280    // Need the free list locks for the call to free() in compute_new_size()
1281    compute_new_size();
1282    return;
1283  }
1284  acquire_control_and_collect(full, clear_all_soft_refs);
1285}
1286
1287void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1288  GenCollectedHeap* gch = GenCollectedHeap::heap();
1289  unsigned int gc_count = gch->total_full_collections();
1290  if (gc_count == full_gc_count) {
1291    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1292    _full_gc_requested = true;
1293    _full_gc_cause = cause;
1294    CGC_lock->notify();   // nudge CMS thread
1295  } else {
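    // A full collection has already begun since the requester sampled the
    // full collection count, so the request is stale and can be ignored.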
1296    assert(gc_count > full_gc_count, "Error: causal loop");
1297  }
1298}
1299
1300bool CMSCollector::is_external_interruption() {
1301  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1302  return GCCause::is_user_requested_gc(cause) ||
1303         GCCause::is_serviceability_requested_gc(cause);
1304}
1305
1306void CMSCollector::report_concurrent_mode_interruption() {
1307  if (is_external_interruption()) {
1308    log_debug(gc)("Concurrent mode interrupted");
1309  } else {
1310    log_debug(gc)("Concurrent mode failure");
1311    _gc_tracer_cm->report_concurrent_mode_failure();
1312  }
1313}
1314
1315
1316// The foreground and background collectors need to coordinate in order
1317// to make sure that they do not mutually interfere with CMS collections.
1318// When a background collection is active,
1319// the foreground collector may need to take over (preempt) and
1320// synchronously complete an ongoing collection. Depending on the
1321// frequency of the background collections and the heap usage
1322// of the application, this preemption can be seldom or frequent.
1323// There are only certain
1324// points in the background collection at which the "collection-baton"
1325// can be passed to the foreground collector.
1326//
1327// The foreground collector will wait for the baton before
1328// starting any part of the collection.  The foreground collector
1329// will only wait at one location.
1330//
1331// The background collector will yield the baton before starting a new
1332// phase of the collection (e.g., before initial marking, marking from roots,
1333// precleaning, final re-mark, sweep etc.)  This is normally done at the head
1334// of the loop which switches the phases. The background collector does some
1335// of the phases (initial mark, final re-mark) with the world stopped.
1336// Because of locking involved in stopping the world,
1337// the foreground collector should not block waiting for the background
1338// collector when it is doing a stop-the-world phase.  The background
1339// collector will yield the baton at an additional point just before
1340// it enters a stop-the-world phase.  Once the world is stopped, the
1341// background collector checks the phase of the collection.  If the
1342// phase has not changed, it proceeds with the collection.  If the
1343// phase has changed, it skips that phase of the collection.  See
1344// the comments on the use of the Heap_lock in collect_in_background().
1345//
1346// Variable used in baton passing.
1347//   _foregroundGCIsActive - Set to true by the foreground collector when
1348//      it wants the baton.  The foreground clears it when it has finished
1349//      the collection.
1350//   _foregroundGCShouldWait - Set to true by the background collector
1351//      when it is running.  The foreground collector waits while
1352//      _foregroundGCShouldWait is true.
1353//  CGC_lock - monitor used to protect access to the above variables
1354//      and to notify the foreground and background collectors.
1355//  _collectorState - current state of the CMS collection.
1356//
1357// The foreground collector
1358//   acquires the CGC_lock
1359//   sets _foregroundGCIsActive
1360//   waits on the CGC_lock for _foregroundGCShouldWait to be false
1361//     various locks acquired in preparation for the collection
1362//     are released so as not to block the background collector
1363//     that is in the midst of a collection
1364//   proceeds with the collection
1365//   clears _foregroundGCIsActive
1366//   returns
1367//
1368// The background collector in a loop iterating on the phases of the
1369//      collection
1370//   acquires the CGC_lock
1371//   sets _foregroundGCShouldWait
1372//   if _foregroundGCIsActive is set
1373//     clears _foregroundGCShouldWait, notifies CGC_lock
1374//     waits on CGC_lock for _foregroundGCIsActive to become false
1375//     and exits the loop.
1376//   otherwise
1377//     proceed with that phase of the collection
1378//     if the phase is a stop-the-world phase,
1379//       yield the baton once more just before enqueueing
1380//       the stop-world CMS operation (executed by the VM thread).
1381//   returns after all phases of the collection are done
1382//
1383
1384void CMSCollector::acquire_control_and_collect(bool full,
1385        bool clear_all_soft_refs) {
1386  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1387  assert(!Thread::current()->is_ConcurrentGC_thread(),
1388         "shouldn't try to acquire control from self!");
1389
1390  // Start the protocol for acquiring control of the
1391  // collection from the background collector (aka CMS thread).
1392  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1393         "VM thread should have CMS token");
1394  // Remember the possibly interrupted state of an ongoing
1395  // concurrent collection
1396  CollectorState first_state = _collectorState;
1397
1398  // Signal to a possibly ongoing concurrent collection that
1399  // we want to do a foreground collection.
1400  _foregroundGCIsActive = true;
1401
1402  // Release locks and wait for a notify from the background collector;
1403  // releasing the locks is only necessary for phases which
1404  // yield to improve the granularity of the collection.
1405  assert_lock_strong(bitMapLock());
1406  // We need to lock the Free list lock for the space that we are
1407  // currently collecting.
1408  assert(haveFreelistLocks(), "Must be holding free list locks");
1409  bitMapLock()->unlock();
1410  releaseFreelistLocks();
1411  {
1412    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1413    if (_foregroundGCShouldWait) {
1414      // We are going to be waiting for action from the CMS thread;
1415      // it had better not be gone (for instance at shutdown)!
1416      assert(ConcurrentMarkSweepThread::cmst() != NULL,
1417             "CMS thread must be running");
1418      // Wait here until the background collector gives us the go-ahead
1419      ConcurrentMarkSweepThread::clear_CMS_flag(
1420        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1421      // Get a possibly blocked CMS thread going:
1422      //   Note that we set _foregroundGCIsActive true above,
1423      //   without protection of the CGC_lock.
1424      CGC_lock->notify();
1425      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1426             "Possible deadlock");
1427      while (_foregroundGCShouldWait) {
1428        // wait for notification
1429        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1430        // Possibility of delay/starvation here, since CMS token does
1431        // not know to give priority to the VM thread? Actually, I think
1432        // there wouldn't be any delay/starvation, but the proof of
1433        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1434      }
1435      ConcurrentMarkSweepThread::set_CMS_flag(
1436        ConcurrentMarkSweepThread::CMS_vm_has_token);
1437    }
1438  }
1439  // The CMS_token is already held.  Get back the other locks.
1440  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1441         "VM thread should have CMS token");
1442  getFreelistLocks();
1443  bitMapLock()->lock_without_safepoint_check();
1444  log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1445                       p2i(Thread::current()), first_state);
1446  log_debug(gc, state)("    gets control with state %d", _collectorState);
1447
1448  // Inform cms gen if this was due to partial collection failing.
1449  // The CMS gen may use this fact to determine its expansion policy.
1450  GenCollectedHeap* gch = GenCollectedHeap::heap();
1451  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1452    assert(!_cmsGen->incremental_collection_failed(),
1453           "Should have been noticed, reacted to and cleared");
1454    _cmsGen->set_incremental_collection_failed();
1455  }
1456
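  // If a concurrent cycle was already in progress when we took over, report
  // whether it was interrupted by an external (user or serviceability) request
  // or counts as a concurrent mode failure.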
1457  if (first_state > Idling) {
1458    report_concurrent_mode_interruption();
1459  }
1460
1461  set_did_compact(true);
1462
1463  // If the collection is being acquired from the background
1464  // collector, there may be references on the discovered
1465  // references lists.  Abandon those references, since some
1466  // of them may have become unreachable after concurrent
1467  // discovery; the STW compacting collector will redo discovery
1468  // more precisely, without being subject to floating garbage.
1469  // Leaving otherwise unreachable references in the discovered
1470  // lists would require special handling.
1471  ref_processor()->disable_discovery();
1472  ref_processor()->abandon_partial_discovery();
1473  ref_processor()->verify_no_references_recorded();
1474
1475  if (first_state > Idling) {
1476    save_heap_summary();
1477  }
1478
1479  do_compaction_work(clear_all_soft_refs);
1480
1481  // Has the GC time limit been exceeded?
1482  size_t max_eden_size = _young_gen->max_eden_size();
1483  GCCause::Cause gc_cause = gch->gc_cause();
1484  size_policy()->check_gc_overhead_limit(_young_gen->used(),
1485                                         _young_gen->eden()->used(),
1486                                         _cmsGen->max_capacity(),
1487                                         max_eden_size,
1488                                         full,
1489                                         gc_cause,
1490                                         gch->collector_policy());
1491
1492  // Reset the expansion cause, now that we just completed
1493  // a collection cycle.
1494  clear_expansion_cause();
1495  _foregroundGCIsActive = false;
1496  return;
1497}
1498
1499// Resize the tenured generation
1500// after obtaining the free list locks for the
1501// two generations.
1502void CMSCollector::compute_new_size() {
1503  assert_locked_or_safepoint(Heap_lock);
1504  FreelistLocker z(this);
1505  MetaspaceGC::compute_new_size();
1506  _cmsGen->compute_new_size_free_list();
1507}
1508
1509// A work method used by the foreground collector to do
1510// a mark-sweep-compact.
1511void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1512  GenCollectedHeap* gch = GenCollectedHeap::heap();
1513
1514  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1515  gc_timer->register_gc_start();
1516
1517  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1518  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1519
1520  GCTraceTime(Trace, gc) t("CMS:MSC");
1521
1522  // Temporarily widen the span of the weak reference processing to
1523  // the entire heap.
1524  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1525  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1526  // Temporarily, clear the "is_alive_non_header" field of the
1527  // reference processor.
1528  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1529  // Temporarily make reference _processing_ single threaded (non-MT).
1530  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1531  // Temporarily make refs discovery atomic
1532  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1533  // Temporarily make reference _discovery_ single threaded (non-MT)
1534  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1535
1536  ref_processor()->set_enqueuing_is_done(false);
1537  ref_processor()->enable_discovery();
1538  ref_processor()->setup_policy(clear_all_soft_refs);
1539  // If an asynchronous collection finishes, the _modUnionTable is
1540  // all clear.  If we are taking over the collection from an asynchronous
1541  // collection still in progress, clear the _modUnionTable.
1542  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1543    "_modUnionTable should be clear if the baton was not passed");
1544  _modUnionTable.clear_all();
1545  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1546    "mod union for klasses should be clear if the baton was passed");
1547  _ct->klass_rem_set()->clear_mod_union();
1548
1549  // We must adjust the allocation statistics being maintained
1550  // in the free list space. We do so by reading and clearing
1551  // the sweep timer and updating the block flux rate estimates below.
1552  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1553  if (_inter_sweep_timer.is_active()) {
1554    _inter_sweep_timer.stop();
1555    // Note that we do not use this sample to update the _inter_sweep_estimate.
1556    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1557                                            _inter_sweep_estimate.padded_average(),
1558                                            _intra_sweep_estimate.padded_average());
1559  }
1560
1561  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1562  #ifdef ASSERT
1563    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1564    size_t free_size = cms_space->free();
1565    assert(free_size ==
1566           pointer_delta(cms_space->end(), cms_space->compaction_top())
1567           * HeapWordSize,
1568      "All the free space should be compacted into one chunk at top");
1569    assert(cms_space->dictionary()->total_chunk_size(
1570                                      debug_only(cms_space->freelistLock())) == 0 ||
1571           cms_space->totalSizeInIndexedFreeLists() == 0,
1572      "All the free space should be in a single chunk");
1573    size_t num = cms_space->totalCount();
1574    assert((free_size == 0 && num == 0) ||
1575           (free_size > 0  && (num == 1 || num == 2)),
1576         "There should be at most 2 free chunks after compaction");
1577  #endif // ASSERT
1578  _collectorState = Resetting;
1579  assert(_restart_addr == NULL,
1580         "Should have been NULL'd before baton was passed");
1581  reset_stw();
1582  _cmsGen->reset_after_compaction();
1583  _concurrent_cycles_since_last_unload = 0;
1584
1585  // Clear any data recorded in the PLAB chunk arrays.
1586  if (_survivor_plab_array != NULL) {
1587    reset_survivor_plab_arrays();
1588  }
1589
1590  // Adjust the per-size allocation stats for the next epoch.
1591  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1592  // Restart the "inter sweep timer" for the next epoch.
1593  _inter_sweep_timer.reset();
1594  _inter_sweep_timer.start();
1595
1596  gc_timer->register_gc_end();
1597
1598  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1599
1600  // For a mark-sweep-compact, compute_new_size() will be called
1601  // in the heap's do_collection() method.
1602}
1603
1604void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1605  LogHandle(gc, heap) log;
1606  if (!log.is_trace()) {
1607    return;
1608  }
1609
1610  ContiguousSpace* eden_space = _young_gen->eden();
1611  ContiguousSpace* from_space = _young_gen->from();
1612  ContiguousSpace* to_space   = _young_gen->to();
1613  // Eden
1614  if (_eden_chunk_array != NULL) {
1615    log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1616              p2i(eden_space->bottom()), p2i(eden_space->top()),
1617              p2i(eden_space->end()), eden_space->capacity());
1618    log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
1619              _eden_chunk_index, _eden_chunk_capacity);
1620    for (size_t i = 0; i < _eden_chunk_index; i++) {
1621      log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
1622    }
1623  }
1624  // Survivor
1625  if (_survivor_chunk_array != NULL) {
1626    log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1627              p2i(from_space->bottom()), p2i(from_space->top()),
1628              p2i(from_space->end()), from_space->capacity());
1629    log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
1630              _survivor_chunk_index, _survivor_chunk_capacity);
1631    for (size_t i = 0; i < _survivor_chunk_index; i++) {
1632      log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
1633    }
1634  }
1635}
1636
1637void CMSCollector::getFreelistLocks() const {
1638  // Get locks for all free lists in all generations that this
1639  // collector is responsible for
1640  _cmsGen->freelistLock()->lock_without_safepoint_check();
1641}
1642
1643void CMSCollector::releaseFreelistLocks() const {
1644  // Release locks for all free lists in all generations that this
1645  // collector is responsible for
1646  _cmsGen->freelistLock()->unlock();
1647}
1648
1649bool CMSCollector::haveFreelistLocks() const {
1650  // Check locks for all free lists in all generations that this
1651  // collector is responsible for
1652  assert_lock_strong(_cmsGen->freelistLock());
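  // assert_lock_strong() compiles away in product builds, so this method
  // must only ever be reached in debug builds.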
1653  PRODUCT_ONLY(ShouldNotReachHere());
1654  return true;
1655}
1656
1657// A utility class that is used by the CMS collector to
1658// temporarily "release" the foreground collector from its
1659// usual obligation to wait for the background collector to
1660// complete an ongoing phase before proceeding.
1661class ReleaseForegroundGC: public StackObj {
1662 private:
1663  CMSCollector* _c;
1664 public:
1665  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1666    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1667    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1668    // allow a potentially blocked foreground collector to proceed
1669    _c->_foregroundGCShouldWait = false;
1670    if (_c->_foregroundGCIsActive) {
1671      CGC_lock->notify();
1672    }
1673    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1674           "Possible deadlock");
1675  }
1676
1677  ~ReleaseForegroundGC() {
1678    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1679    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1680    _c->_foregroundGCShouldWait = true;
1681  }
1682};
1683
1684void CMSCollector::collect_in_background(GCCause::Cause cause) {
1685  assert(Thread::current()->is_ConcurrentGC_thread(),
1686    "A CMS asynchronous collection is only allowed on a CMS thread.");
1687
1688  GenCollectedHeap* gch = GenCollectedHeap::heap();
1689  {
1690    bool safepoint_check = Mutex::_no_safepoint_check_flag;
1691    MutexLockerEx hl(Heap_lock, safepoint_check);
1692    FreelistLocker fll(this);
1693    MutexLockerEx x(CGC_lock, safepoint_check);
1694    if (_foregroundGCIsActive) {
1695      // The foreground collector is active. Skip this
1696      // background collection.
1697      assert(!_foregroundGCShouldWait, "Should be clear");
1698      return;
1699    } else {
1700      assert(_collectorState == Idling, "Should be idling before start.");
1701      _collectorState = InitialMarking;
1702      register_gc_start(cause);
1703      // Reset the expansion cause, now that we are about to begin
1704      // a new cycle.
1705      clear_expansion_cause();
1706
1707      // Clear the MetaspaceGC flag since a concurrent collection
1708      // is starting but also clear it after the collection.
1709      MetaspaceGC::set_should_concurrent_collect(false);
1710    }
1711    // Decide if we want to enable class unloading as part of the
1712    // ensuing concurrent GC cycle.
1713    update_should_unload_classes();
1714    _full_gc_requested = false;           // acks all outstanding full gc requests
1715    _full_gc_cause = GCCause::_no_gc;
1716    // Signal that we are about to start a collection
1717    gch->increment_total_full_collections();  // ... starting a collection cycle
1718    _collection_count_start = gch->total_full_collections();
1719  }
1720
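  // Remember the occupancy at the start of the cycle for the heap change
  // logged when the cycle completes.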
1721  size_t prev_used = _cmsGen->used();
1722
1723  // The change of the collection state is normally done at this level;
1724  // the exceptions are phases that are executed while the world is
1725  // stopped.  For those phases the change of state is done while the
1726  // world is stopped.  For baton passing purposes this allows the
1727  // background collector to finish the phase and change state atomically.
1728  // The foreground collector cannot wait on a phase that is done
1729  // while the world is stopped because the foreground collector already
1730  // has the world stopped and would deadlock.
1731  while (_collectorState != Idling) {
1732    log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1733                         p2i(Thread::current()), _collectorState);
1734    // The foreground collector
1735    //   holds the Heap_lock throughout its collection.
1736    //   holds the CMS token (but not the lock)
1737    //     except while it is waiting for the background collector to yield.
1738    //
1739    // The foreground collector should be blocked (not for long)
1740    //   if the background collector is about to start a phase
1741    //   executed with world stopped.  If the background
1742    //   collector has already started such a phase, the
1743    //   foreground collector is blocked waiting for the
1744    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1745    //   are executed in the VM thread.
1746    //
1747    // The locking order is
1748    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1749    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1750    //   CMS token  (claimed in
1751    //                stop_world_and_do() -->
1752    //                  safepoint_synchronize() -->
1753    //                    CMSThread::synchronize())
1754
1755    {
1756      // Check if the FG collector wants us to yield.
1757      CMSTokenSync x(true); // is cms thread
1758      if (waitForForegroundGC()) {
1759        // We yielded to a foreground GC, nothing more to be
1760        // done this round.
1761        assert(_foregroundGCShouldWait == false, "We set it to false in "
1762               "waitForForegroundGC()");
1763        log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1764                             p2i(Thread::current()), _collectorState);
1765        return;
1766      } else {
1767        // The background collector can run but check to see if the
1768        // foreground collector has done a collection while the
1769        // background collector was waiting to get the CGC_lock
1770        // above.  If yes, break so that _foregroundGCShouldWait
1771        // is cleared before returning.
1772        if (_collectorState == Idling) {
1773          break;
1774        }
1775      }
1776    }
1777
1778    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1779      "should be waiting");
1780
1781    switch (_collectorState) {
1782      case InitialMarking:
1783        {
1784          ReleaseForegroundGC x(this);
1785          stats().record_cms_begin();
1786          VM_CMS_Initial_Mark initial_mark_op(this);
1787          VMThread::execute(&initial_mark_op);
1788        }
1789        // The collector state may be any legal state at this point
1790        // since the background collector may have yielded to the
1791        // foreground collector.
1792        break;
1793      case Marking:
1794        // initial marking in checkpointRootsInitialWork has been completed
1795        if (markFromRoots()) { // we were successful
1796          assert(_collectorState == Precleaning, "Collector state should "
1797            "have changed");
1798        } else {
1799          assert(_foregroundGCIsActive, "Internal state inconsistency");
1800        }
1801        break;
1802      case Precleaning:
1803        // marking from roots in markFromRoots has been completed
1804        preclean();
1805        assert(_collectorState == AbortablePreclean ||
1806               _collectorState == FinalMarking,
1807               "Collector state should have changed");
1808        break;
1809      case AbortablePreclean:
1810        abortable_preclean();
1811        assert(_collectorState == FinalMarking, "Collector state should "
1812          "have changed");
1813        break;
1814      case FinalMarking:
1815        {
1816          ReleaseForegroundGC x(this);
1817
1818          VM_CMS_Final_Remark final_remark_op(this);
1819          VMThread::execute(&final_remark_op);
1820        }
1821        assert(_foregroundGCShouldWait, "block post-condition");
1822        break;
1823      case Sweeping:
1824        // final marking in checkpointRootsFinal has been completed
1825        sweep();
1826        assert(_collectorState == Resizing, "Collector state change "
1827          "to Resizing must be done under the free_list_lock");
1828
1829      case Resizing: {
1830        // Sweeping has been completed...
1831        // At this point the background collection has completed.
1832        // Don't move the call to compute_new_size() down
1833        // into code that might be executed if the background
1834        // collection was preempted.
1835        {
1836          ReleaseForegroundGC x(this);   // unblock FG collection
1837          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1838          CMSTokenSync        z(true);   // not strictly needed.
1839          if (_collectorState == Resizing) {
1840            compute_new_size();
1841            save_heap_summary();
1842            _collectorState = Resetting;
1843          } else {
1844            assert(_collectorState == Idling, "The state should only change"
1845                   " because the foreground collector has finished the collection");
1846          }
1847        }
1848        break;
1849      }
1850      case Resetting:
1851        // CMS heap resizing has been completed
1852        reset_concurrent();
1853        assert(_collectorState == Idling, "Collector state should "
1854          "have changed");
1855
1856        MetaspaceGC::set_should_concurrent_collect(false);
1857
1858        stats().record_cms_end();
1859        // Don't move the concurrent_phases_end() and compute_new_size()
1860        // calls to here because a preempted background collection
1861        // has it's state set to "Resetting".
1862        break;
1863      case Idling:
1864      default:
1865        ShouldNotReachHere();
1866        break;
1867    }
1868    log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1869                         p2i(Thread::current()), _collectorState);
1870    assert(_foregroundGCShouldWait, "block post-condition");
1871  }
1872
1873  // Should this be in gc_epilogue?
1874  collector_policy()->counters()->update_counters();
1875
1876  {
1877    // Clear _foregroundGCShouldWait and, in the event that the
1878    // foreground collector is waiting, notify it, before
1879    // returning.
1880    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1881    _foregroundGCShouldWait = false;
1882    if (_foregroundGCIsActive) {
1883      CGC_lock->notify();
1884    }
1885    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1886           "Possible deadlock");
1887  }
1888  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1889                       p2i(Thread::current()), _collectorState);
1890  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1891                     prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);
1892}
1893
1894void CMSCollector::register_gc_start(GCCause::Cause cause) {
1895  _cms_start_registered = true;
1896  _gc_timer_cm->register_gc_start();
1897  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1898}
1899
1900void CMSCollector::register_gc_end() {
1901  if (_cms_start_registered) {
1902    report_heap_summary(GCWhen::AfterGC);
1903
1904    _gc_timer_cm->register_gc_end();
1905    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1906    _cms_start_registered = false;
1907  }
1908}
1909
1910void CMSCollector::save_heap_summary() {
1911  GenCollectedHeap* gch = GenCollectedHeap::heap();
1912  _last_heap_summary = gch->create_heap_summary();
1913  _last_metaspace_summary = gch->create_metaspace_summary();
1914}
1915
1916void CMSCollector::report_heap_summary(GCWhen::Type when) {
1917  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1918  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1919}
1920
1921bool CMSCollector::waitForForegroundGC() {
1922  bool res = false;
1923  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1924         "CMS thread should have CMS token");
1925  // Block the foreground collector until the
1926  // background collector decides whether to
1927  // yield.
1928  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1929  _foregroundGCShouldWait = true;
1930  if (_foregroundGCIsActive) {
1931    // The background collector yields to the
1932    // foreground collector and returns a value
1933    // indicating that it has yielded.  The foreground
1934    // collector can proceed.
1935    res = true;
1936    _foregroundGCShouldWait = false;
1937    ConcurrentMarkSweepThread::clear_CMS_flag(
1938      ConcurrentMarkSweepThread::CMS_cms_has_token);
1939    ConcurrentMarkSweepThread::set_CMS_flag(
1940      ConcurrentMarkSweepThread::CMS_cms_wants_token);
1941    // Get a possibly blocked foreground thread going
1942    CGC_lock->notify();
1943    log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
1944                         p2i(Thread::current()), _collectorState);
1945    while (_foregroundGCIsActive) {
1946      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1947    }
1948    ConcurrentMarkSweepThread::set_CMS_flag(
1949      ConcurrentMarkSweepThread::CMS_cms_has_token);
1950    ConcurrentMarkSweepThread::clear_CMS_flag(
1951      ConcurrentMarkSweepThread::CMS_cms_wants_token);
1952  }
1953  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
1954                       p2i(Thread::current()), _collectorState);
1955  return res;
1956}
1957
1958// Because of the need to lock the free lists and other structures in
1959// the collector, common to all the generations that the collector is
1960// collecting, we need the gc_prologues of individual CMS generations to
1961// delegate to their collector. It may have been simpler had the
1962// current infrastructure allowed one to call a prologue on a
1963// collector. In the absence of that we have the generation's
1964// prologue delegate to the collector, which delegates back
1965// some "local" work to a worker method in the individual generations
1966// that it's responsible for collecting, while itself doing any
1967// work common to all generations it's responsible for. A similar
1968// comment applies to the gc_epilogue()s.
1969// The role of the variable _between_prologue_and_epilogue is to
1970// enforce the invocation protocol.
1971void CMSCollector::gc_prologue(bool full) {
1972  // Call gc_prologue_work() for the CMSGen
1973  // we are responsible for.
1974
1975  // The following locking discipline assumes that we are only called
1976  // when the world is stopped.
1977  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
1978
1979  // The CMSCollector prologue must call the gc_prologues for the
1980  // "generations" that it's responsible
1981  // for.
1982
1983  assert(   Thread::current()->is_VM_thread()
1984         || (   CMSScavengeBeforeRemark
1985             && Thread::current()->is_ConcurrentGC_thread()),
1986         "Incorrect thread type for prologue execution");
1987
1988  if (_between_prologue_and_epilogue) {
1989    // We have already been invoked; this is a gc_prologue delegation
1990    // from yet another CMS generation that we are responsible for, just
1991    // ignore it since all relevant work has already been done.
1992    return;
1993  }
1994
1995  // set a bit saying prologue has been called; cleared in epilogue
1996  _between_prologue_and_epilogue = true;
1997  // Claim locks for common data structures, then call gc_prologue_work()
1998  // for each CMSGen.
1999
2000  getFreelistLocks();   // gets free list locks on constituent spaces
2001  bitMapLock()->lock_without_safepoint_check();
2002
2003  // Should call gc_prologue_work() for all cms gens we are responsible for
2004  bool duringMarking =    _collectorState >= Marking
2005                         && _collectorState < Sweeping;
2006
2007  // The young collections clear the modified oops state, which tells if
2008  // there are any modified oops in the class. The remark phase also needs
2009  // that information. Tell the young collection to save the union of all
2010  // modified klasses.
2011  if (duringMarking) {
2012    _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2013  }
2014
2015  bool registerClosure = duringMarking;
2016
2017  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2018
2019  if (!full) {
2020    stats().record_gc0_begin();
2021  }
2022}
2023
2024void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2025
2026  _capacity_at_prologue = capacity();
2027  _used_at_prologue = used();
2028
2029  // Delegate to CMScollector which knows how to coordinate between
2030  // this and any other CMS generations that it is responsible for
2031  // collecting.
2032  collector()->gc_prologue(full);
2033}
2034
2035// This is a "private" interface for use by this generation's CMSCollector.
2036// Not to be called directly by any other entity (for instance,
2037// GenCollectedHeap, which calls the "public" gc_prologue method above).
2038void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2039  bool registerClosure, ModUnionClosure* modUnionClosure) {
2040  assert(!incremental_collection_failed(), "Shouldn't be set yet");
2041  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2042    "Should be NULL");
2043  if (registerClosure) {
2044    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2045  }
2046  cmsSpace()->gc_prologue();
2047  // Clear stat counters
2048  NOT_PRODUCT(
2049    assert(_numObjectsPromoted == 0, "check");
2050    assert(_numWordsPromoted   == 0, "check");
2051    log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
2052                                 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2053    _numObjectsAllocated = 0;
2054    _numWordsAllocated   = 0;
2055  )
2056}
2057
2058void CMSCollector::gc_epilogue(bool full) {
2059  // The following locking discipline assumes that we are only called
2060  // when the world is stopped.
2061  assert(SafepointSynchronize::is_at_safepoint(),
2062         "world is stopped assumption");
2063
2064  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2065  // if linear allocation blocks need to be appropriately marked to allow
2066  // the blocks to be parsable. We also check here whether we need to nudge the
2067  // CMS collector thread to start a new cycle (if it's not already active).
2068  assert(   Thread::current()->is_VM_thread()
2069         || (   CMSScavengeBeforeRemark
2070             && Thread::current()->is_ConcurrentGC_thread()),
2071         "Incorrect thread type for epilogue execution");
2072
2073  if (!_between_prologue_and_epilogue) {
2074    // We have already been invoked; this is a gc_epilogue delegation
2075    // from yet another CMS generation that we are responsible for, just
2076    // ignore it since all relevant work has already been done.
2077    return;
2078  }
2079  assert(haveFreelistLocks(), "must have freelist locks");
2080  assert_lock_strong(bitMapLock());
2081
2082  _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2083
2084  _cmsGen->gc_epilogue_work(full);
2085
2086  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2087    // in case sampling was not already enabled, enable it
2088    _start_sampling = true;
2089  }
2090  // reset _eden_chunk_array so sampling starts afresh
2091  _eden_chunk_index = 0;
2092
2093  size_t cms_used   = _cmsGen->cmsSpace()->used();
2094
2095  // update performance counters - this uses a special version of
2096  // update_counters() that allows the utilization to be passed as a
2097  // parameter, avoiding multiple calls to used().
2098  //
2099  _cmsGen->update_counters(cms_used);
2100
2101  bitMapLock()->unlock();
2102  releaseFreelistLocks();
2103
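  // Clean up the chunk pool eagerly here unless that work is deferred to the
  // asynchronous chunk pool cleaner.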
2104  if (!CleanChunkPoolAsync) {
2105    Chunk::clean_chunk_pool();
2106  }
2107
2108  set_did_compact(false);
2109  _between_prologue_and_epilogue = false;  // ready for next cycle
2110}
2111
2112void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2113  collector()->gc_epilogue(full);
2114
2115  // Also reset promotion tracking in par gc thread states.
2116  for (uint i = 0; i < ParallelGCThreads; i++) {
2117    _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2118  }
2119}
2120
2121void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2122  assert(!incremental_collection_failed(), "Should have been cleared");
2123  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2124  cmsSpace()->gc_epilogue();
2125    // Print stat counters
2126  NOT_PRODUCT(
2127    assert(_numObjectsAllocated == 0, "check");
2128    assert(_numWordsAllocated == 0, "check");
2129    log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
2130                                     _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2131    _numObjectsPromoted = 0;
2132    _numWordsPromoted   = 0;
2133  )
2134
2135  // The call down the chain in contiguous_available() needs the freelistLock,
2136  // so print this out before releasing the freelistLock.
2137  log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
2138}
2139
2140#ifndef PRODUCT
2141bool CMSCollector::have_cms_token() {
2142  Thread* thr = Thread::current();
2143  if (thr->is_VM_thread()) {
2144    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2145  } else if (thr->is_ConcurrentGC_thread()) {
2146    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2147  } else if (thr->is_GC_task_thread()) {
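    // A GC worker thread is considered to hold the token only when the VM
    // thread holds it and the worker also holds the ParGCRareEvent_lock.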
2148    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2149           ParGCRareEvent_lock->owned_by_self();
2150  }
2151  return false;
2152}
2153
2154// Check reachability of the given heap address in CMS generation,
2155// treating all other generations as roots.
2156bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2157  // We could "guarantee" below, rather than assert, but I'll
2158  // leave these as "asserts" so that an adventurous debugger
2159  // could try this in the product build provided some subset of
2160  // the conditions were met, provided they were interested in the
2161  // results and knew that the computation below wouldn't interfere
2162  // with other concurrent computations mutating the structures
2163  // being read or written.
2164  assert(SafepointSynchronize::is_at_safepoint(),
2165         "Else mutations in object graph will make answer suspect");
2166  assert(have_cms_token(), "Should hold cms token");
2167  assert(haveFreelistLocks(), "must hold free list locks");
2168  assert_lock_strong(bitMapLock());
2169
2170  // Clear the marking bit map array before starting, but, just
2171  // for kicks, first report if the given address is already marked
2172  tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2173                _markBitMap.isMarked(addr) ? "" : " not");
2174
2175  if (verify_after_remark()) {
2176    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2177    bool result = verification_mark_bm()->isMarked(addr);
2178    tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2179                  result ? "IS" : "is NOT");
2180    return result;
2181  } else {
2182    tty->print_cr("Could not compute result");
2183    return false;
2184  }
2185}
2186#endif
2187
2188void
2189CMSCollector::print_on_error(outputStream* st) {
2190  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2191  if (collector != NULL) {
2192    CMSBitMap* bitmap = &collector->_markBitMap;
2193    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2194    bitmap->print_on_error(st, " Bits: ");
2195
2196    st->cr();
2197
2198    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2199    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2200    mut_bitmap->print_on_error(st, " Bits: ");
2201  }
2202}
2203
2204////////////////////////////////////////////////////////
2205// CMS Verification Support
2206////////////////////////////////////////////////////////
2207// Following the remark phase, the following invariant
2208// should hold -- each object in the CMS heap which is
2209// marked in the verification_mark_bm() should also be marked in markBitMap().
2210
2211class VerifyMarkedClosure: public BitMapClosure {
2212  CMSBitMap* _marks;
2213  bool       _failed;
2214
2215 public:
2216  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2217
2218  bool do_bit(size_t offset) {
2219    HeapWord* addr = _marks->offsetToHeapWord(offset);
2220    if (!_marks->isMarked(addr)) {
2221      LogHandle(gc, verify) log;
2222      ResourceMark rm;
2223      oop(addr)->print_on(log.info_stream());
2224      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2225      _failed = true;
2226    }
2227    return true;
2228  }
2229
2230  bool failed() { return _failed; }
2231};
2232
2233bool CMSCollector::verify_after_remark() {
2234  GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking.");
2235  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2236  static bool init = false;
2237
2238  assert(SafepointSynchronize::is_at_safepoint(),
2239         "Else mutations in object graph will make answer suspect");
2240  assert(have_cms_token(),
2241         "Else there may be mutual interference in use of "
2242         " verification data structures");
2243  assert(_collectorState > Marking && _collectorState <= Sweeping,
2244         "Else marking info checked here may be obsolete");
2245  assert(haveFreelistLocks(), "must hold free list locks");
2246  assert_lock_strong(bitMapLock());
2247
2248
2249  // Allocate marking bit map if not already allocated
2250  if (!init) { // first time
2251    if (!verification_mark_bm()->allocate(_span)) {
2252      return false;
2253    }
2254    init = true;
2255  }
2256
2257  assert(verification_mark_stack()->isEmpty(), "Should be empty");
2258
2259  // Turn off refs discovery -- so we will be tracing through refs.
2260  // This is as intended, because by this time
2261  // GC must already have cleared any refs that need to be cleared,
2262  // and traced those that need to be marked; moreover,
2263  // the marking done here is not going to interfere in any
2264  // way with the marking information used by GC.
2265  NoRefDiscovery no_discovery(ref_processor());
2266
2267#if defined(COMPILER2) || INCLUDE_JVMCI
2268  DerivedPointerTableDeactivate dpt_deact;
2269#endif
2270
2271  // Clear any marks from a previous round
2272  verification_mark_bm()->clear_all();
2273  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2274  verify_work_stacks_empty();
2275
2276  GenCollectedHeap* gch = GenCollectedHeap::heap();
2277  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2278  // Update the saved marks which may affect the root scans.
2279  gch->save_marks();
2280
2281  if (CMSRemarkVerifyVariant == 1) {
2282    // In this first variant of verification, we complete
2283    // all marking, then check if the new marks-vector is
2284    // a subset of the CMS marks-vector.
2285    verify_after_remark_work_1();
2286  } else if (CMSRemarkVerifyVariant == 2) {
2287    // In this second variant of verification, we flag an error
2288    // (i.e. an object reachable in the new marks-vector not reachable
2289    // in the CMS marks-vector) immediately, also indicating the
2290// identity of an object (A) that references the unmarked object (B) --
2291    // presumably, a mutation to A failed to be picked up by preclean/remark?
2292    verify_after_remark_work_2();
2293  } else {
2294    warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2295            CMSRemarkVerifyVariant);
2296  }
2297  return true;
2298}
2299
2300void CMSCollector::verify_after_remark_work_1() {
2301  ResourceMark rm;
2302  HandleMark  hm;
2303  GenCollectedHeap* gch = GenCollectedHeap::heap();
2304
2305  // Get a clear set of claim bits for the roots processing to work with.
2306  ClassLoaderDataGraph::clear_claimed_marks();
2307
2308  // Mark from roots one level into CMS
2309  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2310  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2311
2312  {
2313    StrongRootsScope srs(1);
2314
2315    gch->gen_process_roots(&srs,
2316                           GenCollectedHeap::OldGen,
2317                           true,   // young gen as roots
2318                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2319                           should_unload_classes(),
2320                           &notOlder,
2321                           NULL,
2322                           NULL);
2323  }
2324
2325  // Now mark from the roots
2326  MarkFromRootsClosure markFromRootsClosure(this, _span,
2327    verification_mark_bm(), verification_mark_stack(),
2328    false /* don't yield */, true /* verifying */);
2329  assert(_restart_addr == NULL, "Expected pre-condition");
2330  verification_mark_bm()->iterate(&markFromRootsClosure);
2331  while (_restart_addr != NULL) {
2332    // Deal with stack overflow: by restarting at the indicated
2333    // address.
2334    HeapWord* ra = _restart_addr;
2335    markFromRootsClosure.reset(ra);
2336    _restart_addr = NULL;
2337    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2338  }
2339  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2340  verify_work_stacks_empty();
2341
2342  // Marking completed -- now verify that each bit marked in
2343  // verification_mark_bm() is also marked in markBitMap(); flag all
2344  // errors by printing corresponding objects.
2345  VerifyMarkedClosure vcl(markBitMap());
2346  verification_mark_bm()->iterate(&vcl);
2347  if (vcl.failed()) {
2348    LogHandle(gc, verify) log;
2349    log.info("Verification failed");
2350    ResourceMark rm;
2351    gch->print_on(log.info_stream());
2352    fatal("CMS: failed marking verification after remark");
2353  }
2354}
2355
2356class VerifyKlassOopsKlassClosure : public KlassClosure {
2357  class VerifyKlassOopsClosure : public OopClosure {
2358    CMSBitMap* _bitmap;
2359   public:
2360    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2361    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2362    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2363  } _oop_closure;
2364 public:
2365  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2366  void do_klass(Klass* k) {
2367    k->oops_do(&_oop_closure);
2368  }
2369};
2370
2371void CMSCollector::verify_after_remark_work_2() {
2372  ResourceMark rm;
2373  HandleMark  hm;
2374  GenCollectedHeap* gch = GenCollectedHeap::heap();
2375
2376  // Get a clear set of claim bits for the roots processing to work with.
2377  ClassLoaderDataGraph::clear_claimed_marks();
2378
2379  // Mark from roots one level into CMS
2380  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2381                                     markBitMap());
2382  CLDToOopClosure cld_closure(&notOlder, true);
2383
2384  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2385
2386  {
2387    StrongRootsScope srs(1);
2388
2389    gch->gen_process_roots(&srs,
2390                           GenCollectedHeap::OldGen,
2391                           true,   // young gen as roots
2392                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2393                           should_unload_classes(),
2394                           &notOlder,
2395                           NULL,
2396                           &cld_closure);
2397  }
2398
2399  // Now mark from the roots
2400  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2401    verification_mark_bm(), markBitMap(), verification_mark_stack());
2402  assert(_restart_addr == NULL, "Expected pre-condition");
2403  verification_mark_bm()->iterate(&markFromRootsClosure);
2404  while (_restart_addr != NULL) {
2405    // Deal with stack overflow: by restarting at the indicated
2406    // address.
2407    HeapWord* ra = _restart_addr;
2408    markFromRootsClosure.reset(ra);
2409    _restart_addr = NULL;
2410    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2411  }
2412  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2413  verify_work_stacks_empty();
2414
2415  VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2416  ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2417
2418  // Marking completed -- now verify that each bit marked in
2419  // verification_mark_bm() is also marked in markBitMap(); flag all
2420  // errors by printing corresponding objects.
2421  VerifyMarkedClosure vcl(markBitMap());
2422  verification_mark_bm()->iterate(&vcl);
2423  assert(!vcl.failed(), "Else verification above should not have succeeded");
2424}
2425
2426void ConcurrentMarkSweepGeneration::save_marks() {
2427  // delegate to CMS space
2428  cmsSpace()->save_marks();
2429  for (uint i = 0; i < ParallelGCThreads; i++) {
2430    _par_gc_thread_states[i]->promo.startTrackingPromotions();
2431  }
2432}
2433
2434bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2435  return cmsSpace()->no_allocs_since_save_marks();
2436}
2437
2438#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2439                                                                \
2440void ConcurrentMarkSweepGeneration::                            \
2441oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2442  cl->set_generation(this);                                     \
2443  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2444  cl->reset_generation();                                       \
2445  save_marks();                                                 \
2446}
2447
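// Instantiate the definition above for every closure type in the
// since-save-marks family.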
2448ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2449
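// Iteration over the CMS space requires the free list lock; each of the
// iterators below takes it unless the caller already holds it.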
2450void
2451ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2452  if (freelistLock()->owned_by_self()) {
2453    Generation::oop_iterate(cl);
2454  } else {
2455    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2456    Generation::oop_iterate(cl);
2457  }
2458}
2459
2460void
2461ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2462  if (freelistLock()->owned_by_self()) {
2463    Generation::object_iterate(cl);
2464  } else {
2465    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2466    Generation::object_iterate(cl);
2467  }
2468}
2469
2470void
2471ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2472  if (freelistLock()->owned_by_self()) {
2473    Generation::safe_object_iterate(cl);
2474  } else {
2475    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2476    Generation::safe_object_iterate(cl);
2477  }
2478}
2479
2480void
2481ConcurrentMarkSweepGeneration::post_compact() {
2482}
2483
2484void
2485ConcurrentMarkSweepGeneration::prepare_for_verify() {
2486  // Fix the linear allocation blocks to look like free blocks.
2487
2488  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2489  // are not called when the heap is verified during universe initialization and
2490  // at vm shutdown.
2491  if (freelistLock()->owned_by_self()) {
2492    cmsSpace()->prepare_for_verify();
2493  } else {
2494    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2495    cmsSpace()->prepare_for_verify();
2496  }
2497}
2498
2499void
2500ConcurrentMarkSweepGeneration::verify() {
2501  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2502  // are not called when the heap is verified during universe initialization and
2503  // at vm shutdown.
2504  if (freelistLock()->owned_by_self()) {
2505    cmsSpace()->verify();
2506  } else {
2507    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2508    cmsSpace()->verify();
2509  }
2510}
2511
2512void CMSCollector::verify() {
2513  _cmsGen->verify();
2514}
2515
2516#ifndef PRODUCT
2517bool CMSCollector::overflow_list_is_empty() const {
2518  assert(_num_par_pushes >= 0, "Inconsistency");
2519  if (_overflow_list == NULL) {
2520    assert(_num_par_pushes == 0, "Inconsistency");
2521  }
2522  return _overflow_list == NULL;
2523}
2524
2525// The methods verify_work_stacks_empty() and verify_overflow_empty()
2526// merely consolidate assertion checks that appear to occur together frequently.
2527void CMSCollector::verify_work_stacks_empty() const {
2528  assert(_markStack.isEmpty(), "Marking stack should be empty");
2529  assert(overflow_list_is_empty(), "Overflow list should be empty");
2530}
2531
2532void CMSCollector::verify_overflow_empty() const {
2533  assert(overflow_list_is_empty(), "Overflow list should be empty");
2534  assert(no_preserved_marks(), "No preserved marks");
2535}
2536#endif // PRODUCT
2537
2538// Decide if we want to enable class unloading as part of the
2539// ensuing concurrent GC cycle. We will collect and
2540// unload classes if it's the case that:
2541// (1) an explicit gc request has been made and the flag
2542//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2543// (2) (a) class unloading is enabled at the command line, and
2544//     (b) old gen is getting really full, or enough concurrent cycles have passed since classes were last unloaded
2545// NOTE: Provided there is no change in the state of the heap between
2546// calls to this method, it should have idempotent results. Moreover,
2547// its results should be monotonically increasing (i.e. going from 0 to 1,
2548// but not 1 to 0) between successive calls between which the heap was
2549// not collected. For the implementation below, it must thus rely on
2550// the property that concurrent_cycles_since_last_unload()
2551// will not decrease unless a collection cycle happened and that
2552// _cmsGen->is_too_full() is
2553// itself also monotonic in that sense. See check_monotonicity()
2554// below.
2555void CMSCollector::update_should_unload_classes() {
2556  _should_unload_classes = false;
2557  // Condition 1 above
2558  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2559    _should_unload_classes = true;
2560  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2561    // Disjuncts 2.b.(i,ii,iii) above
2562    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2563                              CMSClassUnloadingMaxInterval)
2564                           || _cmsGen->is_too_full();
2565  }
2566}
2567
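// The generation is deemed too full when it both wants a concurrent
// collection and its occupancy exceeds CMSIsTooFullPercentage.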
2568bool ConcurrentMarkSweepGeneration::is_too_full() const {
2569  bool res = should_concurrent_collect();
2570  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2571  return res;
2572}
2573
2574void CMSCollector::setup_cms_unloading_and_verification_state() {
2575  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2576                             || VerifyBeforeExit;
2577  const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2578
2579  // We set the proper root for this CMS cycle here.
2580  if (should_unload_classes()) {   // Should unload classes this cycle
2581    remove_root_scanning_option(rso);  // Shrink the root set appropriately
2582    set_verifying(should_verify);    // Set verification state for this cycle
2583    return;                            // Nothing else needs to be done at this time
2584  }
2585
2586  // Not unloading classes this cycle
2587  assert(!should_unload_classes(), "Inconsistency!");
2588
2589  // If we are not unloading classes then add SO_AllCodeCache to root
2590  // scanning options.
2591  add_root_scanning_option(rso);
2592
2593  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2594    set_verifying(true);
2595  } else if (verifying() && !should_verify) {
2596    // We were verifying, but some verification flags got disabled.
2597    set_verifying(false);
2598    // Exclude symbols, strings and code cache elements from root scanning to
2599    // reduce initial mark (IM) and remark (RM) pauses.
2600    remove_root_scanning_option(rso);
2601  }
2602}
2603
2604
2605#ifndef PRODUCT
2606HeapWord* CMSCollector::block_start(const void* p) const {
2607  const HeapWord* addr = (HeapWord*)p;
2608  if (_span.contains(p)) {
2609    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2610      return _cmsGen->cmsSpace()->block_start(p);
2611    }
2612  }
2613  return NULL;
2614}
2615#endif
2616
2617HeapWord*
2618ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2619                                                   bool   tlab,
2620                                                   bool   parallel) {
2621  CMSSynchronousYieldRequest yr;
2622  assert(!tlab, "Can't deal with TLAB allocation");
2623  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2624  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2625  if (GCExpandToAllocateDelayMillis > 0) {
2626    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2627  }
2628  return have_lock_and_allocate(word_size, tlab);
2629}
2630
2631void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2632    size_t bytes,
2633    size_t expand_bytes,
2634    CMSExpansionCause::Cause cause)
2635{
2636
2637  bool success = expand(bytes, expand_bytes);
2638
2639  // remember why we expanded; this information is used
2640  // by should_concurrent_collect() when making decisions on whether to start
2641  // a new CMS cycle.
2642  if (success) {
2643    set_expansion_cause(cause);
2644    log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
2645  }
2646}
2647
2648HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2649  HeapWord* res = NULL;
2650  MutexLocker x(ParGCRareEvent_lock);
2651  while (true) {
2652    // Expansion by some other thread might make alloc OK now:
2653    res = ps->lab.alloc(word_sz);
2654    if (res != NULL) return res;
2655    // If there's not enough expansion space available, give up.
2656    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2657      return NULL;
2658    }
2659    // Otherwise, we try expansion.
2660    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2661    // Now go around the loop and try alloc again;
2662    // A competing par_promote might beat us to the expansion space,
2663    // so we may go around the loop again if promotion fails again.
2664    if (GCExpandToAllocateDelayMillis > 0) {
2665      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2666    }
2667  }
2668}
2669
2670
2671bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2672  PromotionInfo* promo) {
2673  MutexLocker x(ParGCRareEvent_lock);
2674  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2675  while (true) {
2676    // Expansion by some other thread might make alloc OK now:
2677    if (promo->ensure_spooling_space()) {
2678      assert(promo->has_spooling_space(),
2679             "Post-condition of successful ensure_spooling_space()");
2680      return true;
2681    }
2682    // If there's not enough expansion space available, give up.
2683    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2684      return false;
2685    }
2686    // Otherwise, we try expansion.
2687    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2688    // Now go around the loop and try alloc again;
2689    // A competing allocation might beat us to the expansion space,
2690    // so we may go around the loop again if allocation fails again.
2691    if (GCExpandToAllocateDelayMillis > 0) {
2692      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2693    }
2694  }
2695}
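
// Illustrative sketch (not compiled): expand_and_par_lab_allocate() and
// expand_and_ensure_spooling_space() above share one pattern -- take the
// rare-event lock, retry the allocation (another thread may already have
// expanded), give up if the reservation is exhausted, otherwise expand and
// retry; the real code may also sleep briefly (GCExpandToAllocateDelayMillis)
// before retrying. The toy arena below is hypothetical scaffolding.
#if 0
#include <cstddef>
#include <mutex>
#include <vector>

struct ToyArena {
  ToyArena() { committed.reserve(reserved); }  // keep pointers stable across expand()
  std::mutex        rare_event_lock;           // analogous to ParGCRareEvent_lock
  std::vector<char> committed;                 // committed prefix of the reservation
  std::size_t       reserved = 1u << 20;
  std::size_t       used     = 0;

  void* try_alloc(std::size_t bytes) {
    if (used + bytes > committed.size()) return nullptr;
    void* p = committed.data() + used;
    used += bytes;
    return p;
  }
  std::size_t uncommitted_bytes() const { return reserved - committed.size(); }
  void expand(std::size_t bytes) { committed.resize(committed.size() + bytes); }
};

void* alloc_with_expansion(ToyArena& a, std::size_t bytes) {
  std::lock_guard<std::mutex> guard(a.rare_event_lock);
  for (;;) {
    if (void* p = a.try_alloc(bytes)) return p;        // someone else's expansion may have helped
    if (a.uncommitted_bytes() < bytes) return nullptr; // reservation exhausted: give up
    a.expand(bytes);                                   // commit more space, then retry
  }
}
#endif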
2696
2697void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2698  // Only shrink if a compaction was done so that all the free space
2699  // in the generation is in a contiguous block at the end.
2700  if (did_compact()) {
2701    CardGeneration::shrink(bytes);
2702  }
2703}
2704
2705void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2706  assert_locked_or_safepoint(Heap_lock);
2707}
2708
2709void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2710  assert_locked_or_safepoint(Heap_lock);
2711  assert_lock_strong(freelistLock());
2712  log_trace(gc)("Shrinking of CMS not yet implemented");
2713  return;
2714}
2715
2716
2717// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2718// phases.
2719class CMSPhaseAccounting: public StackObj {
2720 public:
2721  CMSPhaseAccounting(CMSCollector *collector,
2722                     const char *title);
2723  ~CMSPhaseAccounting();
2724
2725 private:
2726  CMSCollector *_collector;
2727  const char *_title;
2728  GCTraceConcTime(Info, gc) _trace_time;
2729
2730 public:
2731  // Not MT-safe; so do not pass around these StackObj's
2732  // where they may be accessed by other threads.
2733  double wallclock_millis() {
2734    return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2735  }
2736};
2737
2738CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2739                                       const char *title) :
2740  _collector(collector), _title(title), _trace_time(title) {
2741
2742  _collector->resetYields();
2743  _collector->resetTimer();
2744  _collector->startTimer();
2745  _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2746}
2747
2748CMSPhaseAccounting::~CMSPhaseAccounting() {
2749  _collector->gc_timer_cm()->register_gc_concurrent_end();
2750  _collector->stopTimer();
2751  log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_seconds(_collector->timerTicks()));
2752  log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2753}
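
// Illustrative sketch (not compiled): the same constructor/destructor (RAII)
// idiom as CMSPhaseAccounting, reduced to standard C++ -- constructing the
// object starts the clock and the destructor reports when the scope is left,
// however it is left. The names below are hypothetical.
#if 0
#include <chrono>
#include <cstdio>

class ScopedPhaseTimer {
 public:
  explicit ScopedPhaseTimer(const char* title)
    : _title(title), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    std::printf("%s took %.3fms\n", _title, wallclock_millis());
  }
  // Like CMSPhaseAccounting::wallclock_millis(): elapsed time so far.
  double wallclock_millis() const {
    using namespace std::chrono;
    return duration<double, std::milli>(steady_clock::now() - _start).count();
  }
 private:
  const char* _title;
  std::chrono::steady_clock::time_point _start;
};

void concurrent_phase_example() {
  ScopedPhaseTimer t("Concurrent Mark");  // accounting starts here
  // ... do the phase's work ...
}                                         // accounting ends when t is destroyed
#endif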
2754
2755// CMS work
2756
2757// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2758class CMSParMarkTask : public AbstractGangTask {
2759 protected:
2760  CMSCollector*     _collector;
2761  uint              _n_workers;
2762  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2763      AbstractGangTask(name),
2764      _collector(collector),
2765      _n_workers(n_workers) {}
2766  // Work method in support of parallel rescan ... of young gen spaces
2767  void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2768                             ContiguousSpace* space,
2769                             HeapWord** chunk_array, size_t chunk_top);
2770  void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2771};
2772
2773// Parallel initial mark task
2774class CMSParInitialMarkTask: public CMSParMarkTask {
2775  StrongRootsScope* _strong_roots_scope;
2776 public:
2777  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2778      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2779      _strong_roots_scope(strong_roots_scope) {}
2780  void work(uint worker_id);
2781};
2782
2783// Checkpoint the roots into this generation from outside
2784// this generation. [Note this initial checkpoint need only
2785// be approximate -- we'll do a catch up phase subsequently.]
2786void CMSCollector::checkpointRootsInitial() {
2787  assert(_collectorState == InitialMarking, "Wrong collector state");
2788  check_correct_thread_executing();
2789  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
2790
2791  save_heap_summary();
2792  report_heap_summary(GCWhen::BeforeGC);
2793
2794  ReferenceProcessor* rp = ref_processor();
2795  assert(_restart_addr == NULL, "Control point invariant");
2796  {
2797    // acquire locks for subsequent manipulations
2798    MutexLockerEx x(bitMapLock(),
2799                    Mutex::_no_safepoint_check_flag);
2800    checkpointRootsInitialWork();
2801    // enable ("weak") refs discovery
2802    rp->enable_discovery();
2803    _collectorState = Marking;
2804  }
2805}
2806
2807void CMSCollector::checkpointRootsInitialWork() {
2808  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2809  assert(_collectorState == InitialMarking, "just checking");
2810
2811  // Already have locks.
2812  assert_lock_strong(bitMapLock());
2813  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2814
2815  // Setup the verification and class unloading state for this
2816  // CMS collection cycle.
2817  setup_cms_unloading_and_verification_state();
2818
2819  GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);
2820
2821  // Reset all the PLAB chunk arrays if necessary.
2822  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2823    reset_survivor_plab_arrays();
2824  }
2825
2826  ResourceMark rm;
2827  HandleMark  hm;
2828
2829  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2830  GenCollectedHeap* gch = GenCollectedHeap::heap();
2831
2832  verify_work_stacks_empty();
2833  verify_overflow_empty();
2834
2835  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2836  // Update the saved marks which may affect the root scans.
2837  gch->save_marks();
2838
2839  // weak reference processing has not started yet.
2840  ref_processor()->set_enqueuing_is_done(false);
2841
2842  // Need to remember all newly created CLDs,
2843  // so that we can guarantee that the remark finds them.
2844  ClassLoaderDataGraph::remember_new_clds(true);
2845
2846  // Whenever a CLD is found, it will be claimed before proceeding to mark
2847  // the klasses. The claimed marks need to be cleared before marking starts.
2848  ClassLoaderDataGraph::clear_claimed_marks();
2849
2850  print_eden_and_survivor_chunk_arrays();
2851
2852  {
2853#if defined(COMPILER2) || INCLUDE_JVMCI
2854    DerivedPointerTableDeactivate dpt_deact;
2855#endif
2856    if (CMSParallelInitialMarkEnabled) {
2857      // The parallel version.
2858      WorkGang* workers = gch->workers();
2859      assert(workers != NULL, "Need parallel worker threads.");
2860      uint n_workers = workers->active_workers();
2861
2862      StrongRootsScope srs(n_workers);
2863
2864      CMSParInitialMarkTask tsk(this, &srs, n_workers);
2865      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2866      if (n_workers > 1) {
2867        workers->run_task(&tsk);
2868      } else {
2869        tsk.work(0);
2870      }
2871    } else {
2872      // The serial version.
2873      CLDToOopClosure cld_closure(&notOlder, true);
2874      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2875
2876      StrongRootsScope srs(1);
2877
2878      gch->gen_process_roots(&srs,
2879                             GenCollectedHeap::OldGen,
2880                             true,   // young gen as roots
2881                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
2882                             should_unload_classes(),
2883                             &notOlder,
2884                             NULL,
2885                             &cld_closure);
2886    }
2887  }
2888
2889  // Clear mod-union table; it will be dirtied in the prologue of
2890  // CMS generation for each young generation collection.
2891
2892  assert(_modUnionTable.isAllClear(),
2893       "Was cleared in most recent final checkpoint phase"
2894       " or no bits are set in the gc_prologue before the start of the next "
2895       "subsequent marking phase.");
2896
2897  assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2898
2899  // Save the end of the used_region of the constituent generations
2900  // to be used to limit the extent of sweep in each generation.
2901  save_sweep_limits();
2902  verify_overflow_empty();
2903}
2904
2905bool CMSCollector::markFromRoots() {
2906  // we might be tempted to assert that:
2907  // assert(!SafepointSynchronize::is_at_safepoint(),
2908  //        "inconsistent argument?");
2909  // However that wouldn't be right, because it's possible that
2910  // a safepoint is indeed in progress as a young generation
2911  // stop-the-world GC happens even as we mark in this generation.
2912  assert(_collectorState == Marking, "inconsistent state?");
2913  check_correct_thread_executing();
2914  verify_overflow_empty();
2915
2916  // Weak ref discovery note: We may be discovering weak
2917  // refs in this generation concurrent (but interleaved) with
2918  // weak ref discovery by the young generation collector.
2919
2920  CMSTokenSyncWithLocks ts(true, bitMapLock());
2921  GCTraceCPUTime tcpu;
2922  CMSPhaseAccounting pa(this, "Concurrent Mark");
2923  bool res = markFromRootsWork();
2924  if (res) {
2925    _collectorState = Precleaning;
2926  } else { // We failed and a foreground collection wants to take over
2927    assert(_foregroundGCIsActive, "internal state inconsistency");
2928    assert(_restart_addr == NULL,  "foreground will restart from scratch");
2929    log_debug(gc)("bailing out to foreground collection");
2930  }
2931  verify_overflow_empty();
2932  return res;
2933}
2934
2935bool CMSCollector::markFromRootsWork() {
2936  // iterate over marked bits in bit map, doing a full scan and mark
2937  // from these roots using the following algorithm:
2938  // . if oop is to the right of the current scan pointer,
2939  //   mark corresponding bit (we'll process it later)
2940  // . else (oop is to left of current scan pointer)
2941  //   push oop on marking stack
2942  // . drain the marking stack
2943
2944  // Note that when we do a marking step we need to hold the
2945  // bit map lock -- recall that direct allocation (by mutators)
2946  // and promotion (by the young generation collector) is also
2947  // marking the bit map. [the so-called allocate live policy.]
2948  // Because the implementation of bit map marking is not
2949  // robust wrt simultaneous marking of bits in the same word,
2950  // we need to make sure that there is no such interference
2951  // between concurrent such updates.
2952
2953  // already have locks
2954  assert_lock_strong(bitMapLock());
2955
2956  verify_work_stacks_empty();
2957  verify_overflow_empty();
2958  bool result = false;
2959  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
2960    result = do_marking_mt();
2961  } else {
2962    result = do_marking_st();
2963  }
2964  return result;
2965}
2966
2967// Forward decl
2968class CMSConcMarkingTask;
2969
2970class CMSConcMarkingTerminator: public ParallelTaskTerminator {
2971  CMSCollector*       _collector;
2972  CMSConcMarkingTask* _task;
2973 public:
2974  virtual void yield();
2975
2976  // "n_threads" is the number of threads to be terminated.
2977  // "queue_set" is a set of work queues of other threads.
2978  // "collector" is the CMS collector associated with this task terminator.
2979  // "yield" indicates whether we need the gang as a whole to yield.
2980  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
2981    ParallelTaskTerminator(n_threads, queue_set),
2982    _collector(collector) { }
2983
2984  void set_task(CMSConcMarkingTask* task) {
2985    _task = task;
2986  }
2987};
2988
2989class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
2990  CMSConcMarkingTask* _task;
2991 public:
2992  bool should_exit_termination();
2993  void set_task(CMSConcMarkingTask* task) {
2994    _task = task;
2995  }
2996};
2997
2998// MT Concurrent Marking Task
2999class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3000  CMSCollector* _collector;
3001  uint          _n_workers;       // requested/desired # workers
3002  bool          _result;
3003  CompactibleFreeListSpace*  _cms_space;
3004  char          _pad_front[64];   // padding to ...
3005  HeapWord*     _global_finger;   // ... avoid sharing cache line
3006  char          _pad_back[64];
3007  HeapWord*     _restart_addr;
3008
3009  //  Exposed here for yielding support
3010  Mutex* const _bit_map_lock;
3011
3012  // The per thread work queues, available here for stealing
3013  OopTaskQueueSet*  _task_queues;
3014
3015  // Termination (and yielding) support
3016  CMSConcMarkingTerminator _term;
3017  CMSConcMarkingTerminatorTerminator _term_term;
3018
3019 public:
3020  CMSConcMarkingTask(CMSCollector* collector,
3021                 CompactibleFreeListSpace* cms_space,
3022                 YieldingFlexibleWorkGang* workers,
3023                 OopTaskQueueSet* task_queues):
3024    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3025    _collector(collector),
3026    _cms_space(cms_space),
3027    _n_workers(0), _result(true),
3028    _task_queues(task_queues),
3029    _term(_n_workers, task_queues, _collector),
3030    _bit_map_lock(collector->bitMapLock())
3031  {
3032    _requested_size = _n_workers;
3033    _term.set_task(this);
3034    _term_term.set_task(this);
3035    _restart_addr = _global_finger = _cms_space->bottom();
3036  }
3037
3038
3039  OopTaskQueueSet* task_queues()  { return _task_queues; }
3040
3041  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3042
3043  HeapWord** global_finger_addr() { return &_global_finger; }
3044
3045  CMSConcMarkingTerminator* terminator() { return &_term; }
3046
3047  virtual void set_for_termination(uint active_workers) {
3048    terminator()->reset_for_reuse(active_workers);
3049  }
3050
3051  void work(uint worker_id);
3052  bool should_yield() {
3053    return    ConcurrentMarkSweepThread::should_yield()
3054           && !_collector->foregroundGCIsActive();
3055  }
3056
3057  virtual void coordinator_yield();  // stuff done by coordinator
3058  bool result() { return _result; }
3059
3060  void reset(HeapWord* ra) {
3061    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3062    _restart_addr = _global_finger = ra;
3063    _term.reset_for_reuse();
3064  }
3065
3066  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3067                                           OopTaskQueue* work_q);
3068
3069 private:
3070  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3071  void do_work_steal(int i);
3072  void bump_global_finger(HeapWord* f);
3073};
3074
3075bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3076  assert(_task != NULL, "Error");
3077  return _task->yielding();
3078  // Note that we do not need the disjunct || _task->should_yield() above
3079  // because we want terminating threads to yield only if the task
3080  // is already in the midst of yielding, which happens only after at least one
3081  // thread has yielded.
3082}
3083
3084void CMSConcMarkingTerminator::yield() {
3085  if (_task->should_yield()) {
3086    _task->yield();
3087  } else {
3088    ParallelTaskTerminator::yield();
3089  }
3090}
3091
3092////////////////////////////////////////////////////////////////
3093// Concurrent Marking Algorithm Sketch
3094////////////////////////////////////////////////////////////////
3095// Until all tasks exhausted (both spaces):
3096// -- claim next available chunk
3097// -- bump global finger via CAS
3098// -- find first object that starts in this chunk
3099//    and start scanning bitmap from that position
3100// -- scan marked objects for oops
3101// -- CAS-mark target, and if successful:
3102//    . if target oop is above global finger (volatile read)
3103//      nothing to do
3104//    . if target oop is in chunk and above local finger
3105//        then nothing to do
3106//    . else push on work-queue
3107// -- Deal with possible overflow issues:
3108//    . local work-queue overflow causes stuff to be pushed on
3109//      global (common) overflow queue
3110//    . always first empty local work queue
3111//    . then get a batch of oops from global work queue if any
3112//    . then do work stealing
3113// -- When all tasks claimed (both spaces)
3114//    and local work queue empty,
3115//    then in a loop do:
3116//    . check global overflow stack; steal a batch of oops and trace
3117//    . try to steal from other threads if the global overflow stack (GOS) is empty
3118//    . if neither is available, offer termination
3119// -- Terminate and return result
3120//
3121void CMSConcMarkingTask::work(uint worker_id) {
3122  elapsedTimer _timer;
3123  ResourceMark rm;
3124  HandleMark hm;
3125
3126  DEBUG_ONLY(_collector->verify_overflow_empty();)
3127
3128  // Before we begin work, our work queue should be empty
3129  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3130  // Scan the bitmap covering _cms_space, tracing through grey objects.
3131  _timer.start();
3132  do_scan_and_mark(worker_id, _cms_space);
3133  _timer.stop();
3134  log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3135
3136  // ... do work stealing
3137  _timer.reset();
3138  _timer.start();
3139  do_work_steal(worker_id);
3140  _timer.stop();
3141  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3142  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3143  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3144  // Note that under the current task protocol, the
3145  // following assertion is true even if the spaces
3146  // expanded since the completion of the concurrent
3147  // marking. XXX This will likely change under a strict
3148  // ABORT semantics.
3149  // After perm removal the comparison was changed to
3150  // greater than or equal to from strictly greater than.
3151  // Before perm removal the highest address sweep would
3152  // have been at the end of perm gen but now is at the
3153  // end of the tenured gen.
3154  assert(_global_finger >=  _cms_space->end(),
3155         "All tasks have been completed");
3156  DEBUG_ONLY(_collector->verify_overflow_empty();)
3157}
3158
3159void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3160  HeapWord* read = _global_finger;
3161  HeapWord* cur  = read;
3162  while (f > read) {
3163    cur = read;
3164    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3165    if (cur == read) {
3166      // our cas succeeded
3167      assert(_global_finger >= f, "protocol consistency");
3168      break;
3169    }
3170  }
3171}
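
// Illustrative sketch (not compiled): bump_global_finger() above is a classic
// "CAS-max" loop. The same idea in standard C++ (std::atomic instead of
// Atomic::cmpxchg_ptr): the finger only ever moves forward, and a failed CAS
// just means another thread advanced it first.
#if 0
#include <atomic>
#include <cstdint>

std::atomic<std::uintptr_t> g_finger{0};

void bump_finger(std::uintptr_t f) {
  std::uintptr_t cur = g_finger.load(std::memory_order_relaxed);
  while (f > cur) {
    if (g_finger.compare_exchange_weak(cur, f,
                                       std::memory_order_acq_rel,
                                       std::memory_order_relaxed)) {
      break;  // we advanced the finger to f
    }
    // CAS failed: 'cur' was refreshed; loop again only if f is still ahead.
  }
}
#endif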
3172
3173// This is really inefficient, and should be redone by
3174// using (not yet available) block-read and -write interfaces to the
3175// stack and the work_queue. XXX FIX ME !!!
3176bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3177                                                      OopTaskQueue* work_q) {
3178  // Fast lock-free check
3179  if (ovflw_stk->length() == 0) {
3180    return false;
3181  }
3182  assert(work_q->size() == 0, "Shouldn't steal");
3183  MutexLockerEx ml(ovflw_stk->par_lock(),
3184                   Mutex::_no_safepoint_check_flag);
3185  // Grab up to 1/4 of the remaining capacity of the work queue
3186  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3187                    (size_t)ParGCDesiredObjsFromOverflowList);
3188  num = MIN2(num, ovflw_stk->length());
3189  for (int i = (int) num; i > 0; i--) {
3190    oop cur = ovflw_stk->pop();
3191    assert(cur != NULL, "Counted wrong?");
3192    work_q->push(cur);
3193  }
3194  return num > 0;
3195}
3196
3197void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3198  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3199  int n_tasks = pst->n_tasks();
3200  // We allow that there may be no tasks to do here because
3201  // we are restarting after a stack overflow.
3202  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3203  uint nth_task = 0;
3204
3205  HeapWord* aligned_start = sp->bottom();
3206  if (sp->used_region().contains(_restart_addr)) {
3207    // Align down to a card boundary for the start of 0th task
3208    // for this space.
3209    aligned_start =
3210      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3211                                 CardTableModRefBS::card_size);
3212  }
3213
3214  size_t chunk_size = sp->marking_task_size();
3215  while (!pst->is_task_claimed(/* reference */ nth_task)) {
3216    // Having claimed the nth task in this space,
3217    // compute the chunk that it corresponds to:
3218    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3219                               aligned_start + (nth_task+1)*chunk_size);
3220    // Try and bump the global finger via a CAS;
3221    // note that we need to do the global finger bump
3222    // _before_ taking the intersection below, because
3223    // the task corresponding to that region will be
3224    // deemed done even if the used_region() expands
3225    // because of allocation -- as it almost certainly will
3226    // during start-up while the threads yield in the
3227    // closure below.
3228    HeapWord* finger = span.end();
3229    bump_global_finger(finger);   // atomically
3230    // There are null tasks here corresponding to chunks
3231    // beyond the "top" address of the space.
3232    span = span.intersection(sp->used_region());
3233    if (!span.is_empty()) {  // Non-null task
3234      HeapWord* prev_obj;
3235      assert(!span.contains(_restart_addr) || nth_task == 0,
3236             "Inconsistency");
3237      if (nth_task == 0) {
3238        // For the 0th task, we'll not need to compute a block_start.
3239        if (span.contains(_restart_addr)) {
3240          // In the case of a restart because of stack overflow,
3241          // we might additionally skip a chunk prefix.
3242          prev_obj = _restart_addr;
3243        } else {
3244          prev_obj = span.start();
3245        }
3246      } else {
3247        // We want to skip the first object because
3248        // the protocol is to scan any object in its entirety
3249        // that _starts_ in this span; a fortiori, any
3250        // object starting in an earlier span is scanned
3251        // as part of an earlier claimed task.
3252        // Below we use the "careful" version of block_start
3253        // so we do not try to navigate uninitialized objects.
3254        prev_obj = sp->block_start_careful(span.start());
3255        // Below we use a variant of block_size that uses the
3256        // Printezis bits to avoid waiting for allocated
3257        // objects to become initialized/parsable.
3258        while (prev_obj < span.start()) {
3259          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3260          if (sz > 0) {
3261            prev_obj += sz;
3262          } else {
3263            // In this case we may end up doing a bit of redundant
3264            // scanning, but that appears unavoidable, short of
3265            // locking the free list locks; see bug 6324141.
3266            break;
3267          }
3268        }
3269      }
3270      if (prev_obj < span.end()) {
3271        MemRegion my_span = MemRegion(prev_obj, span.end());
3272        // Do the marking work within a non-empty span --
3273        // the last argument to the constructor indicates whether the
3274        // iteration should be incremental with periodic yields.
3275        ParMarkFromRootsClosure cl(this, _collector, my_span,
3276                                   &_collector->_markBitMap,
3277                                   work_queue(i),
3278                                   &_collector->_markStack);
3279        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3280      } // else nothing to do for this task
3281    }   // else nothing to do for this task
3282  }
3283  // We'd be tempted to assert here that since there are no
3284  // more tasks left to claim in this space, the global_finger
3285  // must exceed space->top() and a fortiori space->end(). However,
3286  // that would not quite be correct because the bumping of
3287  // global_finger occurs strictly after the claiming of a task,
3288  // so by the time we reach here the global finger may not yet
3289  // have been bumped up by the thread that claimed the last
3290  // task.
3291  pst->all_tasks_completed();
3292}
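
// Illustrative sketch (not compiled): the claiming scheme of do_scan_and_mark()
// above, reduced to a toy. The space is cut into fixed-size chunks, workers
// claim chunk indices from a shared counter, clip each chunk to the used part
// of the space, and -- except for chunk 0 -- skip the object straddling the
// chunk's start, since that object is scanned by the task owning the chunk it
// starts in. The two helpers are declarations only (hypothetical stand-ins for
// block_start_careful()/block_size_no_stall() and the bitmap iteration).
#if 0
#include <algorithm>
#include <atomic>
#include <cstddef>

std::atomic<std::size_t> g_next_task{0};

std::size_t first_object_at_or_after(std::size_t addr); // first object header >= addr
void scan_range(std::size_t from, std::size_t to);      // scan objects starting in [from, to)

void claim_and_scan(std::size_t bottom, std::size_t top, std::size_t chunk_bytes) {
  for (;;) {
    std::size_t n     = g_next_task.fetch_add(1, std::memory_order_relaxed);
    std::size_t start = bottom + n * chunk_bytes;
    if (start >= top) break;                                 // no more chunks to claim
    std::size_t end   = std::min(start + chunk_bytes, top);  // clip to the used region
    std::size_t first = (n == 0) ? start                     // chunk 0 starts at bottom
                                 : first_object_at_or_after(start);
    if (first < end) scan_range(first, end);
  }
}
#endif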
3293
3294class ParConcMarkingClosure: public MetadataAwareOopClosure {
3295 private:
3296  CMSCollector* _collector;
3297  CMSConcMarkingTask* _task;
3298  MemRegion     _span;
3299  CMSBitMap*    _bit_map;
3300  CMSMarkStack* _overflow_stack;
3301  OopTaskQueue* _work_queue;
3302 protected:
3303  DO_OOP_WORK_DEFN
3304 public:
3305  ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3306                        CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3307    MetadataAwareOopClosure(collector->ref_processor()),
3308    _collector(collector),
3309    _task(task),
3310    _span(collector->_span),
3311    _work_queue(work_queue),
3312    _bit_map(bit_map),
3313    _overflow_stack(overflow_stack)
3314  { }
3315  virtual void do_oop(oop* p);
3316  virtual void do_oop(narrowOop* p);
3317
3318  void trim_queue(size_t max);
3319  void handle_stack_overflow(HeapWord* lost);
3320  void do_yield_check() {
3321    if (_task->should_yield()) {
3322      _task->yield();
3323    }
3324  }
3325};
3326
3327// Grey object scanning during work stealing phase --
3328// the salient assumption here is that any references
3329// that are in these stolen objects being scanned must
3330// already have been initialized (else they would not have
3331// been published), so we do not need to check for
3332// uninitialized objects before pushing here.
3333void ParConcMarkingClosure::do_oop(oop obj) {
3334  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3335  HeapWord* addr = (HeapWord*)obj;
3336  // Check if oop points into the CMS generation
3337  // and is not marked
3338  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3339    // a white object ...
3340    // If we manage to "claim" the object, by being the
3341    // first thread to mark it, then we push it on our
3342    // marking stack
3343    if (_bit_map->par_mark(addr)) {     // ... now grey
3344      // push on work queue (grey set)
3345      bool simulate_overflow = false;
3346      NOT_PRODUCT(
3347        if (CMSMarkStackOverflowALot &&
3348            _collector->simulate_overflow()) {
3349          // simulate a stack overflow
3350          simulate_overflow = true;
3351        }
3352      )
3353      if (simulate_overflow ||
3354          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3355        // stack overflow
3356        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3357        // We cannot assert that the overflow stack is full because
3358        // it may have been emptied since.
3359        assert(simulate_overflow ||
3360               _work_queue->size() == _work_queue->max_elems(),
3361              "Else push should have succeeded");
3362        handle_stack_overflow(addr);
3363      }
3364    } // Else, some other thread got there first
3365    do_yield_check();
3366  }
3367}
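
// Illustrative sketch (not compiled): the "claim by marking, then push"
// protocol of do_oop() above as a toy -- the first thread to flip the mark
// bit owns the job of queuing the object, everyone else drops the reference,
// and a full local queue falls back to a shared, lock-protected overflow
// stack (the real code additionally handles the case where even the overflow
// push fails). All names below are hypothetical.
#if 0
#include <atomic>
#include <cstddef>
#include <deque>
#include <mutex>
#include <vector>

struct ToyObject { std::atomic<bool> marked{false}; };

struct ToyWorker {
  static constexpr std::size_t kMaxLocal = 1024;
  std::vector<ToyObject*> local_queue;   // bounded, like an OopTaskQueue
};

std::mutex             g_overflow_lock;
std::deque<ToyObject*> g_overflow;       // shared overflow stack

void trace_reference(ToyWorker& w, ToyObject* obj) {
  if (obj->marked.exchange(true, std::memory_order_acq_rel)) {
    return;                              // another thread claimed (greyed) it first
  }
  if (w.local_queue.size() < ToyWorker::kMaxLocal) {
    w.local_queue.push_back(obj);        // common case: cheap local push
  } else {
    std::lock_guard<std::mutex> g(g_overflow_lock);
    g_overflow.push_back(obj);           // overflow: shared stack under a lock
  }
}
#endif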
3368
3369void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
3370void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
3371
3372void ParConcMarkingClosure::trim_queue(size_t max) {
3373  while (_work_queue->size() > max) {
3374    oop new_oop;
3375    if (_work_queue->pop_local(new_oop)) {
3376      assert(new_oop->is_oop(), "Should be an oop");
3377      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3378      assert(_span.contains((HeapWord*)new_oop), "Not in span");
3379      new_oop->oop_iterate(this);  // do_oop() above
3380      do_yield_check();
3381    }
3382  }
3383}
3384
3385// Upon stack overflow, we discard (part of) the stack,
3386// remembering the least address amongst those discarded
3387// in CMSCollector's _restart_address.
3388void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3389  // We need to do this under a mutex to prevent other
3390  // workers from interfering with the work done below.
3391  MutexLockerEx ml(_overflow_stack->par_lock(),
3392                   Mutex::_no_safepoint_check_flag);
3393  // Remember the least grey address discarded
3394  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3395  _collector->lower_restart_addr(ra);
3396  _overflow_stack->reset();  // discard stack contents
3397  _overflow_stack->expand(); // expand the stack if possible
3398}
3399
3400
3401void CMSConcMarkingTask::do_work_steal(int i) {
3402  OopTaskQueue* work_q = work_queue(i);
3403  oop obj_to_scan;
3404  CMSBitMap* bm = &(_collector->_markBitMap);
3405  CMSMarkStack* ovflw = &(_collector->_markStack);
3406  int* seed = _collector->hash_seed(i);
3407  ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3408  while (true) {
3409    cl.trim_queue(0);
3410    assert(work_q->size() == 0, "Should have been emptied above");
3411    if (get_work_from_overflow_stack(ovflw, work_q)) {
3412      // Can't assert below because the work obtained from the
3413      // overflow stack may already have been stolen from us.
3414      // assert(work_q->size() > 0, "Work from overflow stack");
3415      continue;
3416    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3417      assert(obj_to_scan->is_oop(), "Should be an oop");
3418      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3419      obj_to_scan->oop_iterate(&cl);
3420    } else if (terminator()->offer_termination(&_term_term)) {
3421      assert(work_q->size() == 0, "Impossible!");
3422      break;
3423    } else if (yielding() || should_yield()) {
3424      yield();
3425    }
3426  }
3427}
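
// Illustrative sketch (not compiled): the draining order used by
// do_work_steal() above -- always empty local work first, then take a batch
// from the shared overflow stack, then try stealing, and only when all of
// those fail offer termination. The helpers are declarations only
// (hypothetical stand-ins for the task queue, overflow stack and
// ParallelTaskTerminator).
#if 0
struct ToyTask;                        // one unit of marking work

bool pop_local(ToyTask*& t);           // this worker's own queue
bool pop_global_overflow(ToyTask*& t); // shared overflow stack
bool steal_from_others(ToyTask*& t);   // other workers' queues
bool offer_termination();              // true once every worker is idle
void process(ToyTask* t);              // may push more local work

void drain_loop() {
  ToyTask* t;
  for (;;) {
    while (pop_local(t))             process(t);  // local work first
    if      (pop_global_overflow(t)) process(t);  // then the overflow stack
    else if (steal_from_others(t))   process(t);  // then steal
    else if (offer_termination())    break;       // nothing anywhere: done
  }
}
#endif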
3428
3429// This is run by the CMS (coordinator) thread.
3430void CMSConcMarkingTask::coordinator_yield() {
3431  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3432         "CMS thread should hold CMS token");
3433  // First give up the locks, then yield, then re-lock
3434  // We should probably use a constructor/destructor idiom to
3435  // do this unlock/lock or modify the MutexUnlocker class to
3436  // serve our purpose. XXX
3437  assert_lock_strong(_bit_map_lock);
3438  _bit_map_lock->unlock();
3439  ConcurrentMarkSweepThread::desynchronize(true);
3440  _collector->stopTimer();
3441  _collector->incrementYields();
3442
3443  // It is possible for whichever thread initiated the yield request
3444  // not to get a chance to wake up and take the bitmap lock between
3445  // this thread releasing it and reacquiring it. So, while the
3446  // should_yield() flag is on, let's sleep for a bit to give the
3447  // other thread a chance to wake up. The limit imposed on the number
3448  // of iterations is defensive, to avoid any unforeseen circumstances
3449  // putting us into an infinite loop. Since it's always been this
3450  // (coordinator_yield()) method that was observed to cause the
3451  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3452  // which is by default non-zero. For the other seven methods that
3453  // also perform the yield operation, we are using a different
3454  // parameter (CMSYieldSleepCount) which is by default zero. This way we
3455  // can enable the sleeping for those methods too, if necessary.
3456  // See 6442774.
3457  //
3458  // We really need to reconsider the synchronization between the GC
3459  // thread and the yield-requesting threads in the future and we
3460  // should really use wait/notify, which is the recommended
3461  // way of doing this type of interaction. Additionally, we should
3462  // consolidate the eight methods that do the yield operation, which
3463  // are almost identical, into one for better maintainability and
3464  // readability. See 6445193.
3465  //
3466  // Tony 2006.06.29
3467  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3468                   ConcurrentMarkSweepThread::should_yield() &&
3469                   !CMSCollector::foregroundGCIsActive(); ++i) {
3470    os::sleep(Thread::current(), 1, false);
3471  }
3472
3473  ConcurrentMarkSweepThread::synchronize(true);
3474  _bit_map_lock->lock_without_safepoint_check();
3475  _collector->startTimer();
3476}
3477
3478bool CMSCollector::do_marking_mt() {
3479  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3480  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3481                                                                  conc_workers()->active_workers(),
3482                                                                  Threads::number_of_non_daemon_threads());
3483  conc_workers()->set_active_workers(num_workers);
3484
3485  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3486
3487  CMSConcMarkingTask tsk(this,
3488                         cms_space,
3489                         conc_workers(),
3490                         task_queues());
3491
3492  // Since the actual number of workers we get may be different
3493  // from the number we requested above, do we need to do anything different
3494  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3495  // class?? XXX
3496  cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
3497
3498  // Refs discovery is already non-atomic.
3499  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3500  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3501  conc_workers()->start_task(&tsk);
3502  while (tsk.yielded()) {
3503    tsk.coordinator_yield();
3504    conc_workers()->continue_task(&tsk);
3505  }
3506  // If the task was aborted, _restart_addr will be non-NULL
3507  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3508  while (_restart_addr != NULL) {
3509    // XXX For now we do not make use of ABORTED state and have not
3510    // yet implemented the right abort semantics (even in the original
3511    // single-threaded CMS case). That needs some more investigation
3512    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3513    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3514    // If _restart_addr is non-NULL, a marking stack overflow
3515    // occurred; we need to do a fresh marking iteration from the
3516    // indicated restart address.
3517    if (_foregroundGCIsActive) {
3518      // We may be running into repeated stack overflows, having
3519      // reached the limit of the stack size, while making very
3520      // slow forward progress. It may be best to bail out and
3521      // let the foreground collector do its job.
3522      // Clear _restart_addr, so that foreground GC
3523      // works from scratch. This avoids the headache of
3524      // a "rescan" which would otherwise be needed because
3525      // of the dirty mod union table & card table.
3526      _restart_addr = NULL;
3527      return false;
3528    }
3529    // Adjust the task to restart from _restart_addr
3530    tsk.reset(_restart_addr);
3531    cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
3532                  _restart_addr);
3533    _restart_addr = NULL;
3534    // Get the workers going again
3535    conc_workers()->start_task(&tsk);
3536    while (tsk.yielded()) {
3537      tsk.coordinator_yield();
3538      conc_workers()->continue_task(&tsk);
3539    }
3540  }
3541  assert(tsk.completed(), "Inconsistency");
3542  assert(tsk.result() == true, "Inconsistency");
3543  return true;
3544}
3545
3546bool CMSCollector::do_marking_st() {
3547  ResourceMark rm;
3548  HandleMark   hm;
3549
3550  // Temporarily make refs discovery single threaded (non-MT)
3551  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3552  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3553    &_markStack, CMSYield);
3554  // the last argument to iterate indicates whether the iteration
3555  // should be incremental with periodic yields.
3556  _markBitMap.iterate(&markFromRootsClosure);
3557  // If _restart_addr is non-NULL, a marking stack overflow
3558  // occurred; we need to do a fresh iteration from the
3559  // indicated restart address.
3560  while (_restart_addr != NULL) {
3561    if (_foregroundGCIsActive) {
3562      // We may be running into repeated stack overflows, having
3563      // reached the limit of the stack size, while making very
3564      // slow forward progress. It may be best to bail out and
3565      // let the foreground collector do its job.
3566      // Clear _restart_addr, so that foreground GC
3567      // works from scratch. This avoids the headache of
3568      // a "rescan" which would otherwise be needed because
3569      // of the dirty mod union table & card table.
3570      _restart_addr = NULL;
3571      return false;  // indicating failure to complete marking
3572    }
3573    // Deal with stack overflow:
3574    // we restart marking from _restart_addr
3575    HeapWord* ra = _restart_addr;
3576    markFromRootsClosure.reset(ra);
3577    _restart_addr = NULL;
3578    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3579  }
3580  return true;
3581}
3582
3583void CMSCollector::preclean() {
3584  check_correct_thread_executing();
3585  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3586  verify_work_stacks_empty();
3587  verify_overflow_empty();
3588  _abort_preclean = false;
3589  if (CMSPrecleaningEnabled) {
3590    if (!CMSEdenChunksRecordAlways) {
3591      _eden_chunk_index = 0;
3592    }
3593    size_t used = get_eden_used();
3594    size_t capacity = get_eden_capacity();
3595    // Don't start sampling unless we will get sufficiently
3596    // many samples.
3597    if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3598                * CMSScheduleRemarkEdenPenetration)) {
3599      _start_sampling = true;
3600    } else {
3601      _start_sampling = false;
3602    }
3603    GCTraceCPUTime tcpu;
3604    CMSPhaseAccounting pa(this, "Concurrent Preclean");
3605    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3606  }
3607  CMSTokenSync x(true); // is cms thread
3608  if (CMSPrecleaningEnabled) {
3609    sample_eden();
3610    _collectorState = AbortablePreclean;
3611  } else {
3612    _collectorState = FinalMarking;
3613  }
3614  verify_work_stacks_empty();
3615  verify_overflow_empty();
3616}
3617
3618// Try and schedule the remark such that young gen
3619// occupancy is CMSScheduleRemarkEdenPenetration %.
3620void CMSCollector::abortable_preclean() {
3621  check_correct_thread_executing();
3622  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3623  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3624
3625  // If Eden's current occupancy is below this threshold,
3626  // immediately schedule the remark; else preclean
3627  // past the next scavenge in an effort to
3628  // schedule the pause as described above. By choosing
3629  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3630  // we will never do an actual abortable preclean cycle.
3631  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3632    GCTraceCPUTime tcpu;
3633    CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3634    // We need more smarts in the abortable preclean
3635    // loop below to deal with cases where allocation
3636    // in young gen is very very slow, and our precleaning
3637    // is running a losing race against a horde of
3638    // mutators intent on flooding us with CMS updates
3639    // (dirty cards).
3640    // One, admittedly dumb, strategy is to give up
3641    // after a certain number of abortable precleaning loops
3642    // or after a certain maximum time. We want to make
3643    // this smarter in the next iteration.
3644    // XXX FIX ME!!! YSR
3645    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3646    while (!(should_abort_preclean() ||
3647             ConcurrentMarkSweepThread::should_terminate())) {
3648      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3649      cumworkdone += workdone;
3650      loops++;
3651      // Voluntarily terminate abortable preclean phase if we have
3652      // been at it for too long.
3653      if ((CMSMaxAbortablePrecleanLoops != 0) &&
3654          loops >= CMSMaxAbortablePrecleanLoops) {
3655        log_debug(gc)(" CMS: abort preclean due to loops ");
3656        break;
3657      }
3658      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3659        log_debug(gc)(" CMS: abort preclean due to time ");
3660        break;
3661      }
3662      // If we are doing little work each iteration, we should
3663      // take a short break.
3664      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3665        // Sleep for some time, waiting for work to accumulate
3666        stopTimer();
3667        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3668        startTimer();
3669        waited++;
3670      }
3671    }
3672    log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ",
3673                               loops, waited, cumworkdone);
3674  }
3675  CMSTokenSync x(true); // is cms thread
3676  if (_collectorState != Idling) {
3677    assert(_collectorState == AbortablePreclean,
3678           "Spontaneous state transition?");
3679    _collectorState = FinalMarking;
3680  } // Else, a foreground collection completed this CMS cycle.
3681  return;
3682}
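
// Illustrative sketch (not compiled): the shape of the abort policy in
// abortable_preclean() above -- keep doing increments of precleaning work
// until an external abort is requested, a loop cap is hit, the wall-clock
// budget runs out, or, when an increment achieved very little, after a short
// sleep that lets dirty cards accumulate. The policy values and helpers below
// are hypothetical, not the flag defaults.
#if 0
#include <chrono>
#include <cstddef>
#include <thread>

struct PrecleanPolicy {
  std::size_t max_loops  = 0;      // 0 means no loop cap (cf. CMSMaxAbortablePrecleanLoops)
  double      max_millis = 100.0;  // cf. CMSMaxAbortablePrecleanTime
  std::size_t min_work   = 50;     // cf. CMSAbortablePrecleanMinWorkPerIteration
  std::chrono::milliseconds wait{100};
};

void abortable_loop(const PrecleanPolicy& p,
                    bool (*should_abort)(),          // e.g. "Eden full enough, remark now"
                    std::size_t (*do_increment)()) { // returns a measure of work done
  const auto start = std::chrono::steady_clock::now();
  std::size_t loops = 0;
  while (!should_abort()) {
    std::size_t workdone = do_increment();
    loops++;
    if (p.max_loops != 0 && loops >= p.max_loops) break;   // too many iterations
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - start).count();
    if (ms > p.max_millis) break;                          // out of time budget
    if (workdone < p.min_work) std::this_thread::sleep_for(p.wait); // wait for work
  }
}
#endif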
3683
3684// Respond to an Eden sampling opportunity
3685void CMSCollector::sample_eden() {
3686  // Make sure a young gc cannot sneak in between our
3687  // reading and recording of a sample.
3688  assert(Thread::current()->is_ConcurrentGC_thread(),
3689         "Only the cms thread may collect Eden samples");
3690  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3691         "Should collect samples while holding CMS token");
3692  if (!_start_sampling) {
3693    return;
3694  }
3695  // When CMSEdenChunksRecordAlways is true, the eden chunk array
3696  // is populated by the young generation.
3697  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3698    if (_eden_chunk_index < _eden_chunk_capacity) {
3699      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3700      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3701             "Unexpected state of Eden");
3702      // We'd like to check that what we just sampled is an oop-start address;
3703      // however, we cannot do that here since the object may not yet have been
3704      // initialized. So we'll instead do the check when we _use_ this sample
3705      // later.
3706      if (_eden_chunk_index == 0 ||
3707          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3708                         _eden_chunk_array[_eden_chunk_index-1])
3709           >= CMSSamplingGrain)) {
3710        _eden_chunk_index++;  // commit sample
3711      }
3712    }
3713  }
3714  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3715    size_t used = get_eden_used();
3716    size_t capacity = get_eden_capacity();
3717    assert(used <= capacity, "Unexpected state of Eden");
3718    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3719      _abort_preclean = true;
3720    }
3721  }
3722}
3723
3724
3725size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3726  assert(_collectorState == Precleaning ||
3727         _collectorState == AbortablePreclean, "incorrect state");
3728  ResourceMark rm;
3729  HandleMark   hm;
3730
3731  // Precleaning is currently not MT but the reference processor
3732  // may be set for MT.  Disable it temporarily here.
3733  ReferenceProcessor* rp = ref_processor();
3734  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3735
3736  // Do one pass of scrubbing the discovered reference lists
3737  // to remove any reference objects with strongly-reachable
3738  // referents.
3739  if (clean_refs) {
3740    CMSPrecleanRefsYieldClosure yield_cl(this);
3741    assert(rp->span().equals(_span), "Spans should be equal");
3742    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3743                                   &_markStack, true /* preclean */);
3744    CMSDrainMarkingStackClosure complete_trace(this,
3745                                   _span, &_markBitMap, &_markStack,
3746                                   &keep_alive, true /* preclean */);
3747
3748    // We don't want this step to interfere with a young
3749    // collection because we don't want to take CPU
3750    // or memory bandwidth away from the young GC threads
3751    // (which may be as many as there are CPUs).
3752    // Note that we don't need to protect ourselves from
3753    // interference with mutators because they can't
3754    // manipulate the discovered reference lists nor affect
3755    // the computed reachability of the referents, the
3756    // only properties manipulated by the precleaning
3757    // of these reference lists.
3758    stopTimer();
3759    CMSTokenSyncWithLocks x(true /* is cms thread */,
3760                            bitMapLock());
3761    startTimer();
3762    sample_eden();
3763
3764    // The following will yield to allow foreground
3765    // collection to proceed promptly. XXX YSR:
3766    // The code in this method may need further
3767    // tweaking for better performance and some restructuring
3768    // for cleaner interfaces.
3769    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3770    rp->preclean_discovered_references(
3771          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3772          gc_timer);
3773  }
3774
3775  if (clean_survivor) {  // preclean the active survivor space(s)
3776    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3777                             &_markBitMap, &_modUnionTable,
3778                             &_markStack, true /* precleaning phase */);
3779    stopTimer();
3780    CMSTokenSyncWithLocks ts(true /* is cms thread */,
3781                             bitMapLock());
3782    startTimer();
3783    unsigned int before_count =
3784      GenCollectedHeap::heap()->total_collections();
3785    SurvivorSpacePrecleanClosure
3786      sss_cl(this, _span, &_markBitMap, &_markStack,
3787             &pam_cl, before_count, CMSYield);
3788    _young_gen->from()->object_iterate_careful(&sss_cl);
3789    _young_gen->to()->object_iterate_careful(&sss_cl);
3790  }
3791  MarkRefsIntoAndScanClosure
3792    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3793             &_markStack, this, CMSYield,
3794             true /* precleaning phase */);
3795  // CAUTION: The following closure has persistent state that may need to
3796  // be reset upon a decrease in the sequence of addresses it
3797  // processes.
3798  ScanMarkedObjectsAgainCarefullyClosure
3799    smoac_cl(this, _span,
3800      &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3801
3802  // Preclean dirty cards in ModUnionTable and CardTable using
3803  // appropriate convergence criterion;
3804  // repeat CMSPrecleanIter times unless we find that
3805  // we are losing.
3806  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3807  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3808         "Bad convergence multiplier");
3809  assert(CMSPrecleanThreshold >= 100,
3810         "Unreasonably low CMSPrecleanThreshold");
3811
3812  size_t numIter, cumNumCards, lastNumCards, curNumCards;
3813  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3814       numIter < CMSPrecleanIter;
3815       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3816    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3817    log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3818    // Either there are very few dirty cards, so re-mark
3819    // pause will be small anyway, or our pre-cleaning isn't
3820    // that much faster than the rate at which cards are being
3821    // dirtied, so we might as well stop and re-mark since
3822    // precleaning won't improve our re-mark time by much.
3823    if (curNumCards <= CMSPrecleanThreshold ||
3824        (numIter > 0 &&
3825         (curNumCards * CMSPrecleanDenominator >
3826         lastNumCards * CMSPrecleanNumerator))) {
3827      numIter++;
3828      cumNumCards += curNumCards;
3829      break;
3830    }
3831  }
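
  // Worked example of the convergence check above (values illustrative only,
  // not necessarily the flag defaults): with CMSPrecleanNumerator = 2 and
  // CMSPrecleanDenominator = 3, iteration continues only while
  //   curNumCards * 3 <= lastNumCards * 2,
  // i.e. while each pass finds at most 2/3 as many dirty cards as the pass
  // before it. If one pass cleaned 3000 cards and the next finds 2200, then
  // 2200 * 3 = 6600 > 3000 * 2 = 6000 and we stop: cards are being re-dirtied
  // almost as fast as we clean them. Had the next pass found 1800 cards,
  // 1800 * 3 = 5400 <= 6000 and precleaning would go another round.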
3832
3833  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3834
3835  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3836  cumNumCards += curNumCards;
3837  log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3838                             curNumCards, cumNumCards, numIter);
3839  return cumNumCards;   // as a measure of useful work done
3840}
3841
3842// PRECLEANING NOTES:
3843// Precleaning involves:
3844// . reading the bits of the modUnionTable and clearing the set bits.
3845// . For the cards corresponding to the set bits, we scan the
3846//   objects on those cards. This means we need the free_list_lock
3847//   so that we can safely iterate over the CMS space when scanning
3848//   for oops.
3849// . When we scan the objects, we'll be both reading and setting
3850//   marks in the marking bit map, so we'll need the marking bit map.
3851// . For protecting _collector_state transitions, we take the CGC_lock.
3852//   Note that any races in the reading of card table entries by the
3853//   CMS thread on the one hand and the clearing of those entries by the
3854//   VM thread or the setting of those entries by the mutator threads on the
3855//   other are quite benign. However, for efficiency it makes sense to keep
3856//   the VM thread from racing with the CMS thread while the latter is
3857//   reading dirty card info from the modUnionTable. We therefore also use the
3858//   CGC_lock to protect the reading of the card table and the mod union
3859//   table by the CMS thread.
3860// . We run concurrently with mutator updates, so scanning
3861//   needs to be done carefully  -- we should not try to scan
3862//   potentially uninitialized objects.
3863//
3864// Locking strategy: While holding the CGC_lock, we scan over and
3865// reset a maximal dirty range of the mod union / card tables, then lock
3866// the free_list_lock and bitmap lock to do a full marking, then
3867// release these locks; and repeat the cycle. This allows for a
3868// certain amount of fairness in the sharing of these locks between
3869// the CMS collector on the one hand, and the VM thread and the
3870// mutators on the other.
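
// Illustrative sketch (not compiled): the alternating-lock pattern described
// above, as a toy -- take one lock to snapshot-and-reset a bounded batch of
// dirty state, drop it, then take the heavier pair of locks only to scan that
// batch; releasing everything between batches is what gives the VM thread and
// the mutators a chance to get in. All names below are hypothetical.
#if 0
#include <mutex>
#include <vector>

std::mutex cgc_lock;         // stands in for the CGC_lock
std::mutex free_list_lock;   // stands in for the free_list_lock
std::mutex bitmap_lock;      // stands in for the marking bit map lock

std::vector<int> g_dirty;    // stands in for a dirty range of the mod union table

void preclean_pass() {
  for (;;) {
    std::vector<int> batch;
    {
      std::lock_guard<std::mutex> g(cgc_lock);  // phase 1: read and reset dirty info
      batch.swap(g_dirty);                      // take a maximal batch, clearing it
    }
    if (batch.empty()) break;                   // nothing left to preclean
    {
      std::scoped_lock g(free_list_lock, bitmap_lock);  // phase 2: scan under heap locks
      // ... scan the objects on the cards in 'batch' ...
    }
    // all locks released here; the VM thread / mutators can interleave
  }
}
#endif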
3871
3872// NOTE: preclean_mod_union_table() and preclean_card_table()
3873// further below are largely identical; if you need to modify
3874// one of these methods, please check the other method too.
3875
3876size_t CMSCollector::preclean_mod_union_table(
3877  ConcurrentMarkSweepGeneration* old_gen,
3878  ScanMarkedObjectsAgainCarefullyClosure* cl) {
3879  verify_work_stacks_empty();
3880  verify_overflow_empty();
3881
3882  // strategy: starting with the first card, accumulate contiguous
3883  // ranges of dirty cards; clear these cards, then scan the region
3884  // covered by these cards.
3885
3886  // Since all of the MUT is committed ahead, we can just use
3887  // that, in case the generations expand while we are precleaning.
3888  // It might also be fine to just use the committed part of the
3889  // generation, but we might potentially miss cards when the
3890  // generation is rapidly expanding while we are in the midst
3891  // of precleaning.
3892  HeapWord* startAddr = old_gen->reserved().start();
3893  HeapWord* endAddr   = old_gen->reserved().end();
3894
3895  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3896
3897  size_t numDirtyCards, cumNumDirtyCards;
3898  HeapWord *nextAddr, *lastAddr;
3899  for (cumNumDirtyCards = numDirtyCards = 0,
3900       nextAddr = lastAddr = startAddr;
3901       nextAddr < endAddr;
3902       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3903
3904    ResourceMark rm;
3905    HandleMark   hm;
3906
3907    MemRegion dirtyRegion;
3908    {
3909      stopTimer();
3910      // Potential yield point
3911      CMSTokenSync ts(true);
3912      startTimer();
3913      sample_eden();
3914      // Get dirty region starting at nextAddr (inclusive),
3915      // simultaneously clearing it.
3916      dirtyRegion =
3917        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
3918      assert(dirtyRegion.start() >= nextAddr,
3919             "returned region inconsistent?");
3920    }
3921    // Remember where the next search should begin.
3922    // The returned region (if non-empty) is a right open interval,
3923    // so lastAddr is obtained from the right end of that
3924    // interval.
3925    lastAddr = dirtyRegion.end();
3926    // Should do something more transparent and less hacky XXX
3927    numDirtyCards =
3928      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
3929
3930    // We'll scan the cards in the dirty region (with periodic
3931    // yields for foreground GC as needed).
3932    if (!dirtyRegion.is_empty()) {
3933      assert(numDirtyCards > 0, "consistency check");
3934      HeapWord* stop_point = NULL;
3935      stopTimer();
3936      // Potential yield point
3937      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
3938                               bitMapLock());
3939      startTimer();
3940      {
3941        verify_work_stacks_empty();
3942        verify_overflow_empty();
3943        sample_eden();
3944        stop_point =
3945          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
3946      }
3947      if (stop_point != NULL) {
3948        // The careful iteration stopped early either because it found an
3949        // uninitialized object, or because we were in the midst of an
3950        // "abortable preclean", which should now be aborted. Redirty
3951        // the bits corresponding to the partially-scanned or unscanned
3952        // cards. We'll either restart at the next block boundary or
3953        // abort the preclean.
3954        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
3955               "Should only be AbortablePreclean.");
3956        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
3957        if (should_abort_preclean()) {
3958          break; // out of preclean loop
3959        } else {
3960          // Compute the next address at which preclean should pick up;
3961          // might need bitMapLock in order to read P-bits.
3962          lastAddr = next_card_start_after_block(stop_point);
3963        }
3964      }
3965    } else {
3966      assert(lastAddr == endAddr, "consistency check");
3967      assert(numDirtyCards == 0, "consistency check");
3968      break;
3969    }
3970  }
3971  verify_work_stacks_empty();
3972  verify_overflow_empty();
3973  return cumNumDirtyCards;
3974}
3975
3976// NOTE: preclean_mod_union_table() above and preclean_card_table()
3977// below are largely identical; if you need to modify
3978// one of these methods, please check the other method too.
3979
3980size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
3981  ScanMarkedObjectsAgainCarefullyClosure* cl) {
3982  // strategy: it is similar to preclean_mod_union_table above, in that
3983  // we accumulate contiguous ranges of dirty cards, mark these cards
3984  // precleaned, then scan the region covered by these cards.
3985  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
3986  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
3987
3988  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3989
3990  size_t numDirtyCards, cumNumDirtyCards;
3991  HeapWord *lastAddr, *nextAddr;
3992
3993  for (cumNumDirtyCards = numDirtyCards = 0,
3994       nextAddr = lastAddr = startAddr;
3995       nextAddr < endAddr;
3996       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3997
3998    ResourceMark rm;
3999    HandleMark   hm;
4000
4001    MemRegion dirtyRegion;
4002    {
4003      // See comments in "Precleaning notes" above on why we
4004      // do this locking. XXX Could the locking overheads be
4005      // too high when dirty cards are sparse? [I don't think so.]
4006      stopTimer();
4007      CMSTokenSync x(true); // is cms thread
4008      startTimer();
4009      sample_eden();
4010      // Get and clear dirty region from card table
4011      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4012                                    MemRegion(nextAddr, endAddr),
4013                                    true,
4014                                    CardTableModRefBS::precleaned_card_val());
4015
4016      assert(dirtyRegion.start() >= nextAddr,
4017             "returned region inconsistent?");
4018    }
4019    lastAddr = dirtyRegion.end();
4020    numDirtyCards =
4021      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
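    // Illustrative arithmetic (typical values; the real ones come from the
    // CardTableModRefBS constants): with 512-byte cards and 8-byte HeapWords,
    // card_size_in_words is 64, so a dirty region of 4096 words above
    // corresponds to 64 dirty cards.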
4022
4023    if (!dirtyRegion.is_empty()) {
4024      stopTimer();
4025      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4026      startTimer();
4027      sample_eden();
4028      verify_work_stacks_empty();
4029      verify_overflow_empty();
4030      HeapWord* stop_point =
4031        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4032      if (stop_point != NULL) {
4033        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4034               "Should only be AbortablePreclean.");
4035        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4036        if (should_abort_preclean()) {
4037          break; // out of preclean loop
4038        } else {
4039          // Compute the next address at which preclean should pick up.
4040          lastAddr = next_card_start_after_block(stop_point);
4041        }
4042      }
4043    } else {
4044      break;
4045    }
4046  }
4047  verify_work_stacks_empty();
4048  verify_overflow_empty();
4049  return cumNumDirtyCards;
4050}
4051
4052class PrecleanKlassClosure : public KlassClosure {
4053  KlassToOopClosure _cm_klass_closure;
4054 public:
4055  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4056  void do_klass(Klass* k) {
4057    if (k->has_accumulated_modified_oops()) {
4058      k->clear_accumulated_modified_oops();
4059
4060      _cm_klass_closure.do_klass(k);
4061    }
4062  }
4063};
4064
4065// The freelist lock is needed to prevent asserts; is it really needed?
4066void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4067
4068  cl->set_freelistLock(freelistLock);
4069
4070  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4071
4072  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4073  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4074  PrecleanKlassClosure preclean_klass_closure(cl);
4075  ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4076
4077  verify_work_stacks_empty();
4078  verify_overflow_empty();
4079}
4080
4081void CMSCollector::checkpointRootsFinal() {
4082  assert(_collectorState == FinalMarking, "incorrect state transition?");
4083  check_correct_thread_executing();
4084  // world is stopped at this checkpoint
4085  assert(SafepointSynchronize::is_at_safepoint(),
4086         "world should be stopped");
4087  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4088
4089  verify_work_stacks_empty();
4090  verify_overflow_empty();
4091
4092  log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4093                _young_gen->used() / K, _young_gen->capacity() / K);
4094  {
4095    if (CMSScavengeBeforeRemark) {
4096      GenCollectedHeap* gch = GenCollectedHeap::heap();
4097      // Temporarily set the flag to false; GCH->do_collection
4098      // expects it to be false and will set it to true
4099      FlagSetting fl(gch->_is_gc_active, false);
4100
4101      GCTraceTime(Trace, gc) tm("Pause Scavenge Before Remark", _gc_timer_cm);
4102
4103      gch->do_collection(true,                      // full (i.e. force, see below)
4104                         false,                     // !clear_all_soft_refs
4105                         0,                         // size
4106                         false,                     // is_tlab
4107                         GenCollectedHeap::YoungGen // type
4108        );
4109    }
4110    FreelistLocker x(this);
4111    MutexLockerEx y(bitMapLock(),
4112                    Mutex::_no_safepoint_check_flag);
4113    checkpointRootsFinalWork();
4114  }
4115  verify_work_stacks_empty();
4116  verify_overflow_empty();
4117}
4118
4119void CMSCollector::checkpointRootsFinalWork() {
4120  GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);
4121
4122  assert(haveFreelistLocks(), "must have free list locks");
4123  assert_lock_strong(bitMapLock());
4124
4125  ResourceMark rm;
4126  HandleMark   hm;
4127
4128  GenCollectedHeap* gch = GenCollectedHeap::heap();
4129
4130  if (should_unload_classes()) {
4131    CodeCache::gc_prologue();
4132  }
4133  assert(haveFreelistLocks(), "must have free list locks");
4134  assert_lock_strong(bitMapLock());
4135
4136  // We might assume that we need not fill TLAB's when
4137  // CMSScavengeBeforeRemark is set, because we may have just done
4138  // a scavenge which would have filled all TLAB's -- and besides
4139  // Eden would be empty. This however may not always be the case --
4140  // for instance although we asked for a scavenge, it may not have
4141  // happened because of a JNI critical section. We probably need
4142  // a policy for deciding whether we can in that case wait until
4143  // the critical section releases and then do the remark following
4144  // the scavenge, and skip it here. In the absence of that policy,
4145  // or of an indication of whether the scavenge did indeed occur,
4146  // we cannot rely on TLAB's having been filled and must do
4147  // so here just in case a scavenge did not happen.
4148  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4149  // Update the saved marks which may affect the root scans.
4150  gch->save_marks();
4151
4152  print_eden_and_survivor_chunk_arrays();
4153
4154  {
4155#if defined(COMPILER2) || INCLUDE_JVMCI
4156    DerivedPointerTableDeactivate dpt_deact;
4157#endif
4158
4159    // Note on the role of the mod union table:
4160    // Since the marker in "markFromRoots" marks concurrently with
4161    // mutators, it is possible for some reachable objects not to have been
4162    // scanned. For instance, an only reference to an object A was
4163    // placed in object B after the marker scanned B. Unless B is rescanned,
4164    // A would be collected. Such updates to references in marked objects
4165    // are detected via the mod union table which is the set of all cards
4166    // dirtied since the first checkpoint in this GC cycle and prior to
4167    // the most recent young generation GC, minus those cleaned up by the
4168    // concurrent precleaning.
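    // Informally: at this point the mod union table holds roughly the cards
    // dirtied since the initial checkpoint minus those already cleaned by
    // concurrent precleaning; the rescan below merges in any still-dirty
    // card table entries and then revisits the objects on all such cards.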
4169    if (CMSParallelRemarkEnabled) {
4170      GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm);
4171      do_remark_parallel();
4172    } else {
4173      GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm);
4174      do_remark_non_parallel();
4175    }
4176  }
4177  verify_work_stacks_empty();
4178  verify_overflow_empty();
4179
4180  {
4181    GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm);
4182    refProcessingWork();
4183  }
4184  verify_work_stacks_empty();
4185  verify_overflow_empty();
4186
4187  if (should_unload_classes()) {
4188    CodeCache::gc_epilogue();
4189  }
4190  JvmtiExport::gc_epilogue();
4191
4192  // If we encountered any (marking stack / work queue) overflow
4193  // events during the current CMS cycle, take appropriate
4194  // remedial measures, where possible, so as to try to avoid
4195  // recurrence of that condition.
4196  assert(_markStack.isEmpty(), "No grey objects");
4197  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4198                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4199  if (ser_ovflw > 0) {
4200    log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4201                         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4202    _markStack.expand();
4203    _ser_pmc_remark_ovflw = 0;
4204    _ser_pmc_preclean_ovflw = 0;
4205    _ser_kac_preclean_ovflw = 0;
4206    _ser_kac_ovflw = 0;
4207  }
4208  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4209    log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4210                  _par_pmc_remark_ovflw, _par_kac_ovflw);
4211    _par_pmc_remark_ovflw = 0;
4212    _par_kac_ovflw = 0;
4213  }
4214  if (_markStack._hit_limit > 0) {
4215    log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4216                  _markStack._hit_limit);
4217  }
4218  if (_markStack._failed_double > 0) {
4219    log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4220                  _markStack._failed_double, _markStack.capacity());
4221  }
4222  _markStack._hit_limit = 0;
4223  _markStack._failed_double = 0;
4224
4225  if ((VerifyAfterGC || VerifyDuringGC) &&
4226      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4227    verify_after_remark();
4228  }
4229
4230  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4231
4232  // Change under the freelistLocks.
4233  _collectorState = Sweeping;
4234  // Call isAllClear() under bitMapLock
4235  assert(_modUnionTable.isAllClear(),
4236      "Should be clear by end of the final marking");
4237  assert(_ct->klass_rem_set()->mod_union_is_clear(),
4238      "Should be clear by end of the final marking");
4239}
4240
4241void CMSParInitialMarkTask::work(uint worker_id) {
4242  elapsedTimer _timer;
4243  ResourceMark rm;
4244  HandleMark   hm;
4245
4246  // ---------- scan from roots --------------
4247  _timer.start();
4248  GenCollectedHeap* gch = GenCollectedHeap::heap();
4249  ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4250
4251  // ---------- young gen roots --------------
4252  {
4253    work_on_young_gen_roots(worker_id, &par_mri_cl);
4254    _timer.stop();
4255    log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4256  }
4257
4258  // ---------- remaining roots --------------
4259  _timer.reset();
4260  _timer.start();
4261
4262  CLDToOopClosure cld_closure(&par_mri_cl, true);
4263
4264  gch->gen_process_roots(_strong_roots_scope,
4265                         GenCollectedHeap::OldGen,
4266                         false,     // yg was scanned above
4267                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4268                         _collector->should_unload_classes(),
4269                         &par_mri_cl,
4270                         NULL,
4271                         &cld_closure);
4272  assert(_collector->should_unload_classes()
4273         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4274         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4275  _timer.stop();
4276  log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4277}
4278
4279// Parallel remark task
4280class CMSParRemarkTask: public CMSParMarkTask {
4281  CompactibleFreeListSpace* _cms_space;
4282
4283  // The per-thread work queues, available here for stealing.
4284  OopTaskQueueSet*       _task_queues;
4285  ParallelTaskTerminator _term;
4286  StrongRootsScope*      _strong_roots_scope;
4287
4288 public:
4289  // A value of 0 passed to n_workers will cause the number of
4290  // workers to be taken from the active workers in the work gang.
4291  CMSParRemarkTask(CMSCollector* collector,
4292                   CompactibleFreeListSpace* cms_space,
4293                   uint n_workers, WorkGang* workers,
4294                   OopTaskQueueSet* task_queues,
4295                   StrongRootsScope* strong_roots_scope):
4296    CMSParMarkTask("Rescan roots and grey objects in parallel",
4297                   collector, n_workers),
4298    _cms_space(cms_space),
4299    _task_queues(task_queues),
4300    _term(n_workers, task_queues),
4301    _strong_roots_scope(strong_roots_scope) { }
4302
4303  OopTaskQueueSet* task_queues() { return _task_queues; }
4304
4305  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4306
4307  ParallelTaskTerminator* terminator() { return &_term; }
4308  uint n_workers() { return _n_workers; }
4309
4310  void work(uint worker_id);
4311
4312 private:
4313  // ... of  dirty cards in old space
4314  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4315                                  ParMarkRefsIntoAndScanClosure* cl);
4316
4317  // ... work stealing for the above
4318  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
4319};
4320
4321class RemarkKlassClosure : public KlassClosure {
4322  KlassToOopClosure _cm_klass_closure;
4323 public:
4324  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4325  void do_klass(Klass* k) {
4326    // Check if we have modified any oops in the Klass during the concurrent marking.
4327    if (k->has_accumulated_modified_oops()) {
4328      k->clear_accumulated_modified_oops();
4329
4330      // We could have transferred the current modified marks to the accumulated marks,
4331      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4332    } else if (k->has_modified_oops()) {
4333      // Don't clear anything, this info is needed by the next young collection.
4334    } else {
4335      // No modified oops in the Klass.
4336      return;
4337    }
4338
4339    // The klass has modified fields, need to scan the klass.
4340    _cm_klass_closure.do_klass(k);
4341  }
4342};
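
// Summary of the cases handled by RemarkKlassClosure::do_klass() above:
//   accumulated_modified_oops set -> clear the flag and rescan the klass;
//   only modified_oops set        -> keep the flag for the next young
//                                    collection, but still rescan the klass;
//   neither flag set              -> nothing to do.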
4343
4344void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4345  ParNewGeneration* young_gen = _collector->_young_gen;
4346  ContiguousSpace* eden_space = young_gen->eden();
4347  ContiguousSpace* from_space = young_gen->from();
4348  ContiguousSpace* to_space   = young_gen->to();
4349
4350  HeapWord** eca = _collector->_eden_chunk_array;
4351  size_t     ect = _collector->_eden_chunk_index;
4352  HeapWord** sca = _collector->_survivor_chunk_array;
4353  size_t     sct = _collector->_survivor_chunk_index;
4354
4355  assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4356  assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4357
4358  do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4359  do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4360  do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4361}
4362
4363// work_queue(i) is passed to the closure
4364// ParMarkRefsIntoAndScanClosure.  The "i" parameter
4365// is also passed to do_dirty_card_rescan_tasks() and to
4366// do_work_steal() to select the i-th task_queue.
4367
4368void CMSParRemarkTask::work(uint worker_id) {
4369  elapsedTimer _timer;
4370  ResourceMark rm;
4371  HandleMark   hm;
4372
4373  // ---------- rescan from roots --------------
4374  _timer.start();
4375  GenCollectedHeap* gch = GenCollectedHeap::heap();
4376  ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4377    _collector->_span, _collector->ref_processor(),
4378    &(_collector->_markBitMap),
4379    work_queue(worker_id));
4380
4381  // Rescan young gen roots first since these are likely
4382  // coarsely partitioned and may, on that account, constitute
4383  // the critical path; thus, it's best to start off that
4384  // work first.
4385  // ---------- young gen roots --------------
4386  {
4387    work_on_young_gen_roots(worker_id, &par_mrias_cl);
4388    _timer.stop();
4389    log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4390  }
4391
4392  // ---------- remaining roots --------------
4393  _timer.reset();
4394  _timer.start();
4395  gch->gen_process_roots(_strong_roots_scope,
4396                         GenCollectedHeap::OldGen,
4397                         false,     // yg was scanned above
4398                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4399                         _collector->should_unload_classes(),
4400                         &par_mrias_cl,
4401                         NULL,
4402                         NULL);     // The dirty klasses will be handled below
4403
4404  assert(_collector->should_unload_classes()
4405         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4406         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4407  _timer.stop();
4408  log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
4409
4410  // ---------- unhandled CLD scanning ----------
4411  if (worker_id == 0) { // Single threaded at the moment.
4412    _timer.reset();
4413    _timer.start();
4414
4415    // Scan all new class loader data objects and new dependencies that were
4416    // introduced during concurrent marking.
4417    ResourceMark rm;
4418    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4419    for (int i = 0; i < array->length(); i++) {
4420      par_mrias_cl.do_cld_nv(array->at(i));
4421    }
4422
4423    // We don't need to keep track of new CLDs anymore.
4424    ClassLoaderDataGraph::remember_new_clds(false);
4425
4426    _timer.stop();
4427    log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4428  }
4429
4430  // ---------- dirty klass scanning ----------
4431  if (worker_id == 0) { // Single threaded at the moment.
4432    _timer.reset();
4433    _timer.start();
4434
4435    // Scan all classes that were dirtied during the concurrent marking phase.
4436    RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4437    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4438
4439    _timer.stop();
4440    log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4441  }
4442
4443  // We might have added oops to ClassLoaderData::_handles during the
4444  // concurrent marking phase. These oops point to newly allocated objects
4445  // that are guaranteed to be kept alive. Either by the direct allocation
4446  // code, or when the young collector processes the roots. Hence,
4447  // we don't have to revisit the _handles block during the remark phase.
4448
4449  // ---------- rescan dirty cards ------------
4450  _timer.reset();
4451  _timer.start();
4452
4453  // Do the rescan tasks for the CMS space (cms_space).
4454  // "worker_id" is passed to select the task_queue for "worker_id".
4456  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4457  _timer.stop();
4458  log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4459
4460  // ---------- steal work from other threads ...
4461  // ---------- ... and drain overflow list.
4462  _timer.reset();
4463  _timer.start();
4464  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4465  _timer.stop();
4466  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4467}
4468
4469// Note that the worker_id parameter is not used.
4470void
4471CMSParMarkTask::do_young_space_rescan(uint worker_id,
4472  OopsInGenClosure* cl, ContiguousSpace* space,
4473  HeapWord** chunk_array, size_t chunk_top) {
4474  // Until all tasks completed:
4475  // . claim an unclaimed task
4476  // . compute region boundaries corresponding to task claimed
4477  //   using chunk_array
4478  // . par_oop_iterate(cl) over that region
4479
4480  ResourceMark rm;
4481  HandleMark   hm;
4482
4483  SequentialSubTasksDone* pst = space->par_seq_tasks();
4484
4485  uint nth_task = 0;
4486  uint n_tasks  = pst->n_tasks();
4487
4488  if (n_tasks > 0) {
4489    assert(pst->valid(), "Uninitialized use?");
4490    HeapWord *start, *end;
4491    while (!pst->is_task_claimed(/* reference */ nth_task)) {
4492      // We claimed task # nth_task; compute its boundaries.
4493      if (chunk_top == 0) {  // no samples were taken
4494        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4495        start = space->bottom();
4496        end   = space->top();
4497      } else if (nth_task == 0) {
4498        start = space->bottom();
4499        end   = chunk_array[nth_task];
4500      } else if (nth_task < (uint)chunk_top) {
4501        assert(nth_task >= 1, "Control point invariant");
4502        start = chunk_array[nth_task - 1];
4503        end   = chunk_array[nth_task];
4504      } else {
4505        assert(nth_task == (uint)chunk_top, "Control point invariant");
4506        start = chunk_array[chunk_top - 1];
4507        end   = space->top();
4508      }
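      // Illustrative example: with chunk_array = {a0, a1} and chunk_top == 2,
      // the three tasks cover [bottom, a0), [a0, a1) and [a1, top).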
4509      MemRegion mr(start, end);
4510      // Verify that mr is in space
4511      assert(mr.is_empty() || space->used_region().contains(mr),
4512             "Should be in space");
4513      // Verify that "start" is an object boundary
4514      assert(mr.is_empty() || oop(mr.start())->is_oop(),
4515             "Should be an oop");
4516      space->par_oop_iterate(mr, cl);
4517    }
4518    pst->all_tasks_completed();
4519  }
4520}
4521
4522void
4523CMSParRemarkTask::do_dirty_card_rescan_tasks(
4524  CompactibleFreeListSpace* sp, int i,
4525  ParMarkRefsIntoAndScanClosure* cl) {
4526  // Until all tasks completed:
4527  // . claim an unclaimed task
4528  // . compute region boundaries corresponding to task claimed
4529  // . transfer dirty bits ct->mut for that region
4530  // . apply rescanclosure to dirty mut bits for that region
4531
4532  ResourceMark rm;
4533  HandleMark   hm;
4534
4535  OopTaskQueue* work_q = work_queue(i);
4536  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4537  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4538  // CAUTION: This closure has state that persists across calls to
4539  // the work method dirty_range_iterate_clear() in that it has
4540  // embedded in it a (subtype of) UpwardsObjectClosure. The
4541  // use of that state in the embedded UpwardsObjectClosure instance
4542  // assumes that the cards are always iterated (even if in parallel
4543  // by several threads) in monotonically increasing order per each
4544  // thread. This is true of the implementation below which picks
4545  // card ranges (chunks) in monotonically increasing order globally
4546  // and, a-fortiori, in monotonically increasing order per thread
4547  // (the latter order being a subsequence of the former).
4548  // If the work code below is ever reorganized into a more chaotic
4549  // work-partitioning form than the current "sequential tasks"
4550  // paradigm, the use of that persistent state will have to be
4551  // revisited and modified appropriately. See also related
4552  // bug 4756801 work on which should examine this code to make
4553  // sure that the changes there do not run counter to the
4554  // assumptions made here and necessary for correctness and
4555  // efficiency. Note also that this code might yield inefficient
4556  // behavior in the case of very large objects that span one or
4557  // more work chunks. Such objects would potentially be scanned
4558  // several times redundantly. Work on 4756801 should try and
4559  // address that performance anomaly if at all possible. XXX
4560  MemRegion  full_span  = _collector->_span;
4561  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4562  MarkFromDirtyCardsClosure
4563    greyRescanClosure(_collector, full_span, // entire span of interest
4564                      sp, bm, work_q, cl);
4565
4566  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4567  assert(pst->valid(), "Uninitialized use?");
4568  uint nth_task = 0;
4569  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
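  // Illustrative arithmetic (actual values come from the platform constants
  // above): with 512-byte cards and 64-bit words, alignment is 512 * 64 = 32K
  // bytes, i.e. the heap span covered by one full word of the mod union
  // table, so chunks aligned this way never share a MUT word across workers.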
4570  MemRegion span = sp->used_region();
4571  HeapWord* start_addr = span.start();
4572  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4573                                           alignment);
4574  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4575  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4576         start_addr, "Check alignment");
4577  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4578         chunk_size, "Check alignment");
4579
4580  while (!pst->is_task_claimed(/* reference */ nth_task)) {
4581    // Having claimed the nth_task, compute corresponding mem-region,
4582    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4583    // The alignment restriction ensures that we do not need any
4584    // synchronization with other gang-workers while setting or
4585    // clearing bits in this chunk of the MUT.
4586    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4587                                    start_addr + (nth_task+1)*chunk_size);
4588    // The last chunk's end might be way beyond end of the
4589    // used region. In that case pull back appropriately.
4590    if (this_span.end() > end_addr) {
4591      this_span.set_end(end_addr);
4592      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4593    }
4594    // Iterate over the dirty cards covering this chunk, marking them
4595    // precleaned, and setting the corresponding bits in the mod union
4596    // table. Since we have been careful to partition at Card and MUT-word
4597    // boundaries no synchronization is needed between parallel threads.
4598    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4599                                                 &modUnionClosure);
4600
4601    // Having transferred these marks into the modUnionTable,
4602    // rescan the marked objects on the dirty cards in the modUnionTable.
4603    // Even if this is at a synchronous collection, the initial marking
4604    // may have been done during an asynchronous collection so there
4605    // may be dirty bits in the mod-union table.
4606    _collector->_modUnionTable.dirty_range_iterate_clear(
4607                  this_span, &greyRescanClosure);
4608    _collector->_modUnionTable.verifyNoOneBitsInRange(
4609                                 this_span.start(),
4610                                 this_span.end());
4611  }
4612  pst->all_tasks_completed();  // declare that i am done
4613}
4614
4615// . see if we can share work_queues with ParNew? XXX
4616void
4617CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
4618                                int* seed) {
4619  OopTaskQueue* work_q = work_queue(i);
4620  NOT_PRODUCT(int num_steals = 0;)
4621  oop obj_to_scan;
4622  CMSBitMap* bm = &(_collector->_markBitMap);
4623
4624  while (true) {
4625    // Completely finish any left over work from (an) earlier round(s)
4626    cl->trim_queue(0);
4627    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4628                                         (size_t)ParGCDesiredObjsFromOverflowList);
4629    // Now check if there's any work in the overflow list
4630    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4631    // only affects the number of attempts made to get work from the
4632    // overflow list and does not affect the number of workers.  Just
4633    // pass ParallelGCThreads so this behavior is unchanged.
4634    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4635                                                work_q,
4636                                                ParallelGCThreads)) {
4637      // found something in global overflow list;
4638      // not yet ready to go stealing work from others.
4639      // We'd like to assert(work_q->size() != 0, ...)
4640      // because we just took work from the overflow list,
4641      // but of course we can't since all of that could have
4642      // been already stolen from us.
4643      // "He giveth and He taketh away."
4644      continue;
4645    }
4646    // Verify that we have no work before we resort to stealing
4647    assert(work_q->size() == 0, "Have work, shouldn't steal");
4648    // Try to steal from other queues that have work
4649    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4650      NOT_PRODUCT(num_steals++;)
4651      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4652      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4653      // Do scanning work
4654      obj_to_scan->oop_iterate(cl);
4655      // Loop around, finish this work, and try to steal some more
4656    } else if (terminator()->offer_termination()) {
4657        break;  // nirvana from the infinite cycle
4658    }
4659  }
4660  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4661  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4662         "Else our work is not yet done");
4663}
4664
4665// Record object boundaries in _eden_chunk_array by sampling the eden
4666// top in the slow-path eden object allocation code path, if
4667// CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
4668// false, we instead rely on the asynchronous sampling in sample_eden(),
4669// which is active during the preclean phase, to record these
4670// boundaries.
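// A sample is committed (by bumping _eden_chunk_index below) only if it lies
// strictly above the previous sample and at least CMSSamplingGrain words
// beyond it; this keeps _eden_chunk_array sorted and coarse enough to serve
// as a useful partition of eden for the parallel initial mark and rescan.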
4671void CMSCollector::sample_eden_chunk() {
4672  if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4673    if (_eden_chunk_lock->try_lock()) {
4674      // Record a sample. This is the critical section. The contents
4675      // of the _eden_chunk_array have to be non-decreasing in
4676      // address order.
4677      _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4678      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4679             "Unexpected state of Eden");
4680      if (_eden_chunk_index == 0 ||
4681          ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4682           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4683                          _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4684        _eden_chunk_index++;  // commit sample
4685      }
4686      _eden_chunk_lock->unlock();
4687    }
4688  }
4689}
4690
4691// Return a thread-local PLAB recording array, as appropriate.
4692void* CMSCollector::get_data_recorder(int thr_num) {
4693  if (_survivor_plab_array != NULL &&
4694      (CMSPLABRecordAlways ||
4695       (_collectorState > Marking && _collectorState < FinalMarking))) {
4696    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4697    ChunkArray* ca = &_survivor_plab_array[thr_num];
4698    ca->reset();   // clear it so that fresh data is recorded
4699    return (void*) ca;
4700  } else {
4701    return NULL;
4702  }
4703}
4704
4705// Reset all the thread-local PLAB recording arrays
4706void CMSCollector::reset_survivor_plab_arrays() {
4707  for (uint i = 0; i < ParallelGCThreads; i++) {
4708    _survivor_plab_array[i].reset();
4709  }
4710}
4711
4712// Merge the per-thread plab arrays into the global survivor chunk
4713// array which will provide the partitioning of the survivor space
4714// for CMS initial scan and rescan.
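// This is essentially a k-way merge: each per-thread PLAB array is already
// in address order, and each round copies the smallest not-yet-consumed
// entry across all threads into _survivor_chunk_array, so the merged array
// comes out globally sorted (checked by the ASSERT block at the end).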
4715void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4716                                              int no_of_gc_threads) {
4717  assert(_survivor_plab_array  != NULL, "Error");
4718  assert(_survivor_chunk_array != NULL, "Error");
4719  assert(_collectorState == FinalMarking ||
4720         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4721  for (int j = 0; j < no_of_gc_threads; j++) {
4722    _cursor[j] = 0;
4723  }
4724  HeapWord* top = surv->top();
4725  size_t i;
4726  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4727    HeapWord* min_val = top;          // Higher than any PLAB address
4728    uint      min_tid = 0;            // position of min_val this round
4729    for (int j = 0; j < no_of_gc_threads; j++) {
4730      ChunkArray* cur_sca = &_survivor_plab_array[j];
4731      if (_cursor[j] == cur_sca->end()) {
4732        continue;
4733      }
4734      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4735      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4736      assert(surv->used_region().contains(cur_val), "Out of bounds value");
4737      if (cur_val < min_val) {
4738        min_tid = j;
4739        min_val = cur_val;
4740      } else {
4741        assert(cur_val < top, "All recorded addresses should be less");
4742      }
4743    }
4744    // At this point min_val and min_tid are respectively
4745    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4746    // and the thread (j) that witnesses that address.
4747    // We record this address in the _survivor_chunk_array[i]
4748    // and increment _cursor[min_tid] prior to the next round i.
4749    if (min_val == top) {
4750      break;
4751    }
4752    _survivor_chunk_array[i] = min_val;
4753    _cursor[min_tid]++;
4754  }
4755  // We are all done; record the size of the _survivor_chunk_array
4756  _survivor_chunk_index = i; // exclusive: [0, i)
4757  log_trace(gc, survivor)(" (Survivor: " SIZE_FORMAT " chunks) ", i);
4758  // Verify that we used up all the recorded entries
4759  #ifdef ASSERT
4760    size_t total = 0;
4761    for (int j = 0; j < no_of_gc_threads; j++) {
4762      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4763      total += _cursor[j];
4764    }
4765    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4766    // Check that the merged array is in sorted order
4767    if (total > 0) {
4768      for (size_t i = 0; i < total - 1; i++) {
4769        log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4770                                     i, p2i(_survivor_chunk_array[i]));
4771        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4772               "Not sorted");
4773      }
4774    }
4775  #endif // ASSERT
4776}
4777
4778// Set up the space's par_seq_tasks structure for work claiming
4779// for parallel initial scan and rescan of young gen.
4780// See ParRescanTask where this is currently used.
4781void
4782CMSCollector::
4783initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4784  assert(n_threads > 0, "Unexpected n_threads argument");
4785
4786  // Eden space
4787  if (!_young_gen->eden()->is_empty()) {
4788    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4789    assert(!pst->valid(), "Clobbering existing data?");
4790    // Each valid entry in [0, _eden_chunk_index) represents a task.
4791    size_t n_tasks = _eden_chunk_index + 1;
4792    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4793    // Sets the condition for completion of the subtask (how many threads
4794    // need to finish in order to be done).
4795    pst->set_n_threads(n_threads);
4796    pst->set_n_tasks((int)n_tasks);
4797  }
4798
4799  // Merge the survivor plab arrays into _survivor_chunk_array
4800  if (_survivor_plab_array != NULL) {
4801    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
4802  } else {
4803    assert(_survivor_chunk_index == 0, "Error");
4804  }
4805
4806  // To space
4807  {
4808    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
4809    assert(!pst->valid(), "Clobbering existing data?");
4810    // Sets the condition for completion of the subtask (how many threads
4811    // need to finish in order to be done).
4812    pst->set_n_threads(n_threads);
4813    pst->set_n_tasks(1);
4814    assert(pst->valid(), "Error");
4815  }
4816
4817  // From space
4818  {
4819    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
4820    assert(!pst->valid(), "Clobbering existing data?");
4821    size_t n_tasks = _survivor_chunk_index + 1;
4822    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
4823    // Sets the condition for completion of the subtask (how many threads
4824    // need to finish in order to be done).
4825    pst->set_n_threads(n_threads);
4826    pst->set_n_tasks((int)n_tasks);
4827    assert(pst->valid(), "Error");
4828  }
4829}
4830
4831// Parallel version of remark
4832void CMSCollector::do_remark_parallel() {
4833  GenCollectedHeap* gch = GenCollectedHeap::heap();
4834  WorkGang* workers = gch->workers();
4835  assert(workers != NULL, "Need parallel worker threads.");
4836  // Choose to use the number of GC workers most recently set
4837  // into "active_workers".
4838  uint n_workers = workers->active_workers();
4839
4840  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4841
4842  StrongRootsScope srs(n_workers);
4843
4844  CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
4845
4846  // We won't be iterating over the cards in the card table updating
4847  // the younger_gen cards, so we shouldn't call the following else
4848  // the verification code as well as subsequent younger_refs_iterate
4849  // code would get confused. XXX
4850  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
4851
4852  // The young gen rescan work will not be done as part of
4853  // process_roots (which currently doesn't know how to
4854  // parallelize such a scan), but rather will be broken up into
4855  // a set of parallel tasks (via the sampling that the [abortable]
4856  // preclean phase did of eden, plus the [two] tasks of
4857  // scanning the [two] survivor spaces). Further fine-grain
4858  // parallelization of the scanning of the survivor spaces
4859  // themselves, and of precleaning of the young gen itself
4860  // is deferred to the future.
4861  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
4862
4863  // The dirty card rescan work is broken up into a "sequence"
4864  // of parallel tasks (per constituent space) that are dynamically
4865  // claimed by the parallel threads.
4866  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
4867
4868  // It turns out that even when we're using 1 thread, doing the work in a
4869  // separate thread causes wide variance in run times.  We can't help this
4870  // in the multi-threaded case, but we special-case n=1 here to get
4871  // repeatable measurements of the 1-thread overhead of the parallel code.
4872  if (n_workers > 1) {
4873    // Make refs discovery MT-safe, if it isn't already: it may not
4874    // necessarily be so, since it's possible that we are doing
4875    // ST marking.
4876    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
4877    workers->run_task(&tsk);
4878  } else {
4879    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4880    tsk.work(0);
4881  }
4882
4883  // restore, single-threaded for now, any preserved marks
4884  // as a result of work_q overflow
4885  restore_preserved_marks_if_any();
4886}
4887
4888// Non-parallel version of remark
4889void CMSCollector::do_remark_non_parallel() {
4890  ResourceMark rm;
4891  HandleMark   hm;
4892  GenCollectedHeap* gch = GenCollectedHeap::heap();
4893  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4894
4895  MarkRefsIntoAndScanClosure
4896    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4897             &_markStack, this,
4898             false /* should_yield */, false /* not precleaning */);
4899  MarkFromDirtyCardsClosure
4900    markFromDirtyCardsClosure(this, _span,
4901                              NULL,  // space is set further below
4902                              &_markBitMap, &_markStack, &mrias_cl);
4903  {
4904    GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm);
4905    // Iterate over the dirty cards, setting the corresponding bits in the
4906    // mod union table.
4907    {
4908      ModUnionClosure modUnionClosure(&_modUnionTable);
4909      _ct->ct_bs()->dirty_card_iterate(
4910                      _cmsGen->used_region(),
4911                      &modUnionClosure);
4912    }
4913    // Having transferred these marks into the modUnionTable, we just need
4914    // to rescan the marked objects on the dirty cards in the modUnionTable.
4915    // The initial marking may have been done during an asynchronous
4916    // collection so there may be dirty bits in the mod-union table.
4917    const int alignment =
4918      CardTableModRefBS::card_size * BitsPerWord;
4919    {
4920      // ... First handle dirty cards in CMS gen
4921      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4922      MemRegion ur = _cmsGen->used_region();
4923      HeapWord* lb = ur.start();
4924      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
4925      MemRegion cms_span(lb, ub);
4926      _modUnionTable.dirty_range_iterate_clear(cms_span,
4927                                               &markFromDirtyCardsClosure);
4928      verify_work_stacks_empty();
4929      log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
4930    }
4931  }
4932  if (VerifyDuringGC &&
4933      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4934    HandleMark hm;  // Discard invalid handles created during verification
4935    Universe::verify();
4936  }
4937  {
4938    GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm);
4939
4940    verify_work_stacks_empty();
4941
4942    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
4943    StrongRootsScope srs(1);
4944
4945    gch->gen_process_roots(&srs,
4946                           GenCollectedHeap::OldGen,
4947                           true,  // young gen as roots
4948                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
4949                           should_unload_classes(),
4950                           &mrias_cl,
4951                           NULL,
4952                           NULL); // The dirty klasses will be handled below
4953
4954    assert(should_unload_classes()
4955           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4956           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4957  }
4958
4959  {
4960    GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm);
4961
4962    verify_work_stacks_empty();
4963
4964    // Scan all class loader data objects that might have been introduced
4965    // during concurrent marking.
4966    ResourceMark rm;
4967    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4968    for (int i = 0; i < array->length(); i++) {
4969      mrias_cl.do_cld_nv(array->at(i));
4970    }
4971
4972    // We don't need to keep track of new CLDs anymore.
4973    ClassLoaderDataGraph::remember_new_clds(false);
4974
4975    verify_work_stacks_empty();
4976  }
4977
4978  {
4979    GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm);
4980
4981    verify_work_stacks_empty();
4982
4983    RemarkKlassClosure remark_klass_closure(&mrias_cl);
4984    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4985
4986    verify_work_stacks_empty();
4987  }
4988
4989  // We might have added oops to ClassLoaderData::_handles during the
4990  // concurrent marking phase. These oops point to newly allocated objects
4991  // that are guaranteed to be kept alive. Either by the direct allocation
4992  // code, or when the young collector processes the roots. Hence,
4993  // we don't have to revisit the _handles block during the remark phase.
4994
4995  verify_work_stacks_empty();
4996  // Restore evacuated mark words, if any, used for overflow list links
4997  restore_preserved_marks_if_any();
4998
4999  verify_overflow_empty();
5000}
5001
5002////////////////////////////////////////////////////////
5003// Parallel Reference Processing Task Proxy Class
5004////////////////////////////////////////////////////////
5005class AbstractGangTaskWOopQueues : public AbstractGangTask {
5006  OopTaskQueueSet*       _queues;
5007  ParallelTaskTerminator _terminator;
5008 public:
5009  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5010    AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5011  ParallelTaskTerminator* terminator() { return &_terminator; }
5012  OopTaskQueueSet* queues() { return _queues; }
5013};
5014
5015class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5016  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5017  CMSCollector*          _collector;
5018  CMSBitMap*             _mark_bit_map;
5019  const MemRegion        _span;
5020  ProcessTask&           _task;
5021
5022public:
5023  CMSRefProcTaskProxy(ProcessTask&     task,
5024                      CMSCollector*    collector,
5025                      const MemRegion& span,
5026                      CMSBitMap*       mark_bit_map,
5027                      AbstractWorkGang* workers,
5028                      OopTaskQueueSet* task_queues):
5029    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5030      task_queues,
5031      workers->active_workers()),
5032    _task(task),
5033    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5034  {
5035    assert(_collector->_span.equals(_span) && !_span.is_empty(),
5036           "Inconsistency in _span");
5037  }
5038
5039  OopTaskQueueSet* task_queues() { return queues(); }
5040
5041  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5042
5043  void do_work_steal(int i,
5044                     CMSParDrainMarkingStackClosure* drain,
5045                     CMSParKeepAliveClosure* keep_alive,
5046                     int* seed);
5047
5048  virtual void work(uint worker_id);
5049};
5050
5051void CMSRefProcTaskProxy::work(uint worker_id) {
5052  ResourceMark rm;
5053  HandleMark hm;
5054  assert(_collector->_span.equals(_span), "Inconsistency in _span");
5055  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5056                                        _mark_bit_map,
5057                                        work_queue(worker_id));
5058  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5059                                                 _mark_bit_map,
5060                                                 work_queue(worker_id));
5061  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5062  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5063  if (_task.marks_oops_alive()) {
5064    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5065                  _collector->hash_seed(worker_id));
5066  }
5067  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5068  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5069}
5070
5071class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5072  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5073  EnqueueTask& _task;
5074
5075public:
5076  CMSRefEnqueueTaskProxy(EnqueueTask& task)
5077    : AbstractGangTask("Enqueue reference objects in parallel"),
5078      _task(task)
5079  { }
5080
5081  virtual void work(uint worker_id)
5082  {
5083    _task.work(worker_id);
5084  }
5085};
5086
5087CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5088  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5089   _span(span),
5090   _bit_map(bit_map),
5091   _work_queue(work_queue),
5092   _mark_and_push(collector, span, bit_map, work_queue),
5093   _low_water_mark(MIN2((work_queue->max_elems()/4),
5094                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5095{ }
5096
5097// . see if we can share work_queues with ParNew? XXX
5098void CMSRefProcTaskProxy::do_work_steal(int i,
5099  CMSParDrainMarkingStackClosure* drain,
5100  CMSParKeepAliveClosure* keep_alive,
5101  int* seed) {
5102  OopTaskQueue* work_q = work_queue(i);
5103  NOT_PRODUCT(int num_steals = 0;)
5104  oop obj_to_scan;
5105
5106  while (true) {
5107    // Completely finish any left over work from (an) earlier round(s)
5108    drain->trim_queue(0);
5109    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5110                                         (size_t)ParGCDesiredObjsFromOverflowList);
5111    // Now check if there's any work in the overflow list
5112    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5113    // only affects the number of attempts made to get work from the
5114    // overflow list and does not affect the number of workers.  Just
5115    // pass ParallelGCThreads so this behavior is unchanged.
5116    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5117                                                work_q,
5118                                                ParallelGCThreads)) {
5119      // Found something in global overflow list;
5120      // not yet ready to go stealing work from others.
5121      // We'd like to assert(work_q->size() != 0, ...)
5122      // because we just took work from the overflow list,
5123      // but of course we can't, since all of that might have
5124      // been already stolen from us.
5125      continue;
5126    }
5127    // Verify that we have no work before we resort to stealing
5128    assert(work_q->size() == 0, "Have work, shouldn't steal");
5129    // Try to steal from other queues that have work
5130    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5131      NOT_PRODUCT(num_steals++;)
5132      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5133      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5134      // Do scanning work
5135      obj_to_scan->oop_iterate(keep_alive);
5136      // Loop around, finish this work, and try to steal some more
5137    } else if (terminator()->offer_termination()) {
5138      break;  // nirvana from the infinite cycle
5139    }
5140  }
5141  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5142}
5143
5144void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5145{
5146  GenCollectedHeap* gch = GenCollectedHeap::heap();
5147  WorkGang* workers = gch->workers();
5148  assert(workers != NULL, "Need parallel worker threads.");
5149  CMSRefProcTaskProxy rp_task(task, &_collector,
5150                              _collector.ref_processor()->span(),
5151                              _collector.markBitMap(),
5152                              workers, _collector.task_queues());
5153  workers->run_task(&rp_task);
5154}
5155
5156void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5157{
5159  GenCollectedHeap* gch = GenCollectedHeap::heap();
5160  WorkGang* workers = gch->workers();
5161  assert(workers != NULL, "Need parallel worker threads.");
5162  CMSRefEnqueueTaskProxy enq_task(task);
5163  workers->run_task(&enq_task);
5164}
5165
5166void CMSCollector::refProcessingWork() {
5167  ResourceMark rm;
5168  HandleMark   hm;
5169
5170  ReferenceProcessor* rp = ref_processor();
5171  assert(rp->span().equals(_span), "Spans should be equal");
5172  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5173  // Process weak references.
5174  rp->setup_policy(false);
5175  verify_work_stacks_empty();
5176
5177  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5178                                          &_markStack, false /* !preclean */);
5179  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5180                                _span, &_markBitMap, &_markStack,
5181                                &cmsKeepAliveClosure, false /* !preclean */);
5182  {
5183    GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm);
5184
5185    ReferenceProcessorStats stats;
5186    if (rp->processing_is_mt()) {
5187      // Set the degree of MT here.  If the discovery is done MT, there
5188      // may have been a different number of threads doing the discovery
5189      // and a different number of discovered lists may have Ref objects.
5190      // That is OK as long as the Reference lists are balanced (see
5191      // balance_all_queues() and balance_queues()).
5192      GenCollectedHeap* gch = GenCollectedHeap::heap();
5193      uint active_workers = ParallelGCThreads;
5194      WorkGang* workers = gch->workers();
5195      if (workers != NULL) {
5196        active_workers = workers->active_workers();
5197        // The expectation is that active_workers will have already
5198        // been set to a reasonable value.  If it has not been set,
5199        // investigate.
5200        assert(active_workers > 0, "Should have been set during scavenge");
5201      }
5202      rp->set_active_mt_degree(active_workers);
5203      CMSRefProcTaskExecutor task_executor(*this);
5204      stats = rp->process_discovered_references(&_is_alive_closure,
5205                                        &cmsKeepAliveClosure,
5206                                        &cmsDrainMarkingStackClosure,
5207                                        &task_executor,
5208                                        _gc_timer_cm);
5209    } else {
5210      stats = rp->process_discovered_references(&_is_alive_closure,
5211                                        &cmsKeepAliveClosure,
5212                                        &cmsDrainMarkingStackClosure,
5213                                        NULL,
5214                                        _gc_timer_cm);
5215    }
5216    _gc_tracer_cm->report_gc_reference_stats(stats);
5217
5218  }
5219
5220  // This is the point where the entire marking should have completed.
5221  verify_work_stacks_empty();
5222
5223  if (should_unload_classes()) {
5224    {
5225      GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm);
5226
5227      // Unload classes and purge the SystemDictionary.
5228      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5229
5230      // Unload nmethods.
5231      CodeCache::do_unloading(&_is_alive_closure, purged_class);
5232
5233      // Prune dead klasses from subklass/sibling/implementor lists.
5234      Klass::clean_weak_klass_links(&_is_alive_closure);
5235    }
5236
5237    {
5238      GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm);
5239      // Clean up unreferenced symbols in symbol table.
5240      SymbolTable::unlink();
5241    }
5242
5243    {
5244      GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm);
5245      // Delete entries for dead interned strings.
5246      StringTable::unlink(&_is_alive_closure);
5247    }
5248  }
5249
5250
5251  // Restore any preserved marks as a result of mark stack or
5252  // work queue overflow
5253  restore_preserved_marks_if_any();  // done single-threaded for now
5254
5255  rp->set_enqueuing_is_done(true);
5256  if (rp->processing_is_mt()) {
5257    rp->balance_all_queues();
5258    CMSRefProcTaskExecutor task_executor(*this);
5259    rp->enqueue_discovered_references(&task_executor);
5260  } else {
5261    rp->enqueue_discovered_references(NULL);
5262  }
5263  rp->verify_no_references_recorded();
5264  assert(!rp->discovery_enabled(), "should have been disabled");
5265}
5266
5267#ifndef PRODUCT
5268void CMSCollector::check_correct_thread_executing() {
5269  Thread* t = Thread::current();
5270  // Only the VM thread or the CMS thread should be here.
5271  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5272         "Unexpected thread type");
5273  // If this is the vm thread, the foreground process
5274  // should not be waiting.  Note that _foregroundGCIsActive is
5275  // true while the foreground collector is waiting.
5276  if (_foregroundGCShouldWait) {
5277    // We cannot be the VM thread
5278    assert(t->is_ConcurrentGC_thread(),
5279           "Should be CMS thread");
5280  } else {
5281    // We can be the CMS thread only if we are in a stop-world
5282    // phase of CMS collection.
5283    if (t->is_ConcurrentGC_thread()) {
5284      assert(_collectorState == InitialMarking ||
5285             _collectorState == FinalMarking,
5286             "Should be a stop-world phase");
5287      // The CMS thread should be holding the CMS_token.
5288      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5289             "Potential interference with concurrently "
5290             "executing VM thread");
5291    }
5292  }
5293}
5294#endif
5295
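// Driver for the concurrent sweep phase: sample the inter-sweep timer,
// sweep the old generation under the freelistLock and bitMapLock (see
// sweepWork()), publish updated heap occupancy, and move the collector
// state from Sweeping to Resizing under the freelistLock so that the
// transition is visible to mutators.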
5296void CMSCollector::sweep() {
5297  assert(_collectorState == Sweeping, "just checking");
5298  check_correct_thread_executing();
5299  verify_work_stacks_empty();
5300  verify_overflow_empty();
5301  increment_sweep_count();
5302  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5303
5304  _inter_sweep_timer.stop();
5305  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5306
5307  assert(!_intra_sweep_timer.is_active(), "Should not be active");
5308  _intra_sweep_timer.reset();
5309  _intra_sweep_timer.start();
5310  {
5311    GCTraceCPUTime tcpu;
5312    CMSPhaseAccounting pa(this, "Concurrent Sweep");
5313    // First sweep the old gen
5314    {
5315      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5316                               bitMapLock());
5317      sweepWork(_cmsGen);
5318    }
5319
5320    // Update Universe::_heap_*_at_gc figures.
5321    // We need all the free list locks to make the abstract state
5322    // transition from Sweeping to Resetting. See detailed note
5323    // further below.
5324    {
5325      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5326      // Update heap occupancy information which is used as
5327      // input to soft ref clearing policy at the next gc.
5328      Universe::update_heap_info_at_gc();
5329      _collectorState = Resizing;
5330    }
5331  }
5332  verify_work_stacks_empty();
5333  verify_overflow_empty();
5334
5335  if (should_unload_classes()) {
5336    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5337    // requires that the virtual spaces are stable and not deleted.
5338    ClassLoaderDataGraph::set_should_purge(true);
5339  }
5340
5341  _intra_sweep_timer.stop();
5342  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5343
5344  _inter_sweep_timer.reset();
5345  _inter_sweep_timer.start();
5346
5347  // We need to use a monotonically non-decreasing time in ms, or we
5348  // will see time-warp warnings; os::javaTimeMillis() does not guarantee
5349  // monotonicity, so the time is derived from os::javaTimeNanos() instead.
5350  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5351  update_time_of_last_gc(now);
5352
5353  // NOTE on abstract state transitions:
5354  // Mutators allocate-live and/or mark the mod-union table dirty
5355  // based on the state of the collection.  The former is done in
5356  // the interval [Marking, Sweeping] and the latter in the interval
5357  // [Marking, Sweeping).  Thus the transitions into the Marking state
5358  // and out of the Sweeping state must be synchronously visible
5359  // globally to the mutators.
5360  // The transition into the Marking state happens with the world
5361  // stopped so the mutators will globally see it.  Sweeping is
5362  // done asynchronously by the background collector so the transition
5363  // from the Sweeping state to the Resizing state must be done
5364  // under the freelistLock (as is the check for whether to
5365  // allocate-live and whether to dirty the mod-union table).
5366  assert(_collectorState == Resizing, "Change of collector state to"
5367    " Resizing must be done under the freelistLocks (plural)");
5368
5369  // Now that sweeping has been completed, we clear
5370  // the incremental_collection_failed flag,
5371  // thus inviting a younger gen collection to promote into
5372  // this generation. If such a promotion may still fail,
5373  // the flag will be set again when a young collection is
5374  // attempted.
5375  GenCollectedHeap* gch = GenCollectedHeap::heap();
5376  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5377  gch->update_full_collections_completed(_collection_count_start);
5378}
5379
5380// FIX ME!!! Looks like this belongs in CFLSpace, with
5381// CMSGen merely delegating to it.
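// Pick a point FLSLargestBlockCoalesceProximity of the way from the
// bottom of the space towards the largest block in the free list
// dictionary (or towards the end of the space if the dictionary is
// empty); the sweep consults this address via isNearLargestChunk()
// when coalescing free blocks.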
5382void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5383  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5384  HeapWord*  minAddr        = _cmsSpace->bottom();
5385  HeapWord*  largestAddr    =
5386    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5387  if (largestAddr == NULL) {
5388    // The dictionary appears to be empty.  In this case
5389    // try to coalesce at the end of the heap.
5390    largestAddr = _cmsSpace->end();
5391  }
5392  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5393  size_t nearLargestOffset =
5394    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5395  log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5396                          p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5397  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5398}
5399
5400bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5401  return addr >= _cmsSpace->nearLargestChunk();
5402}
5403
5404FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5405  return _cmsSpace->find_chunk_at_end();
5406}
5407
5408void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5409                                                    bool full) {
5410  // If the young generation has been collected, gather any statistics
5411  // that are of interest at this point.
5412  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5413  if (!full && current_is_young) {
5414    // Gather statistics on the young generation collection.
5415    collector()->stats().record_gc0_end(used());
5416  }
5417}
5418
5419void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5420  // We iterate over the space(s) underlying this generation,
5421  // checking the mark bit map to see if the bits corresponding
5422  // to specific blocks are marked or not. Blocks that are
5423  // marked are live and are not swept up. All remaining blocks
5424  // are swept up, with coalescing on-the-fly as we sweep up
5425  // contiguous free and/or garbage blocks:
5426  // We need to ensure that the sweeper synchronizes with allocators
5427  // and stop-the-world collectors. In particular, the following
5428  // locks are used:
5429  // . CMS token: if this is held, a stop the world collection cannot occur
5430  // . freelistLock: if this is held no allocation can occur from this
5431  //                 generation by another thread
5432  // . bitMapLock: if this is held, no other thread can access or update
5433  //               the marking bit map
5434
5435  // Note that we need to hold the freelistLock if we use
5436  // block iterate below; else the iterator might go awry if
5437  // a mutator (or promotion) causes block contents to change
5438  // (for instance if the allocator divvies up a block).
5439  // If we hold the free list lock, for all practical purposes
5440  // young generation GC's can't occur (they'll usually need to
5441  // promote), so we might as well prevent all young generation
5442  // GC's while we do a sweeping step. For the same reason, we might
5443  // as well take the bit map lock for the entire duration
5444
5445  // check that we hold the requisite locks
5446  assert(have_cms_token(), "Should hold cms token");
5447  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5448  assert_lock_strong(old_gen->freelistLock());
5449  assert_lock_strong(bitMapLock());
5450
5451  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5452  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5453  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5454                                          _inter_sweep_estimate.padded_average(),
5455                                          _intra_sweep_estimate.padded_average());
5456  old_gen->setNearLargestChunk();
5457
5458  {
5459    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5460    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5461    // We need to free-up/coalesce garbage/blocks from a
5462    // co-terminal free run. This is done in the SweepClosure
5463    // destructor; so, do not remove this scope, else the
5464    // end-of-sweep-census below will be off by a little bit.
5465  }
5466  old_gen->cmsSpace()->sweep_completed();
5467  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5468  if (should_unload_classes()) {                // unloaded classes this cycle,
5469    _concurrent_cycles_since_last_unload = 0;   // ... reset count
5470  } else {                                      // did not unload classes,
5471    _concurrent_cycles_since_last_unload++;     // ... increment count
5472  }
5473}
5474
5475// Reset CMS data structures (for now just the marking bit map)
5476// preparatory for the next cycle.
5477void CMSCollector::reset_concurrent() {
5478  CMSTokenSyncWithLocks ts(true, bitMapLock());
5479
5480  // If the state is not "Resetting", the foreground thread
5481  // has already done a collection and the resetting.
5482  if (_collectorState != Resetting) {
5483    assert(_collectorState == Idling, "The state should only change"
5484      " because the foreground collector has finished the collection");
5485    return;
5486  }
5487
5488  {
5489    // Clear the mark bitmap (no grey objects to start with)
5490    // for the next cycle.
5491    GCTraceCPUTime tcpu;
5492    CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5493
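    // The bit map is cleared in chunks of at most CMSBitMapYieldQuantum
    // words so that the CMS thread can periodically drop the bitMapLock
    // and yield between chunks.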
5494    HeapWord* curAddr = _markBitMap.startWord();
5495    while (curAddr < _markBitMap.endWord()) {
5496      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5497      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5498      _markBitMap.clear_large_range(chunk);
5499      if (ConcurrentMarkSweepThread::should_yield() &&
5500          !foregroundGCIsActive() &&
5501          CMSYield) {
5502        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5503               "CMS thread should hold CMS token");
5504        assert_lock_strong(bitMapLock());
5505        bitMapLock()->unlock();
5506        ConcurrentMarkSweepThread::desynchronize(true);
5507        stopTimer();
5508        incrementYields();
5509
5510        // See the comment in coordinator_yield()
5511        for (unsigned i = 0; i < CMSYieldSleepCount &&
5512                         ConcurrentMarkSweepThread::should_yield() &&
5513                         !CMSCollector::foregroundGCIsActive(); ++i) {
5514          os::sleep(Thread::current(), 1, false);
5515        }
5516
5517        ConcurrentMarkSweepThread::synchronize(true);
5518        bitMapLock()->lock_without_safepoint_check();
5519        startTimer();
5520      }
5521      curAddr = chunk.end();
5522    }
5523    // A successful mostly concurrent collection has been done.
5524    // Because only the full (i.e., concurrent mode failure) collections
5525    // are being measured for gc overhead limits, clean the "near" flag
5526    // and count.
5527    size_policy()->reset_gc_overhead_limit_count();
5528    _collectorState = Idling;
5529  }
5530
5531  register_gc_end();
5532}
5533
5534// Same as above but for STW paths
5535void CMSCollector::reset_stw() {
5536  // already have the lock
5537  assert(_collectorState == Resetting, "just checking");
5538  assert_lock_strong(bitMapLock());
5539  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5540  _markBitMap.clear_all();
5541  _collectorState = Idling;
5542  register_gc_end();
5543}
5544
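// Execute one of the stop-the-world CMS operations: the initial-mark or
// final-remark checkpoint, logged as "Pause Initial Mark" and
// "Pause Remark" respectively.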
5545void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5546  GCTraceCPUTime tcpu;
5547  TraceCollectorStats tcs(counters());
5548
5549  switch (op) {
5550    case CMS_op_checkpointRootsInitial: {
5551      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5552      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5553      checkpointRootsInitial();
5554      break;
5555    }
5556    case CMS_op_checkpointRootsFinal: {
5557      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5558      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5559      checkpointRootsFinal();
5560      break;
5561    }
5562    default:
5563      fatal("No such CMS_op");
5564  }
5565}
5566
5567#ifndef PRODUCT
5568size_t const CMSCollector::skip_header_HeapWords() {
5569  return FreeChunk::header_size();
5570}
5571
5572// Try to collect here the conditions that should hold when
5573// the CMS thread is exiting. The idea is that the foreground GC
5574// thread should not be blocked if it wants to terminate
5575// the CMS thread and yet continue to run the VM for a while
5576// after that.
5577void CMSCollector::verify_ok_to_terminate() const {
5578  assert(Thread::current()->is_ConcurrentGC_thread(),
5579         "should be called by CMS thread");
5580  assert(!_foregroundGCShouldWait, "should be false");
5581  // We could check here that all the various low-level locks
5582  // are not held by the CMS thread, but that is overkill; see
5583  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5584  // is checked.
5585}
5586#endif
5587
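// Printezis marks: for a block whose klass pointer has not yet been
// installed, CMS records the block's extent in the mark bit map itself
// by setting the bit at (addr + 1) and the bit at the block's last word,
// in addition to the mark at addr.  The size of such a block can then be
// recovered from the bit map alone:
//   size = getNextMarkedWordAddress(addr + 2) + 1 - addr
// which is what the two helpers below compute.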
5588size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5589  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5590         "missing Printezis mark?");
5591  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5592  size_t size = pointer_delta(nextOneAddr + 1, addr);
5593  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5594         "alignment problem");
5595  assert(size >= 3, "Necessary for Printezis marks to work");
5596  return size;
5597}
5598
5599// A variant of the above (block_size_using_printezis_bits()) except
5600// that we return 0 if the P-bits are not yet set.
5601size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5602  if (_markBitMap.isMarked(addr + 1)) {
5603    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5604    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5605    size_t size = pointer_delta(nextOneAddr + 1, addr);
5606    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5607           "alignment problem");
5608    assert(size >= 3, "Necessary for Printezis marks to work");
5609    return size;
5610  }
5611  return 0;
5612}
5613
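// Return the start of the first card beyond the block that begins at
// addr, sizing the block from its klass if it is initialized and from
// its Printezis bits otherwise.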
5614HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5615  size_t sz = 0;
5616  oop p = (oop)addr;
5617  if (p->klass_or_null() != NULL) {
5618    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5619  } else {
5620    sz = block_size_using_printezis_bits(addr);
5621  }
5622  assert(sz > 0, "size must be nonzero");
5623  HeapWord* next_block = addr + sz;
5624  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5625                                             CardTableModRefBS::card_size);
5626  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5627         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5628         "must be different cards");
5629  return next_card;
5630}
5631
5632
5633// CMS Bit Map Wrapper /////////////////////////////////////////
5634
5635// Construct a CMS bit map infrastructure, but don't create the
5636// bit vector itself. That is done by a separate call CMSBitMap::allocate()
5637// further below.
5638CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5639  _bm(),
5640  _shifter(shifter),
5641  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5642                                    Monitor::_safepoint_check_sometimes) : NULL)
5643{
5644  _bmStartWord = 0;
5645  _bmWordSize  = 0;
5646}
5647
5648bool CMSBitMap::allocate(MemRegion mr) {
5649  _bmStartWord = mr.start();
5650  _bmWordSize  = mr.word_size();
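  // One bit covers (1 << _shifter) heap words, so the backing store
  // needs (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1 bytes,
  // rounded up to the allocation alignment.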
5651  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5652                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5653  if (!brs.is_reserved()) {
5654    warning("CMS bit map allocation failure");
5655    return false;
5656  }
5657  // For now we'll just commit all of the bit map up front.
5658  // Later on we'll try to be more parsimonious with swap.
5659  if (!_virtual_space.initialize(brs, brs.size())) {
5660    warning("CMS bit map backing store failure");
5661    return false;
5662  }
5663  assert(_virtual_space.committed_size() == brs.size(),
5664         "didn't reserve backing store for all of CMS bit map?");
5665  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5666  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5667         _bmWordSize, "inconsistency in bit map sizing");
5668  _bm.set_size(_bmWordSize >> _shifter);
5669
5670  // bm.clear(); // can we rely on getting zero'd memory? verify below
5671  assert(isAllClear(),
5672         "Expected zero'd memory from ReservedSpace constructor");
5673  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5674         "consistency check");
5675  return true;
5676}
5677
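// Apply cl to each maximal run of set ("dirty") bits within mr,
// clearing the bits as the runs are visited; an empty region returned
// by getAndClearMarkedRegion() means no further dirty bits remain
// before mr.end().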
5678void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5679  HeapWord *next_addr, *end_addr, *last_addr;
5680  assert_locked();
5681  assert(covers(mr), "out-of-range error");
5682  // XXX assert that start and end are appropriately aligned
5683  for (next_addr = mr.start(), end_addr = mr.end();
5684       next_addr < end_addr; next_addr = last_addr) {
5685    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5686    last_addr = dirty_region.end();
5687    if (!dirty_region.is_empty()) {
5688      cl->do_MemRegion(dirty_region);
5689    } else {
5690      assert(last_addr == end_addr, "program logic");
5691      return;
5692    }
5693  }
5694}
5695
5696void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5697  _bm.print_on_error(st, prefix);
5698}
5699
5700#ifndef PRODUCT
5701void CMSBitMap::assert_locked() const {
5702  CMSLockVerifier::assert_locked(lock());
5703}
5704
5705bool CMSBitMap::covers(MemRegion mr) const {
5706  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5707  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5708         "size inconsistency");
5709  return (mr.start() >= _bmStartWord) &&
5710         (mr.end()   <= endWord());
5711}
5712
5713bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5714    return (start >= _bmStartWord && (start + size) <= endWord());
5715}
5716
5717void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5718  // verify that there are no 1 bits in the interval [left, right)
5719  FalseBitMapClosure falseBitMapClosure;
5720  iterate(&falseBitMapClosure, left, right);
5721}
5722
5723void CMSBitMap::region_invariant(MemRegion mr)
5724{
5725  assert_locked();
5726  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5727  assert(!mr.is_empty(), "unexpected empty region");
5728  assert(covers(mr), "mr should be covered by bit map");
5729  // convert address range into offset range
5730  size_t start_ofs = heapWordToOffset(mr.start());
5731  // Make sure that end() is appropriately aligned
5732  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5733                        (1 << (_shifter+LogHeapWordSize))),
5734         "Misaligned mr.end()");
5735  size_t end_ofs   = heapWordToOffset(mr.end());
5736  assert(end_ofs > start_ofs, "Should mark at least one bit");
5737}
5738
5739#endif
5740
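// Reserve and commit backing store for a mark stack capable of holding
// 'size' oops; returns false (after a warning) if either the reservation
// or the commit fails.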
5741bool CMSMarkStack::allocate(size_t size) {
5742  // allocate a stack of the requisite depth
5743  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5744                   size * sizeof(oop)));
5745  if (!rs.is_reserved()) {
5746    warning("CMSMarkStack allocation failure");
5747    return false;
5748  }
5749  if (!_virtual_space.initialize(rs, rs.size())) {
5750    warning("CMSMarkStack backing store failure");
5751    return false;
5752  }
5753  assert(_virtual_space.committed_size() == rs.size(),
5754         "didn't reserve backing store for all of CMS stack?");
5755  _base = (oop*)(_virtual_space.low());
5756  _index = 0;
5757  _capacity = size;
5758  NOT_PRODUCT(_max_depth = 0);
5759  return true;
5760}
5761
5762// XXX FIX ME !!! In the MT case we come in here holding a
5763// leaf lock. For printing we need to take a further lock
5764// which has lower rank. We need to recalibrate the two
5765// lock-ranks involved in order to be able to print the
5766// messages below. (Or defer the printing to the caller.
5767// For now we take the expedient path of just disabling the
5768// messages for the problematic case.)
5769void CMSMarkStack::expand() {
5770  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5771  if (_capacity == MarkStackSizeMax) {
5772    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5773      // We print a warning message only once per CMS cycle.
5774      log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5775    }
5776    return;
5777  }
5778  // Double capacity if possible
5779  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5780  // Do not give up existing stack until we have managed to
5781  // get the double capacity that we desired.
5782  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5783                   new_capacity * sizeof(oop)));
5784  if (rs.is_reserved()) {
5785    // Release the backing store associated with old stack
5786    _virtual_space.release();
5787    // Reinitialize virtual space for new stack
5788    if (!_virtual_space.initialize(rs, rs.size())) {
5789      fatal("Not enough swap for expanded marking stack");
5790    }
5791    _base = (oop*)(_virtual_space.low());
5792    _index = 0;
5793    _capacity = new_capacity;
5794  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5795    // Failed to double capacity, continue;
5796    // we print a detail message only once per CMS cycle.
5797    log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5798                        _capacity / K, new_capacity / K);
5799  }
5800}
5801
5802
5803// Closures
5804// XXX: there seems to be a lot of code duplication here;
5805// should refactor and consolidate common code.
5806
5807// This closure is used to mark refs into the CMS generation in
5808// the CMS bit map. Called at the first checkpoint. This closure
5809// assumes that we do not need to re-mark dirty cards; if the CMS
5810// generation on which this is used is not the oldest
5811// generation, then this will lose younger_gen cards!
5812
5813MarkRefsIntoClosure::MarkRefsIntoClosure(
5814  MemRegion span, CMSBitMap* bitMap):
5815    _span(span),
5816    _bitMap(bitMap)
5817{
5818  assert(ref_processor() == NULL, "deliberately left NULL");
5819  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5820}
5821
5822void MarkRefsIntoClosure::do_oop(oop obj) {
5823  // if p points into _span, then mark corresponding bit in _markBitMap
5824  assert(obj->is_oop(), "expected an oop");
5825  HeapWord* addr = (HeapWord*)obj;
5826  if (_span.contains(addr)) {
5827    // this should be made more efficient
5828    _bitMap->mark(addr);
5829  }
5830}
5831
5832void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
5833void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5834
5835ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5836  MemRegion span, CMSBitMap* bitMap):
5837    _span(span),
5838    _bitMap(bitMap)
5839{
5840  assert(ref_processor() == NULL, "deliberately left NULL");
5841  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5842}
5843
5844void ParMarkRefsIntoClosure::do_oop(oop obj) {
5845  // if p points into _span, then mark corresponding bit in _markBitMap
5846  assert(obj->is_oop(), "expected an oop");
5847  HeapWord* addr = (HeapWord*)obj;
5848  if (_span.contains(addr)) {
5849    // this should be made more efficient
5850    _bitMap->par_mark(addr);
5851  }
5852}
5853
5854void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
5855void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5856
5857// A variant of the above, used for CMS marking verification.
5858MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5859  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5860    _span(span),
5861    _verification_bm(verification_bm),
5862    _cms_bm(cms_bm)
5863{
5864  assert(ref_processor() == NULL, "deliberately left NULL");
5865  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5866}
5867
5868void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5869  // if p points into _span, then mark corresponding bit in _markBitMap
5870  assert(obj->is_oop(), "expected an oop");
5871  HeapWord* addr = (HeapWord*)obj;
5872  if (_span.contains(addr)) {
5873    _verification_bm->mark(addr);
5874    if (!_cms_bm->isMarked(addr)) {
5875      LogHandle(gc, verify) log;
5876      ResourceMark rm;
5877      oop(addr)->print_on(log.info_stream());
5878      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5879      fatal("... aborting");
5880    }
5881  }
5882}
5883
5884void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5885void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5886
5887//////////////////////////////////////////////////
5888// MarkRefsIntoAndScanClosure
5889//////////////////////////////////////////////////
5890
5891MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5892                                                       ReferenceProcessor* rp,
5893                                                       CMSBitMap* bit_map,
5894                                                       CMSBitMap* mod_union_table,
5895                                                       CMSMarkStack*  mark_stack,
5896                                                       CMSCollector* collector,
5897                                                       bool should_yield,
5898                                                       bool concurrent_precleaning):
5899  _collector(collector),
5900  _span(span),
5901  _bit_map(bit_map),
5902  _mark_stack(mark_stack),
5903  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5904                      mark_stack, concurrent_precleaning),
5905  _yield(should_yield),
5906  _concurrent_precleaning(concurrent_precleaning),
5907  _freelistLock(NULL)
5908{
5909  // FIXME: Should initialize in base class constructor.
5910  assert(rp != NULL, "ref_processor shouldn't be NULL");
5911  set_ref_processor_internal(rp);
5912}
5913
5914// This closure is used to mark refs into the CMS generation at the
5915// second (final) checkpoint, and to scan and transitively follow
5916// the unmarked oops. It is also used during the concurrent precleaning
5917// phase while scanning objects on dirty cards in the CMS generation.
5918// The marks are made in the marking bit map and the marking stack is
5919// used for keeping the (newly) grey objects during the scan.
5920// The parallel version (ParMarkRefsIntoAndScanClosure) appears further below.
5921void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5922  if (obj != NULL) {
5923    assert(obj->is_oop(), "expected an oop");
5924    HeapWord* addr = (HeapWord*)obj;
5925    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5926    assert(_collector->overflow_list_is_empty(),
5927           "overflow list should be empty");
5928    if (_span.contains(addr) &&
5929        !_bit_map->isMarked(addr)) {
5930      // mark bit map (object is now grey)
5931      _bit_map->mark(addr);
5932      // push on marking stack (stack should be empty), and drain the
5933      // stack by applying this closure to the oops in the oops popped
5934      // from the stack (i.e. blacken the grey objects)
5935      bool res = _mark_stack->push(obj);
5936      assert(res, "Should have space to push on empty stack");
5937      do {
5938        oop new_oop = _mark_stack->pop();
5939        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
5940        assert(_bit_map->isMarked((HeapWord*)new_oop),
5941               "only grey objects on this stack");
5942        // iterate over the oops in this oop, marking and pushing
5943        // the ones in CMS heap (i.e. in _span).
5944        new_oop->oop_iterate(&_pushAndMarkClosure);
5945        // check if it's time to yield
5946        do_yield_check();
5947      } while (!_mark_stack->isEmpty() ||
5948               (!_concurrent_precleaning && take_from_overflow_list()));
5949        // if marking stack is empty, and we are not doing this
5950        // during precleaning, then check the overflow list
5951    }
5952    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5953    assert(_collector->overflow_list_is_empty(),
5954           "overflow list was drained above");
5955
5956    assert(_collector->no_preserved_marks(),
5957           "All preserved marks should have been restored above");
5958  }
5959}
5960
5961void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5962void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5963
5964void MarkRefsIntoAndScanClosure::do_yield_work() {
5965  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5966         "CMS thread should hold CMS token");
5967  assert_lock_strong(_freelistLock);
5968  assert_lock_strong(_bit_map->lock());
5969  // relinquish the free_list_lock and bitMaplock()
5970  _bit_map->lock()->unlock();
5971  _freelistLock->unlock();
5972  ConcurrentMarkSweepThread::desynchronize(true);
5973  _collector->stopTimer();
5974  _collector->incrementYields();
5975
5976  // See the comment in coordinator_yield()
5977  for (unsigned i = 0;
5978       i < CMSYieldSleepCount &&
5979       ConcurrentMarkSweepThread::should_yield() &&
5980       !CMSCollector::foregroundGCIsActive();
5981       ++i) {
5982    os::sleep(Thread::current(), 1, false);
5983  }
5984
5985  ConcurrentMarkSweepThread::synchronize(true);
5986  _freelistLock->lock_without_safepoint_check();
5987  _bit_map->lock()->lock_without_safepoint_check();
5988  _collector->startTimer();
5989}
5990
5991///////////////////////////////////////////////////////////
5992// ParMarkRefsIntoAndScanClosure: a parallel version of
5993//                                MarkRefsIntoAndScanClosure
5994///////////////////////////////////////////////////////////
5995ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
5996  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
5997  CMSBitMap* bit_map, OopTaskQueue* work_queue):
5998  _span(span),
5999  _bit_map(bit_map),
6000  _work_queue(work_queue),
6001  _low_water_mark(MIN2((work_queue->max_elems()/4),
6002                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6003  _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6004{
6005  // FIXME: Should initialize in base class constructor.
6006  assert(rp != NULL, "ref_processor shouldn't be NULL");
6007  set_ref_processor_internal(rp);
6008}
6009
6010// This closure is used to mark refs into the CMS generation at the
6011// second (final) checkpoint, and to scan and transitively follow
6012// the unmarked oops. The marks are made in the marking bit map and
6013// the work_queue is used for keeping the (newly) grey objects during
6014// the scan phase whence they are also available for stealing by parallel
6015// threads. Since the marking bit map is shared, updates are
6016// synchronized (via CAS).
6017void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6018  if (obj != NULL) {
6019    // Ignore mark word because this could be an already marked oop
6020    // that may be chained at the end of the overflow list.
6021    assert(obj->is_oop(true), "expected an oop");
6022    HeapWord* addr = (HeapWord*)obj;
6023    if (_span.contains(addr) &&
6024        !_bit_map->isMarked(addr)) {
6025      // mark bit map (object will become grey):
6026      // It is possible for several threads to be
6027      // trying to "claim" this object concurrently;
6028      // the unique thread that succeeds in marking the
6029      // object first will do the subsequent push on
6030      // to the work queue (or overflow list).
6031      if (_bit_map->par_mark(addr)) {
6032        // push on work_queue (which may not be empty), and trim the
6033        // queue to an appropriate length by applying this closure to
6034        // the oops in the oops popped from the stack (i.e. blacken the
6035        // grey objects)
6036        bool res = _work_queue->push(obj);
6037        assert(res, "Low water mark should be less than capacity?");
6038        trim_queue(_low_water_mark);
6039      } // Else, another thread claimed the object
6040    }
6041  }
6042}
6043
6044void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6045void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6046
6047// This closure is used to rescan the marked objects on the dirty cards
6048// in the mod union table and the card table proper.
6049size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6050  oop p, MemRegion mr) {
6051
6052  size_t size = 0;
6053  HeapWord* addr = (HeapWord*)p;
6054  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6055  assert(_span.contains(addr), "we are scanning the CMS generation");
6056  // check if it's time to yield
6057  if (do_yield_check()) {
6058    // We yielded for some foreground stop-world work,
6059    // and we have been asked to abort this ongoing preclean cycle.
6060    return 0;
6061  }
6062  if (_bitMap->isMarked(addr)) {
6063    // it's marked; is it potentially uninitialized?
6064    if (p->klass_or_null() != NULL) {
6065        // an initialized object; ignore mark word in verification below
6066        // since we are running concurrent with mutators
6067        assert(p->is_oop(true), "should be an oop");
6068        if (p->is_objArray()) {
6069          // objArrays are precisely marked; restrict scanning
6070          // to dirty cards only.
6071          size = CompactibleFreeListSpace::adjustObjectSize(
6072                   p->oop_iterate_size(_scanningClosure, mr));
6073        } else {
6074          // A non-array may have been imprecisely marked; we need
6075          // to scan object in its entirety.
6076          size = CompactibleFreeListSpace::adjustObjectSize(
6077                   p->oop_iterate_size(_scanningClosure));
6078        }
6079        #ifdef ASSERT
6080          size_t direct_size =
6081            CompactibleFreeListSpace::adjustObjectSize(p->size());
6082          assert(size == direct_size, "Inconsistency in size");
6083          assert(size >= 3, "Necessary for Printezis marks to work");
6084          if (!_bitMap->isMarked(addr+1)) {
6085            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6086          } else {
6087            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6088            assert(_bitMap->isMarked(addr+size-1),
6089                   "inconsistent Printezis mark");
6090          }
6091        #endif // ASSERT
6092    } else {
6093      // An uninitialized object.
6094      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6095      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6096      size = pointer_delta(nextOneAddr + 1, addr);
6097      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6098             "alignment problem");
6099      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6100      // will dirty the card when the klass pointer is installed in the
6101      // object (signaling the completion of initialization).
6102    }
6103  } else {
6104    // Either a not yet marked object or an uninitialized object
6105    if (p->klass_or_null() == NULL) {
6106      // An uninitialized object, skip to the next card, since
6107      // we may not be able to read its P-bits yet.
6108      assert(size == 0, "Initial value");
6109    } else {
6110      // An object not (yet) reached by marking: we merely need to
6111      // compute its size so as to go look at the next block.
6112      assert(p->is_oop(true), "should be an oop");
6113      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6114    }
6115  }
6116  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6117  return size;
6118}
6119
6120void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6121  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6122         "CMS thread should hold CMS token");
6123  assert_lock_strong(_freelistLock);
6124  assert_lock_strong(_bitMap->lock());
6125  // relinquish the free_list_lock and bitMaplock()
6126  _bitMap->lock()->unlock();
6127  _freelistLock->unlock();
6128  ConcurrentMarkSweepThread::desynchronize(true);
6129  _collector->stopTimer();
6130  _collector->incrementYields();
6131
6132  // See the comment in coordinator_yield()
6133  for (unsigned i = 0; i < CMSYieldSleepCount &&
6134                   ConcurrentMarkSweepThread::should_yield() &&
6135                   !CMSCollector::foregroundGCIsActive(); ++i) {
6136    os::sleep(Thread::current(), 1, false);
6137  }
6138
6139  ConcurrentMarkSweepThread::synchronize(true);
6140  _freelistLock->lock_without_safepoint_check();
6141  _bitMap->lock()->lock_without_safepoint_check();
6142  _collector->startTimer();
6143}
6144
6145
6146//////////////////////////////////////////////////////////////////
6147// SurvivorSpacePrecleanClosure
6148//////////////////////////////////////////////////////////////////
6149// This (single-threaded) closure is used to preclean the oops in
6150// the survivor spaces.
6151size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6152
6153  HeapWord* addr = (HeapWord*)p;
6154  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6155  assert(!_span.contains(addr), "we are scanning the survivor spaces");
6156  assert(p->klass_or_null() != NULL, "object should be initialized");
6157  // an initialized object; ignore mark word in verification below
6158  // since we are running concurrent with mutators
6159  assert(p->is_oop(true), "should be an oop");
6160  // Note that we do not yield while we iterate over
6161  // the interior oops of p, pushing the relevant ones
6162  // on our marking stack.
6163  size_t size = p->oop_iterate_size(_scanning_closure);
6164  do_yield_check();
6165  // Observe that below, we do not abandon the preclean
6166  // phase as soon as we should; rather we empty the
6167  // marking stack before returning. This is to satisfy
6168  // some existing assertions. In general, it may be a
6169  // good idea to abort immediately and complete the marking
6170  // from the grey objects at a later time.
6171  while (!_mark_stack->isEmpty()) {
6172    oop new_oop = _mark_stack->pop();
6173    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6174    assert(_bit_map->isMarked((HeapWord*)new_oop),
6175           "only grey objects on this stack");
6176    // iterate over the oops in this oop, marking and pushing
6177    // the ones in CMS heap (i.e. in _span).
6178    new_oop->oop_iterate(_scanning_closure);
6179    // check if it's time to yield
6180    do_yield_check();
6181  }
6182  unsigned int after_count =
6183    GenCollectedHeap::heap()->total_collections();
6184  bool abort = (_before_count != after_count) ||
6185               _collector->should_abort_preclean();
6186  return abort ? 0 : size;
6187}
6188
6189void SurvivorSpacePrecleanClosure::do_yield_work() {
6190  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6191         "CMS thread should hold CMS token");
6192  assert_lock_strong(_bit_map->lock());
6193  // Relinquish the bit map lock
6194  _bit_map->lock()->unlock();
6195  ConcurrentMarkSweepThread::desynchronize(true);
6196  _collector->stopTimer();
6197  _collector->incrementYields();
6198
6199  // See the comment in coordinator_yield()
6200  for (unsigned i = 0; i < CMSYieldSleepCount &&
6201                       ConcurrentMarkSweepThread::should_yield() &&
6202                       !CMSCollector::foregroundGCIsActive(); ++i) {
6203    os::sleep(Thread::current(), 1, false);
6204  }
6205
6206  ConcurrentMarkSweepThread::synchronize(true);
6207  _bit_map->lock()->lock_without_safepoint_check();
6208  _collector->startTimer();
6209}
6210
6211// This closure is used to rescan the marked objects on the dirty cards
6212// in the mod union table and the card table proper. In the parallel
6213// case, although the bitMap is shared, we do a single read so the
6214// isMarked() query is "safe".
6215bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6216  // Ignore mark word because we are running concurrent with mutators
6217  assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6218  HeapWord* addr = (HeapWord*)p;
6219  assert(_span.contains(addr), "we are scanning the CMS generation");
6220  bool is_obj_array = false;
6221  #ifdef ASSERT
6222    if (!_parallel) {
6223      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6224      assert(_collector->overflow_list_is_empty(),
6225             "overflow list should be empty");
6226
6227    }
6228  #endif // ASSERT
6229  if (_bit_map->isMarked(addr)) {
6230    // Obj arrays are precisely marked, non-arrays are not;
6231    // so we scan objArrays precisely and non-arrays in their
6232    // entirety.
6233    if (p->is_objArray()) {
6234      is_obj_array = true;
6235      if (_parallel) {
6236        p->oop_iterate(_par_scan_closure, mr);
6237      } else {
6238        p->oop_iterate(_scan_closure, mr);
6239      }
6240    } else {
6241      if (_parallel) {
6242        p->oop_iterate(_par_scan_closure);
6243      } else {
6244        p->oop_iterate(_scan_closure);
6245      }
6246    }
6247  }
6248  #ifdef ASSERT
6249    if (!_parallel) {
6250      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6251      assert(_collector->overflow_list_is_empty(),
6252             "overflow list should be empty");
6253
6254    }
6255  #endif // ASSERT
6256  return is_obj_array;
6257}
6258
6259MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6260                        MemRegion span,
6261                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
6262                        bool should_yield, bool verifying):
6263  _collector(collector),
6264  _span(span),
6265  _bitMap(bitMap),
6266  _mut(&collector->_modUnionTable),
6267  _markStack(markStack),
6268  _yield(should_yield),
6269  _skipBits(0)
6270{
6271  assert(_markStack->isEmpty(), "stack should be empty");
6272  _finger = _bitMap->startWord();
6273  _threshold = _finger;
6274  assert(_collector->_restart_addr == NULL, "Sanity check");
6275  assert(_span.contains(_finger), "Out of bounds _finger?");
6276  DEBUG_ONLY(_verifying = verifying;)
6277}
6278
6279void MarkFromRootsClosure::reset(HeapWord* addr) {
6280  assert(_markStack->isEmpty(), "would cause duplicates on stack");
6281  assert(_span.contains(addr), "Out of bounds _finger?");
6282  _finger = addr;
6283  _threshold = (HeapWord*)round_to(
6284                 (intptr_t)_finger, CardTableModRefBS::card_size);
6285}
6286
6287// Should revisit to see if this should be restructured for
6288// greater efficiency.
6289bool MarkFromRootsClosure::do_bit(size_t offset) {
6290  if (_skipBits > 0) {
6291    _skipBits--;
6292    return true;
6293  }
6294  // convert offset into a HeapWord*
6295  HeapWord* addr = _bitMap->startWord() + offset;
6296  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6297         "address out of range");
6298  assert(_bitMap->isMarked(addr), "tautology");
6299  if (_bitMap->isMarked(addr+1)) {
6300    // this is an allocated but not yet initialized object
6301    assert(_skipBits == 0, "tautology");
6302    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6303    oop p = oop(addr);
6304    if (p->klass_or_null() == NULL) {
6305      DEBUG_ONLY(if (!_verifying) {)
6306        // We re-dirty the cards on which this object lies and increase
6307        // the _threshold so that we'll come back to scan this object
6308        // during the preclean or remark phase. (CMSCleanOnEnter)
6309        if (CMSCleanOnEnter) {
6310          size_t sz = _collector->block_size_using_printezis_bits(addr);
6311          HeapWord* end_card_addr   = (HeapWord*)round_to(
6312                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6313          MemRegion redirty_range = MemRegion(addr, end_card_addr);
6314          assert(!redirty_range.is_empty(), "Arithmetical tautology");
6315          // Bump _threshold to end_card_addr; note that
6316          // _threshold cannot possibly exceed end_card_addr, anyhow.
6317          // This prevents future clearing of the card as the scan proceeds
6318          // to the right.
6319          assert(_threshold <= end_card_addr,
6320                 "Because we are just scanning into this object");
6321          if (_threshold < end_card_addr) {
6322            _threshold = end_card_addr;
6323          }
6324          if (p->klass_or_null() != NULL) {
6325            // Redirty the range of cards...
6326            _mut->mark_range(redirty_range);
6327          } // ...else the setting of klass will dirty the card anyway.
6328        }
6329      DEBUG_ONLY(})
6330      return true;
6331    }
6332  }
6333  scanOopsInOop(addr);
6334  return true;
6335}
6336
6337// We take a break if we've been at this for a while,
6338// so as to avoid monopolizing the locks involved.
6339void MarkFromRootsClosure::do_yield_work() {
6340  // First give up the locks, then yield, then re-lock
6341  // We should probably use a constructor/destructor idiom to
6342  // do this unlock/lock or modify the MutexUnlocker class to
6343  // serve our purpose. XXX
6344  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6345         "CMS thread should hold CMS token");
6346  assert_lock_strong(_bitMap->lock());
6347  _bitMap->lock()->unlock();
6348  ConcurrentMarkSweepThread::desynchronize(true);
6349  _collector->stopTimer();
6350  _collector->incrementYields();
6351
6352  // See the comment in coordinator_yield()
6353  for (unsigned i = 0; i < CMSYieldSleepCount &&
6354                       ConcurrentMarkSweepThread::should_yield() &&
6355                       !CMSCollector::foregroundGCIsActive(); ++i) {
6356    os::sleep(Thread::current(), 1, false);
6357  }
6358
6359  ConcurrentMarkSweepThread::synchronize(true);
6360  _bitMap->lock()->lock_without_safepoint_check();
6361  _collector->startTimer();
6362}
6363
6364void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6365  assert(_bitMap->isMarked(ptr), "expected bit to be set");
6366  assert(_markStack->isEmpty(),
6367         "should drain stack to limit stack usage");
6368  // convert ptr to an oop preparatory to scanning
6369  oop obj = oop(ptr);
6370  // Ignore mark word in verification below, since we
6371  // may be running concurrent with mutators.
6372  assert(obj->is_oop(true), "should be an oop");
6373  assert(_finger <= ptr, "_finger runneth ahead");
6374  // advance the finger to right end of this object
6375  _finger = ptr + obj->size();
6376  assert(_finger > ptr, "we just incremented it above");
6377  // On large heaps, it may take us some time to get through
6378  // the marking phase. During
6379  // this time it's possible that a lot of mutations have
6380  // accumulated in the card table and the mod union table --
6381  // these mutation records are redundant until we have
6382  // actually traced into the corresponding card.
6383  // Here, we check whether advancing the finger would make
6384  // us cross into a new card, and if so clear corresponding
6385  // cards in the MUT (preclean them in the card-table in the
6386  // future).
6387
6388  DEBUG_ONLY(if (!_verifying) {)
6389    // The clean-on-enter optimization is disabled by default,
6390    // until we fix 6178663.
6391    if (CMSCleanOnEnter && (_finger > _threshold)) {
6392      // [_threshold, _finger) represents the interval
6393      // of cards to be cleared  in MUT (or precleaned in card table).
6394      // The set of cards to be cleared is all those that overlap
6395      // with the interval [_threshold, _finger); note that
6396      // _threshold is always kept card-aligned but _finger isn't
6397      // always card-aligned.
6398      HeapWord* old_threshold = _threshold;
6399      assert(old_threshold == (HeapWord*)round_to(
6400              (intptr_t)old_threshold, CardTableModRefBS::card_size),
6401             "_threshold should always be card-aligned");
6402      _threshold = (HeapWord*)round_to(
6403                     (intptr_t)_finger, CardTableModRefBS::card_size);
6404      MemRegion mr(old_threshold, _threshold);
6405      assert(!mr.is_empty(), "Control point invariant");
6406      assert(_span.contains(mr), "Should clear within span");
6407      _mut->clear_range(mr);
6408    }
6409  DEBUG_ONLY(})
6410  // Note: the finger doesn't advance while we drain
6411  // the stack below.
6412  PushOrMarkClosure pushOrMarkClosure(_collector,
6413                                      _span, _bitMap, _markStack,
6414                                      _finger, this);
6415  bool res = _markStack->push(obj);
6416  assert(res, "Empty non-zero size stack should have space for single push");
6417  while (!_markStack->isEmpty()) {
6418    oop new_oop = _markStack->pop();
6419    // Skip verifying header mark word below because we are
6420    // running concurrent with mutators.
6421    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6422    // now scan this oop's oops
6423    new_oop->oop_iterate(&pushOrMarkClosure);
6424    do_yield_check();
6425  }
6426  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6427}
6428
6429ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6430                       CMSCollector* collector, MemRegion span,
6431                       CMSBitMap* bit_map,
6432                       OopTaskQueue* work_queue,
6433                       CMSMarkStack*  overflow_stack):
6434  _collector(collector),
6435  _whole_span(collector->_span),
6436  _span(span),
6437  _bit_map(bit_map),
6438  _mut(&collector->_modUnionTable),
6439  _work_queue(work_queue),
6440  _overflow_stack(overflow_stack),
6441  _skip_bits(0),
6442  _task(task)
6443{
6444  assert(_work_queue->size() == 0, "work_queue should be empty");
6445  _finger = span.start();
6446  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6447  assert(_span.contains(_finger), "Out of bounds _finger?");
6448}
6449
6450// Should revisit to see if this should be restructured for
6451// greater efficiency.
6452bool ParMarkFromRootsClosure::do_bit(size_t offset) {
6453  if (_skip_bits > 0) {
6454    _skip_bits--;
6455    return true;
6456  }
6457  // convert offset into a HeapWord*
6458  HeapWord* addr = _bit_map->startWord() + offset;
6459  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6460         "address out of range");
6461  assert(_bit_map->isMarked(addr), "tautology");
6462  if (_bit_map->isMarked(addr+1)) {
6463    // this is an allocated object that might not yet be initialized
6464    assert(_skip_bits == 0, "tautology");
6465    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6466    oop p = oop(addr);
6467    if (p->klass_or_null() == NULL) {
6468      // in the case of Clean-on-Enter optimization, redirty card
6469      // and avoid clearing card by increasing  the threshold.
6470      return true;
6471    }
6472  }
6473  scan_oops_in_oop(addr);
6474  return true;
6475}
6476
6477void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6478  assert(_bit_map->isMarked(ptr), "expected bit to be set");
6479  // Should we assert that our work queue is empty or
6480  // below some drain limit?
6481  assert(_work_queue->size() == 0,
6482         "should drain stack to limit stack usage");
6483  // convert ptr to an oop preparatory to scanning
6484  oop obj = oop(ptr);
6485  // Ignore mark word in verification below, since we
6486  // may be running concurrently with mutators.
6487  assert(obj->is_oop(true), "should be an oop");
6488  assert(_finger <= ptr, "_finger runneth ahead");
6489  // advance the finger to right end of this object
6490  _finger = ptr + obj->size();
6491  assert(_finger > ptr, "we just incremented it above");
6492  // On large heaps, it may take us some time to get through
6493  // the marking phase. During
6494  // this time it's possible that a lot of mutations have
6495  // accumulated in the card table and the mod union table --
6496  // these mutation records are redundant until we have
6497  // actually traced into the corresponding card.
6498  // Here, we check whether advancing the finger would make
6499  // us cross into a new card, and if so clear corresponding
6500  // cards in the MUT (preclean them in the card-table in the
6501  // future).
6502
6503  // The clean-on-enter optimization is disabled by default,
6504  // until we fix 6178663.
6505  if (CMSCleanOnEnter && (_finger > _threshold)) {
6506    // [_threshold, _finger) represents the interval
6507    // of cards to be cleared in MUT (or precleaned in card table).
6508    // The set of cards to be cleared is all those that overlap
6509    // with the interval [_threshold, _finger); note that
6510    // _threshold is always kept card-aligned but _finger isn't
6511    // always card-aligned.
6512    HeapWord* old_threshold = _threshold;
6513    assert(old_threshold == (HeapWord*)round_to(
6514            (intptr_t)old_threshold, CardTableModRefBS::card_size),
6515           "_threshold should always be card-aligned");
6516    _threshold = (HeapWord*)round_to(
6517                   (intptr_t)_finger, CardTableModRefBS::card_size);
6518    MemRegion mr(old_threshold, _threshold);
6519    assert(!mr.is_empty(), "Control point invariant");
6520    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6521    _mut->clear_range(mr);
6522  }
6523
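  // As a concrete illustration, assuming the usual 512-byte cards (so a
  // card covers 64 HeapWords on a 64-bit VM): if, hypothetically,
  // old_threshold sat at byte offset 0x10000 of the span and _finger had
  // advanced to 0x10258, the new _threshold would round up to 0x10400 and
  // the mod union table bits for the two cards covering [0x10000, 0x10400)
  // would be cleared above.
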
6524  // Note: the local finger doesn't advance while we drain
6525  // the stack below, but the global finger sure can and will.
6526  HeapWord** gfa = _task->global_finger_addr();
6527  ParPushOrMarkClosure pushOrMarkClosure(_collector,
6528                                         _span, _bit_map,
6529                                         _work_queue,
6530                                         _overflow_stack,
6531                                         _finger,
6532                                         gfa, this);
6533  bool res = _work_queue->push(obj);   // overflow could occur here
6534  assert(res, "Will hold once we use workqueues");
6535  while (true) {
6536    oop new_oop;
6537    if (!_work_queue->pop_local(new_oop)) {
6538      // We emptied our work_queue; check if there's stuff that can
6539      // be gotten from the overflow stack.
6540      if (CMSConcMarkingTask::get_work_from_overflow_stack(
6541            _overflow_stack, _work_queue)) {
6542        do_yield_check();
6543        continue;
6544      } else {  // done
6545        break;
6546      }
6547    }
6548    // Skip verifying header mark word below because we are
6549    // running concurrently with mutators.
6550    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6551    // now scan this oop's oops
6552    new_oop->oop_iterate(&pushOrMarkClosure);
6553    do_yield_check();
6554  }
6555  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6556}
6557
6558// Yield in response to a request from VM Thread or
6559// from mutators.
6560void ParMarkFromRootsClosure::do_yield_work() {
6561  assert(_task != NULL, "sanity");
6562  _task->yield();
6563}
6564
6565// A variant of the above used for verifying CMS marking work.
6566MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6567                        MemRegion span,
6568                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6569                        CMSMarkStack*  mark_stack):
6570  _collector(collector),
6571  _span(span),
6572  _verification_bm(verification_bm),
6573  _cms_bm(cms_bm),
6574  _mark_stack(mark_stack),
6575  _pam_verify_closure(collector, span, verification_bm, cms_bm,
6576                      mark_stack)
6577{
6578  assert(_mark_stack->isEmpty(), "stack should be empty");
6579  _finger = _verification_bm->startWord();
6580  assert(_collector->_restart_addr == NULL, "Sanity check");
6581  assert(_span.contains(_finger), "Out of bounds _finger?");
6582}
6583
6584void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6585  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6586  assert(_span.contains(addr), "Out of bounds _finger?");
6587  _finger = addr;
6588}
6589
6590// Should revisit to see if this should be restructured for
6591// greater efficiency.
6592bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6593  // convert offset into a HeapWord*
6594  HeapWord* addr = _verification_bm->startWord() + offset;
6595  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6596         "address out of range");
6597  assert(_verification_bm->isMarked(addr), "tautology");
6598  assert(_cms_bm->isMarked(addr), "tautology");
6599
6600  assert(_mark_stack->isEmpty(),
6601         "should drain stack to limit stack usage");
6602  // convert addr to an oop preparatory to scanning
6603  oop obj = oop(addr);
6604  assert(obj->is_oop(), "should be an oop");
6605  assert(_finger <= addr, "_finger runneth ahead");
6606  // advance the finger to right end of this object
6607  _finger = addr + obj->size();
6608  assert(_finger > addr, "we just incremented it above");
6609  // Note: the finger doesn't advance while we drain
6610  // the stack below.
6611  bool res = _mark_stack->push(obj);
6612  assert(res, "Empty non-zero size stack should have space for single push");
6613  while (!_mark_stack->isEmpty()) {
6614    oop new_oop = _mark_stack->pop();
6615    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6616    // now scan this oop's oops
6617    new_oop->oop_iterate(&_pam_verify_closure);
6618  }
6619  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6620  return true;
6621}
6622
6623PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6624  CMSCollector* collector, MemRegion span,
6625  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6626  CMSMarkStack*  mark_stack):
6627  MetadataAwareOopClosure(collector->ref_processor()),
6628  _collector(collector),
6629  _span(span),
6630  _verification_bm(verification_bm),
6631  _cms_bm(cms_bm),
6632  _mark_stack(mark_stack)
6633{ }
6634
6635void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6636void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6637
6638// Upon stack overflow, we discard (part of) the stack,
6639// remembering the least address amongst those discarded
6640// in CMSCollector's _restart_addr.
6641void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6642  // Remember the least grey address discarded
6643  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6644  _collector->lower_restart_addr(ra);
6645  _mark_stack->reset();  // discard stack contents
6646  _mark_stack->expand(); // expand the stack if possible
6647}
6648
6649void PushAndMarkVerifyClosure::do_oop(oop obj) {
6650  assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6651  HeapWord* addr = (HeapWord*)obj;
6652  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6653    // Oop lies in _span and isn't yet grey or black
6654    _verification_bm->mark(addr);            // now grey
6655    if (!_cms_bm->isMarked(addr)) {
6656      LogHandle(gc, verify) log;
6657      ResourceMark rm;
6658      oop(addr)->print_on(log.info_stream());
6659      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6660      fatal("... aborting");
6661    }
6662
6663    if (!_mark_stack->push(obj)) { // stack overflow
6664      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6665      assert(_mark_stack->isFull(), "Else push should have succeeded");
6666      handle_stack_overflow(addr);
6667    }
6668    // anything including and to the right of _finger
6669    // will be scanned as we iterate over the remainder of the
6670    // bit map
6671  }
6672}
6673
6674PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6675                     MemRegion span,
6676                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
6677                     HeapWord* finger, MarkFromRootsClosure* parent) :
6678  MetadataAwareOopClosure(collector->ref_processor()),
6679  _collector(collector),
6680  _span(span),
6681  _bitMap(bitMap),
6682  _markStack(markStack),
6683  _finger(finger),
6684  _parent(parent)
6685{ }
6686
6687ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6688                                           MemRegion span,
6689                                           CMSBitMap* bit_map,
6690                                           OopTaskQueue* work_queue,
6691                                           CMSMarkStack*  overflow_stack,
6692                                           HeapWord* finger,
6693                                           HeapWord** global_finger_addr,
6694                                           ParMarkFromRootsClosure* parent) :
6695  MetadataAwareOopClosure(collector->ref_processor()),
6696  _collector(collector),
6697  _whole_span(collector->_span),
6698  _span(span),
6699  _bit_map(bit_map),
6700  _work_queue(work_queue),
6701  _overflow_stack(overflow_stack),
6702  _finger(finger),
6703  _global_finger_addr(global_finger_addr),
6704  _parent(parent)
6705{ }
6706
6707// Assumes thread-safe access by callers, who are
6708// responsible for mutual exclusion.
6709void CMSCollector::lower_restart_addr(HeapWord* low) {
6710  assert(_span.contains(low), "Out of bounds addr");
6711  if (_restart_addr == NULL) {
6712    _restart_addr = low;
6713  } else {
6714    _restart_addr = MIN2(_restart_addr, low);
6715  }
6716}
6717
6718// Upon stack overflow, we discard (part of) the stack,
6719// remembering the least address amongst those discarded
6720// in CMSCollector's _restart_addr.
6721void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6722  // Remember the least grey address discarded
6723  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6724  _collector->lower_restart_addr(ra);
6725  _markStack->reset();  // discard stack contents
6726  _markStack->expand(); // expand the stack if possible
6727}
6728
6729// Upon stack overflow, we discard (part of) the stack,
6730// remembering the least address amongst those discarded
6731// in CMSCollector's _restart_addr.
6732void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6733  // We need to do this under a mutex to prevent other
6734  // workers from interfering with the work done below.
6735  MutexLockerEx ml(_overflow_stack->par_lock(),
6736                   Mutex::_no_safepoint_check_flag);
6737  // Remember the least grey address discarded
6738  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6739  _collector->lower_restart_addr(ra);
6740  _overflow_stack->reset();  // discard stack contents
6741  _overflow_stack->expand(); // expand the stack if possible
6742}
6743
6744void PushOrMarkClosure::do_oop(oop obj) {
6745  // Ignore mark word because we are running concurrently with mutators.
6746  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6747  HeapWord* addr = (HeapWord*)obj;
6748  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6749    // Oop lies in _span and isn't yet grey or black
6750    _bitMap->mark(addr);            // now grey
6751    if (addr < _finger) {
6752      // the bit map iteration has already either passed, or
6753      // sampled, this bit in the bit map; we'll need to
6754      // use the marking stack to scan this oop's oops.
6755      bool simulate_overflow = false;
6756      NOT_PRODUCT(
6757        if (CMSMarkStackOverflowALot &&
6758            _collector->simulate_overflow()) {
6759          // simulate a stack overflow
6760          simulate_overflow = true;
6761        }
6762      )
6763      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6764        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6765        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6766        handle_stack_overflow(addr);
6767      }
6768    }
6769    // anything including and to the right of _finger
6770    // will be scanned as we iterate over the remainder of the
6771    // bit map
6772    do_yield_check();
6773  }
6774}
6775
6776void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
6777void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6778
6779void ParPushOrMarkClosure::do_oop(oop obj) {
6780  // Ignore mark word because we are running concurrently with mutators.
6781  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6782  HeapWord* addr = (HeapWord*)obj;
6783  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6784    // Oop lies in _whole_span and isn't yet grey or black
6785    // We read the global_finger (volatile read) strictly after marking oop
6786    bool res = _bit_map->par_mark(addr);    // now grey
6787    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6788    // Should we push this marked oop on our stack?
6789    // -- if someone else marked it, nothing to do
6790    // -- if target oop is above global finger nothing to do
6791    // -- if target oop is in chunk and above local finger
6792    //      then nothing to do
6793    // -- else push on work queue
6794    if (   !res       // someone else marked it, they will deal with it
6795        || (addr >= *gfa)  // will be scanned in a later task
6796        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6797      return;
6798    }
6799    // the bit map iteration has already either passed, or
6800    // sampled, this bit in the bit map; we'll need to
6801    // use the marking stack to scan this oop's oops.
6802    bool simulate_overflow = false;
6803    NOT_PRODUCT(
6804      if (CMSMarkStackOverflowALot &&
6805          _collector->simulate_overflow()) {
6806        // simulate a stack overflow
6807        simulate_overflow = true;
6808      }
6809    )
6810    if (simulate_overflow ||
6811        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6812      // stack overflow
6813      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6814      // We cannot assert that the overflow stack is full because
6815      // it may have been emptied since.
6816      assert(simulate_overflow ||
6817             _work_queue->size() == _work_queue->max_elems(),
6818            "Else push should have succeeded");
6819      handle_stack_overflow(addr);
6820    }
6821    do_yield_check();
6822  }
6823}
6824
6825void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
6826void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
6827
6828PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6829                                       MemRegion span,
6830                                       ReferenceProcessor* rp,
6831                                       CMSBitMap* bit_map,
6832                                       CMSBitMap* mod_union_table,
6833                                       CMSMarkStack*  mark_stack,
6834                                       bool           concurrent_precleaning):
6835  MetadataAwareOopClosure(rp),
6836  _collector(collector),
6837  _span(span),
6838  _bit_map(bit_map),
6839  _mod_union_table(mod_union_table),
6840  _mark_stack(mark_stack),
6841  _concurrent_precleaning(concurrent_precleaning)
6842{
6843  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6844}
6845
6846// Grey object rescan during pre-cleaning and second checkpoint phases --
6847// the non-parallel version (the parallel version appears further below.)
6848void PushAndMarkClosure::do_oop(oop obj) {
6849  // Ignore mark word verification. If during concurrent precleaning,
6850  // the object monitor may be locked. If during the checkpoint
6851  // phases, the object may already have been reached by a different
6852  // path and may be at the end of the global overflow list (so
6853  // the mark word may be NULL).
6854  assert(obj->is_oop_or_null(true /* ignore mark word */),
6855         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6856  HeapWord* addr = (HeapWord*)obj;
6857  // Check if oop points into the CMS generation
6858  // and is not marked
6859  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6860    // a white object ...
6861    _bit_map->mark(addr);         // ... now grey
6862    // push on the marking stack (grey set)
6863    bool simulate_overflow = false;
6864    NOT_PRODUCT(
6865      if (CMSMarkStackOverflowALot &&
6866          _collector->simulate_overflow()) {
6867        // simulate a stack overflow
6868        simulate_overflow = true;
6869      }
6870    )
6871    if (simulate_overflow || !_mark_stack->push(obj)) {
6872      if (_concurrent_precleaning) {
6873         // During precleaning we can just dirty the appropriate card(s)
6874         // in the mod union table, thus ensuring that the object remains
6875         // in the grey set and continue. In the case of object arrays
6876         // we need to dirty all of the cards that the object spans,
6877         // since the rescan of object arrays will be limited to the
6878         // dirty cards.
6879         // Note that no one can be interfering with us in this action
6880         // of dirtying the mod union table, so no locking or atomics
6881         // are required.
6882         if (obj->is_objArray()) {
6883           size_t sz = obj->size();
6884           HeapWord* end_card_addr = (HeapWord*)round_to(
6885                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6886           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6887           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6888           _mod_union_table->mark_range(redirty_range);
6889         } else {
6890           _mod_union_table->mark(addr);
6891         }
6892         _collector->_ser_pmc_preclean_ovflw++;
6893      } else {
6894         // During the remark phase, we need to remember this oop
6895         // in the overflow list.
6896         _collector->push_on_overflow_list(obj);
6897         _collector->_ser_pmc_remark_ovflw++;
6898      }
6899    }
6900  }
6901}
6902
6903ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6904                                             MemRegion span,
6905                                             ReferenceProcessor* rp,
6906                                             CMSBitMap* bit_map,
6907                                             OopTaskQueue* work_queue):
6908  MetadataAwareOopClosure(rp),
6909  _collector(collector),
6910  _span(span),
6911  _bit_map(bit_map),
6912  _work_queue(work_queue)
6913{
6914  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6915}
6916
6917void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
6918void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
6919
6920// Grey object rescan during second checkpoint phase --
6921// the parallel version.
6922void ParPushAndMarkClosure::do_oop(oop obj) {
6923  // In the assert below, we ignore the mark word because
6924  // this oop may point to an already visited object that is
6925  // on the overflow stack (in which case the mark word has
6926  // been hijacked for chaining into the overflow stack --
6927  // if this is the last object in the overflow stack then
6928  // its mark word will be NULL). Because this object may
6929  // have been subsequently popped off the global overflow
6930  // stack, and the mark word possibly restored to the prototypical
6931// value, by the time we get to examine this failing assert in
6932  // the debugger, is_oop_or_null(false) may subsequently start
6933  // to hold.
6934  assert(obj->is_oop_or_null(true),
6935         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6936  HeapWord* addr = (HeapWord*)obj;
6937  // Check if oop points into the CMS generation
6938  // and is not marked
6939  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6940    // a white object ...
6941    // If we manage to "claim" the object, by being the
6942    // first thread to mark it, then we push it on our
6943    // marking stack
6944    if (_bit_map->par_mark(addr)) {     // ... now grey
6945      // push on work queue (grey set)
6946      bool simulate_overflow = false;
6947      NOT_PRODUCT(
6948        if (CMSMarkStackOverflowALot &&
6949            _collector->par_simulate_overflow()) {
6950          // simulate a stack overflow
6951          simulate_overflow = true;
6952        }
6953      )
6954      if (simulate_overflow || !_work_queue->push(obj)) {
6955        _collector->par_push_on_overflow_list(obj);
6956        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
6957      }
6958    } // Else, some other thread got there first
6959  }
6960}
6961
6962void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
6963void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
6964
6965void CMSPrecleanRefsYieldClosure::do_yield_work() {
6966  Mutex* bml = _collector->bitMapLock();
6967  assert_lock_strong(bml);
6968  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6969         "CMS thread should hold CMS token");
6970
6971  bml->unlock();
6972  ConcurrentMarkSweepThread::desynchronize(true);
6973
6974  _collector->stopTimer();
6975  _collector->incrementYields();
6976
6977  // See the comment in coordinator_yield()
6978  for (unsigned i = 0; i < CMSYieldSleepCount &&
6979                       ConcurrentMarkSweepThread::should_yield() &&
6980                       !CMSCollector::foregroundGCIsActive(); ++i) {
6981    os::sleep(Thread::current(), 1, false);
6982  }
6983
6984  ConcurrentMarkSweepThread::synchronize(true);
6985  bml->lock();
6986
6987  _collector->startTimer();
6988}
6989
6990bool CMSPrecleanRefsYieldClosure::should_return() {
6991  if (ConcurrentMarkSweepThread::should_yield()) {
6992    do_yield_work();
6993  }
6994  return _collector->foregroundGCIsActive();
6995}
6996
6997void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
6998  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
6999         "mr should be aligned to start at a card boundary");
7000  // We'd like to assert:
7001  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7002  //        "mr should be a range of cards");
7003  // However, that would be too strong in one case -- the last
7004  // partition ends at _unallocated_block which, in general, can be
7005  // an arbitrary boundary, not necessarily card aligned.
7006  _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
7007  _space->object_iterate_mem(mr, &_scan_cl);
7008}
7009
7010SweepClosure::SweepClosure(CMSCollector* collector,
7011                           ConcurrentMarkSweepGeneration* g,
7012                           CMSBitMap* bitMap, bool should_yield) :
7013  _collector(collector),
7014  _g(g),
7015  _sp(g->cmsSpace()),
7016  _limit(_sp->sweep_limit()),
7017  _freelistLock(_sp->freelistLock()),
7018  _bitMap(bitMap),
7019  _yield(should_yield),
7020  _inFreeRange(false),           // No free range at beginning of sweep
7021  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7022  _lastFreeRangeCoalesced(false),
7023  _freeFinger(g->used_region().start())
7024{
7025  NOT_PRODUCT(
7026    _numObjectsFreed = 0;
7027    _numWordsFreed   = 0;
7028    _numObjectsLive = 0;
7029    _numWordsLive = 0;
7030    _numObjectsAlreadyFree = 0;
7031    _numWordsAlreadyFree = 0;
7032    _last_fc = NULL;
7033
7034    _sp->initializeIndexedFreeListArrayReturnedBytes();
7035    _sp->dictionary()->initialize_dict_returned_bytes();
7036  )
7037  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7038         "sweep _limit out of bounds");
7039  log_develop_trace(gc, sweep)("====================");
7040  log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
7041}
7042
7043void SweepClosure::print_on(outputStream* st) const {
7044  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7045                p2i(_sp->bottom()), p2i(_sp->end()));
7046  tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7047  tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7048  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7049  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7050                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7051}
7052
7053#ifndef PRODUCT
7054// Assertion checking only:  no useful work in product mode --
7055// however, if any of the flags below become product flags,
7056// you may need to review this code to see if it needs to be
7057// enabled in product mode.
7058SweepClosure::~SweepClosure() {
7059  assert_lock_strong(_freelistLock);
7060  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7061         "sweep _limit out of bounds");
7062  if (inFreeRange()) {
7063    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7064    print();
7065    ShouldNotReachHere();
7066  }
7067
7068  if (log_is_enabled(Debug, gc, sweep)) {
7069    log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7070                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7071    log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7072                         _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7073    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7074    log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7075  }
7076
7077  if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7078    size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7079    size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7080    size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7081    log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
7082                         returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7083  }
7084  log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7085  log_develop_trace(gc, sweep)("================");
7086}
7087#endif  // PRODUCT
7088
7089void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7090    bool freeRangeInFreeLists) {
7091  log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
7092                               p2i(freeFinger), freeRangeInFreeLists);
7093  assert(!inFreeRange(), "Trampling existing free range");
7094  set_inFreeRange(true);
7095  set_lastFreeRangeCoalesced(false);
7096
7097  set_freeFinger(freeFinger);
7098  set_freeRangeInFreeLists(freeRangeInFreeLists);
7099  if (CMSTestInFreeList) {
7100    if (freeRangeInFreeLists) {
7101      FreeChunk* fc = (FreeChunk*) freeFinger;
7102      assert(fc->is_free(), "A chunk on the free list should be free.");
7103      assert(fc->size() > 0, "Free range should have a size");
7104      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7105    }
7106  }
7107}
7108
7109// Note that the sweeper runs concurrently with mutators. Thus,
7110// it is possible for direct allocation in this generation to happen
7111// in the middle of the sweep. Note that the sweeper also coalesces
7112// contiguous free blocks. Thus, unless the sweeper and the allocator
7113// synchronize appropriately, freshly allocated blocks may get swept up.
7114// This is accomplished by the sweeper locking the free lists while
7115// it is sweeping. Thus blocks that are determined to be free are
7116// indeed free. There is however one additional complication:
7117// blocks that have been allocated since the final checkpoint and
7118// mark will not have been marked and so would be treated as
7119// unreachable and swept up. To prevent this, the allocator marks
7120// the bit map when allocating during the sweep phase. This leads,
7121// however, to a further complication -- objects may have been allocated
7122// but not yet initialized -- in the sense that the header isn't yet
7123// installed. The sweeper cannot then determine the size of the block
7124// in order to skip over it. To deal with this case, we use a technique
7125// (due to Printezis) to encode such uninitialized block sizes in the
7126// bit map. Since the bit map uses a bit per every HeapWord, but the
7127// CMS generation has a minimum object size of 3 HeapWords, it follows
7128// that "normal marks" won't be adjacent in the bit map (there will
7129// always be at least two 0 bits between successive 1 bits). We make use
7130// of these "unused" bits to represent uninitialized blocks -- the bit
7131// corresponding to the start of the uninitialized object and the next
7132// bit are both set. Finally, a 1 bit marks the end of the object that
7133// started with the two consecutive 1 bits to indicate its potentially
7134// uninitialized state.
7135
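// A minimal sketch of how the Printezis encoding above is decoded, kept
// under "#if 0" so it is not compiled; it simply restates the logic used
// in SweepClosure::do_live_chunk() below for an uninitialized block whose
// start is marked with two consecutive bits:
//
//   bit map:  1 1 0 ... 0 1
//             ^           ^
//             start       last word of the block
#if 0
static size_t printezis_block_size_sketch(CMSBitMap* bm, HeapWord* addr) {
  assert(bm->isMarked(addr) && bm->isMarked(addr + 1),
         "only valid for a Printezis-marked (uninitialized) block");
  // The next set bit after the two start bits marks the last word.
  HeapWord* last_word = bm->getNextMarkedWordAddress(addr + 2);
  return pointer_delta(last_word + 1, addr);   // size in HeapWords
}
#endif
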
7136size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7137  FreeChunk* fc = (FreeChunk*)addr;
7138  size_t res;
7139
7140  // Check if we are done sweeping. Below we check "addr >= _limit" rather
7141  // than "addr == _limit" because although _limit was a block boundary when
7142  // we started the sweep, it may no longer be one because heap expansion
7143  // may have caused us to coalesce the block ending at the address _limit
7144  // with a newly expanded chunk (this happens when _limit was set to the
7145  // previous _end of the space), so we may have stepped past _limit:
7146  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7147  if (addr >= _limit) { // we have swept up to or past the limit: finish up
7148    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7149           "sweep _limit out of bounds");
7150    assert(addr < _sp->end(), "addr out of bounds");
7151    // Flush any free range we might be holding as a single
7152    // coalesced chunk to the appropriate free list.
7153    if (inFreeRange()) {
7154      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7155             "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7156      flush_cur_free_chunk(freeFinger(),
7157                           pointer_delta(addr, freeFinger()));
7158      log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
7159                                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7160                                   lastFreeRangeCoalesced() ? 1 : 0);
7161    }
7162
7163    // help the iterator loop finish
7164    return pointer_delta(_sp->end(), addr);
7165  }
7166
7167  assert(addr < _limit, "sweep invariant");
7168  // check if we should yield
7169  do_yield_check(addr);
7170  if (fc->is_free()) {
7171    // Chunk that is already free
7172    res = fc->size();
7173    do_already_free_chunk(fc);
7174    debug_only(_sp->verifyFreeLists());
7175    // If we flush the chunk at hand in lookahead_and_flush()
7176    // and it's coalesced with a preceding chunk, then the
7177    // process of "mangling" the payload of the coalesced block
7178    // will cause erasure of the size information from the
7179    // (erstwhile) header of all the coalesced blocks but the
7180    // first, so the first disjunct in the assert will not hold
7181    // in that specific case (in which case the second disjunct
7182    // will hold).
7183    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7184           "Otherwise the size info doesn't change at this step");
7185    NOT_PRODUCT(
7186      _numObjectsAlreadyFree++;
7187      _numWordsAlreadyFree += res;
7188    )
7189    NOT_PRODUCT(_last_fc = fc;)
7190  } else if (!_bitMap->isMarked(addr)) {
7191    // Chunk is fresh garbage
7192    res = do_garbage_chunk(fc);
7193    debug_only(_sp->verifyFreeLists());
7194    NOT_PRODUCT(
7195      _numObjectsFreed++;
7196      _numWordsFreed += res;
7197    )
7198  } else {
7199    // Chunk that is alive.
7200    res = do_live_chunk(fc);
7201    debug_only(_sp->verifyFreeLists());
7202    NOT_PRODUCT(
7203        _numObjectsLive++;
7204        _numWordsLive += res;
7205    )
7206  }
7207  return res;
7208}
7209
7210// For the smart allocation, record following
7211//  split deaths - a free chunk is removed from its free list because
7212//      it is being split into two or more chunks.
7213//  split birth - a free chunk is being added to its free list because
7214//      a larger free chunk has been split and resulted in this free chunk.
7215//  coal death - a free chunk is being removed from its free list because
7216//      it is being coalesced into a large free chunk.
7217//  coal birth - a free chunk is being added to its free list because
7218//      it was created when two or more free chunks were coalesced into
7219//      this free chunk.
7220//
7221// These statistics are used to determine the desired number of free
7222// chunks of a given size.  The desired number is chosen to be relative
7223// to the end of a CMS sweep.  The desired number at the end of a sweep
7224// is the
7225//      count-at-end-of-previous-sweep (an amount that was enough)
7226//              - count-at-beginning-of-current-sweep  (the excess)
7227//              + split-births  (gains in this size during interval)
7228//              - split-deaths  (demands on this size during interval)
7229// where the interval is from the end of one sweep to the end of the
7230// next.
7231//
7232// While sweeping, the sweeper maintains an accumulated chunk which is
7233// made up of the chunks that have been coalesced so far.  That
7234// will be termed the left-hand chunk.  A new chunk of garbage that
7235// is being considered for coalescing will be referred to as the
7236// right-hand chunk.
7237//
7238// When making a decision on whether to coalesce a right-hand chunk with
7239// the current left-hand chunk, the current count vs. the desired count
7240// of the left-hand chunk is considered.  Also if the right-hand chunk
7241// is near the large chunk at the end of the heap (see
7242// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7243// left-hand chunk is coalesced.
7244//
7245// When making a decision about whether to split a chunk, the desired count
7246// vs. the current count of the candidate to be split is also considered.
7247// If the candidate is underpopulated (currently fewer chunks than desired)
7248// a chunk of an overpopulated (currently more chunks than desired) size may
7249// be chosen.  The "hint" associated with a free list, if non-null, points
7250// to a free list which may be overpopulated.
7251//
7252
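// As a worked (hypothetical) example of the rule above: if a given size
// class ended the previous sweep with 40 chunks, begins the current sweep
// with 10 chunks, and saw 25 split births and 5 split deaths in between,
// the desired count at the end of this sweep is 40 - 10 + 25 - 5 = 50.
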
7253void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7254  const size_t size = fc->size();
7255  // Chunks that cannot be coalesced are not in the
7256  // free lists.
7257  if (CMSTestInFreeList && !fc->cantCoalesce()) {
7258    assert(_sp->verify_chunk_in_free_list(fc),
7259           "free chunk should be in free lists");
7260  }
7261  // a chunk that is already free should not have been
7262  // marked in the bit map
7263  HeapWord* const addr = (HeapWord*) fc;
7264  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7265  // Verify that the bit map has no bits marked between
7266  // addr and purported end of this block.
7267  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7268
7269  // Some chunks cannot be coalesced under any circumstances.
7270  // See the definition of cantCoalesce().
7271  if (!fc->cantCoalesce()) {
7272    // This chunk can potentially be coalesced.
7273    // All the work is done in
7274    do_post_free_or_garbage_chunk(fc, size);
7275    // Note that if the chunk is not coalescable (the else arm
7276    // below), we unconditionally flush, without needing to do
7277    // a "lookahead," as we do below.
7278    if (inFreeRange()) lookahead_and_flush(fc, size);
7279  } else {
7280    // Code path common to both original and adaptive free lists.
7281
7282    // can't coalesce with previous block; this should be treated
7283    // as the end of a free run if any
7284    if (inFreeRange()) {
7285      // we kicked some butt; time to pick up the garbage
7286      assert(freeFinger() < addr, "freeFinger points too high");
7287      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7288    }
7289    // else, nothing to do, just continue
7290  }
7291}
7292
7293size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7294  // This is a chunk of garbage.  It is not in any free list.
7295  // Add it to a free list or let it possibly be coalesced into
7296  // a larger chunk.
7297  HeapWord* const addr = (HeapWord*) fc;
7298  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7299
7300  // Verify that the bit map has no bits marked between
7301  // addr and purported end of just dead object.
7302  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7303  do_post_free_or_garbage_chunk(fc, size);
7304
7305  assert(_limit >= addr + size,
7306         "A freshly garbage chunk can't possibly straddle over _limit");
7307  if (inFreeRange()) lookahead_and_flush(fc, size);
7308  return size;
7309}
7310
7311size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7312  HeapWord* addr = (HeapWord*) fc;
7313  // The sweeper has just found a live object. Return any accumulated
7314  // left hand chunk to the free lists.
7315  if (inFreeRange()) {
7316    assert(freeFinger() < addr, "freeFinger points too high");
7317    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7318  }
7319
7320  // This object is live: we'd normally expect this to be
7321  // an oop, and like to assert the following:
7322  // assert(oop(addr)->is_oop(), "live block should be an oop");
7323  // However, as we commented above, this may be an object whose
7324  // header hasn't yet been initialized.
7325  size_t size;
7326  assert(_bitMap->isMarked(addr), "Tautology for this control point");
7327  if (_bitMap->isMarked(addr + 1)) {
7328    // Determine the size from the bit map, rather than trying to
7329    // compute it from the object header.
7330    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7331    size = pointer_delta(nextOneAddr + 1, addr);
7332    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7333           "alignment problem");
7334
7335#ifdef ASSERT
7336      if (oop(addr)->klass_or_null() != NULL) {
7337        // Ignore mark word because we are running concurrently with mutators
7338        assert(oop(addr)->is_oop(true), "live block should be an oop");
7339        assert(size ==
7340               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7341               "P-mark and computed size do not agree");
7342      }
7343#endif
7344
7345  } else {
7346    // This should be an initialized object that's alive.
7347    assert(oop(addr)->klass_or_null() != NULL,
7348           "Should be an initialized object");
7349    // Ignore mark word because we are running concurrently with mutators
7350    assert(oop(addr)->is_oop(true), "live block should be an oop");
7351    // Verify that the bit map has no bits marked between
7352    // addr and purported end of this block.
7353    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7354    assert(size >= 3, "Necessary for Printezis marks to work");
7355    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7356    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7357  }
7358  return size;
7359}
7360
7361void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7362                                                 size_t chunkSize) {
7363  // do_post_free_or_garbage_chunk() should only be called in the case
7364  // of the adaptive free list allocator.
7365  const bool fcInFreeLists = fc->is_free();
7366  assert((HeapWord*)fc <= _limit, "sweep invariant");
7367  if (CMSTestInFreeList && fcInFreeLists) {
7368    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7369  }
7370
7371  log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7372
7373  HeapWord* const fc_addr = (HeapWord*) fc;
7374
7375  bool coalesce = false;
7376  const size_t left  = pointer_delta(fc_addr, freeFinger());
7377  const size_t right = chunkSize;
7378  switch (FLSCoalescePolicy) {
7379    // numeric value forms a coalescing aggressiveness metric
7380    case 0:  { // never coalesce
7381      coalesce = false;
7382      break;
7383    }
7384    case 1: { // coalesce if left & right chunks on overpopulated lists
7385      coalesce = _sp->coalOverPopulated(left) &&
7386                 _sp->coalOverPopulated(right);
7387      break;
7388    }
7389    case 2: { // coalesce if left chunk on overpopulated list (default)
7390      coalesce = _sp->coalOverPopulated(left);
7391      break;
7392    }
7393    case 3: { // coalesce if left OR right chunk on overpopulated list
7394      coalesce = _sp->coalOverPopulated(left) ||
7395                 _sp->coalOverPopulated(right);
7396      break;
7397    }
7398    case 4: { // always coalesce
7399      coalesce = true;
7400      break;
7401    }
7402    default:
7403     ShouldNotReachHere();
7404  }
7405
7406  // Should the current free range be coalesced?
7407  // If the chunk is in a free range and either we decided to coalesce above
7408  // or the chunk is near the large block at the end of the heap
7409  // (isNearLargestChunk() returns true), then coalesce this chunk.
7410  const bool doCoalesce = inFreeRange()
7411                          && (coalesce || _g->isNearLargestChunk(fc_addr));
7412  if (doCoalesce) {
7413    // Coalesce the current free range on the left with the new
7414    // chunk on the right.  If either is on a free list,
7415    // it must be removed from the list and stashed in the closure.
7416    if (freeRangeInFreeLists()) {
7417      FreeChunk* const ffc = (FreeChunk*)freeFinger();
7418      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7419             "Size of free range is inconsistent with chunk size.");
7420      if (CMSTestInFreeList) {
7421        assert(_sp->verify_chunk_in_free_list(ffc),
7422               "Chunk is not in free lists");
7423      }
7424      _sp->coalDeath(ffc->size());
7425      _sp->removeFreeChunkFromFreeLists(ffc);
7426      set_freeRangeInFreeLists(false);
7427    }
7428    if (fcInFreeLists) {
7429      _sp->coalDeath(chunkSize);
7430      assert(fc->size() == chunkSize,
7431        "The chunk has the wrong size or is not in the free lists");
7432      _sp->removeFreeChunkFromFreeLists(fc);
7433    }
7434    set_lastFreeRangeCoalesced(true);
7435    print_free_block_coalesced(fc);
7436  } else {  // not in a free range and/or should not coalesce
7437    // Return the current free range and start a new one.
7438    if (inFreeRange()) {
7439      // In a free range but cannot coalesce with the right hand chunk.
7440      // Put the current free range into the free lists.
7441      flush_cur_free_chunk(freeFinger(),
7442                           pointer_delta(fc_addr, freeFinger()));
7443    }
7444    // Set up for new free range.  Pass along whether the right hand
7445    // chunk is in the free lists.
7446    initialize_free_range((HeapWord*)fc, fcInFreeLists);
7447  }
7448}
7449
7450// Lookahead flush:
7451// If we are tracking a free range, and this is the last chunk that
7452// we'll look at because its end crosses past _limit, we'll preemptively
7453// flush it along with any free range we may be holding on to. Note that
7454// this can be the case only for an already free or freshly garbage
7455// chunk. If this block is an object, it can never straddle
7456// over _limit. The "straddling" occurs when _limit is set at
7457// the previous end of the space when this cycle started, and
7458// a subsequent heap expansion caused the previously co-terminal
7459// free block to be coalesced with the newly expanded portion,
7460// thus rendering _limit a non-block-boundary making it dangerous
7461// for the sweeper to step over and examine.
7462void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7463  assert(inFreeRange(), "Should only be called if currently in a free range.");
7464  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7465  assert(_sp->used_region().contains(eob - 1),
7466         "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7467         " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7468         " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7469         p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7470  if (eob >= _limit) {
7471    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7472    log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
7473                                 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7474                                 "[" PTR_FORMAT "," PTR_FORMAT ")",
7475                                 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7476    // Return the storage we are tracking back into the free lists.
7477    log_develop_trace(gc, sweep)("Flushing ... ");
7478    assert(freeFinger() < eob, "Error");
7479    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7480  }
7481}
7482
7483void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7484  assert(inFreeRange(), "Should only be called if currently in a free range.");
7485  assert(size > 0,
7486    "A zero sized chunk cannot be added to the free lists.");
7487  if (!freeRangeInFreeLists()) {
7488    if (CMSTestInFreeList) {
7489      FreeChunk* fc = (FreeChunk*) chunk;
7490      fc->set_size(size);
7491      assert(!_sp->verify_chunk_in_free_list(fc),
7492             "chunk should not be in free lists yet");
7493    }
7494    log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
7495    // A new free range is going to be starting.  The current
7496    // free range has not been added to the free lists yet or
7497    // was removed so add it back.
7498    // If the current free range was coalesced, then the death
7499    // of the free range was recorded.  Record a birth now.
7500    if (lastFreeRangeCoalesced()) {
7501      _sp->coalBirth(size);
7502    }
7503    _sp->addChunkAndRepairOffsetTable(chunk, size,
7504            lastFreeRangeCoalesced());
7505  } else {
7506    log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
7507  }
7508  set_inFreeRange(false);
7509  set_freeRangeInFreeLists(false);
7510}
7511
7512// We take a break if we've been at this for a while,
7513// so as to avoid monopolizing the locks involved.
7514void SweepClosure::do_yield_work(HeapWord* addr) {
7515  // Return current free chunk being used for coalescing (if any)
7516  // to the appropriate freelist.  After yielding, the next
7517  // free block encountered will start a coalescing range of
7518  // free blocks.  If the next free block is adjacent to the
7519  // chunk just flushed, they will need to wait for the next
7520  // sweep to be coalesced.
7521  if (inFreeRange()) {
7522    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7523  }
7524
7525  // First give up the locks, then yield, then re-lock.
7526  // We should probably use a constructor/destructor idiom to
7527  // do this unlock/lock or modify the MutexUnlocker class to
7528  // serve our purpose. XXX
7529  assert_lock_strong(_bitMap->lock());
7530  assert_lock_strong(_freelistLock);
7531  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7532         "CMS thread should hold CMS token");
7533  _bitMap->lock()->unlock();
7534  _freelistLock->unlock();
7535  ConcurrentMarkSweepThread::desynchronize(true);
7536  _collector->stopTimer();
7537  _collector->incrementYields();
7538
7539  // See the comment in coordinator_yield()
7540  for (unsigned i = 0; i < CMSYieldSleepCount &&
7541                       ConcurrentMarkSweepThread::should_yield() &&
7542                       !CMSCollector::foregroundGCIsActive(); ++i) {
7543    os::sleep(Thread::current(), 1, false);
7544  }
7545
7546  ConcurrentMarkSweepThread::synchronize(true);
7547  _freelistLock->lock();
7548  _bitMap->lock()->lock_without_safepoint_check();
7549  _collector->startTimer();
7550}
7551
7552#ifndef PRODUCT
7553// This is actually very useful in a product build if it can
7554// be called from the debugger.  Compile it into the product
7555// as needed.
7556bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7557  return debug_cms_space->verify_chunk_in_free_list(fc);
7558}
7559#endif
7560
7561void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7562  log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7563                               p2i(fc), fc->size());
7564}
7565
7566// CMSIsAliveClosure
7567bool CMSIsAliveClosure::do_object_b(oop obj) {
7568  HeapWord* addr = (HeapWord*)obj;
7569  return addr != NULL &&
7570         (!_span.contains(addr) || _bit_map->isMarked(addr));
7571}
7572
7573
7574CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
7575                      MemRegion span,
7576                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7577                      bool cpc):
7578  _collector(collector),
7579  _span(span),
7580  _bit_map(bit_map),
7581  _mark_stack(mark_stack),
7582  _concurrent_precleaning(cpc) {
7583  assert(!_span.is_empty(), "Empty span could spell trouble");
7584}
7585
7586
7587// CMSKeepAliveClosure: the serial version
7588void CMSKeepAliveClosure::do_oop(oop obj) {
7589  HeapWord* addr = (HeapWord*)obj;
7590  if (_span.contains(addr) &&
7591      !_bit_map->isMarked(addr)) {
7592    _bit_map->mark(addr);
7593    bool simulate_overflow = false;
7594    NOT_PRODUCT(
7595      if (CMSMarkStackOverflowALot &&
7596          _collector->simulate_overflow()) {
7597        // simulate a stack overflow
7598        simulate_overflow = true;
7599      }
7600    )
7601    if (simulate_overflow || !_mark_stack->push(obj)) {
7602      if (_concurrent_precleaning) {
7603        // We dirty the overflowed object and let the remark
7604        // phase deal with it.
7605        assert(_collector->overflow_list_is_empty(), "Error");
7606        // In the case of object arrays, we need to dirty all of
7607        // the cards that the object spans. No locking or atomics
7608        // are needed since no one else can be mutating the mod union
7609        // table.
7610        if (obj->is_objArray()) {
7611          size_t sz = obj->size();
7612          HeapWord* end_card_addr =
7613            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7614          MemRegion redirty_range = MemRegion(addr, end_card_addr);
7615          assert(!redirty_range.is_empty(), "Arithmetical tautology");
7616          _collector->_modUnionTable.mark_range(redirty_range);
7617        } else {
7618          _collector->_modUnionTable.mark(addr);
7619        }
7620        _collector->_ser_kac_preclean_ovflw++;
7621      } else {
7622        _collector->push_on_overflow_list(obj);
7623        _collector->_ser_kac_ovflw++;
7624      }
7625    }
7626  }
7627}
7628
7629void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
7630void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
7631
7632// CMSParKeepAliveClosure: a parallel version of the above.
7633// The work queues are private to each closure (thread),
7634// but (may be) available for stealing by other threads.
7635void CMSParKeepAliveClosure::do_oop(oop obj) {
7636  HeapWord* addr = (HeapWord*)obj;
7637  if (_span.contains(addr) &&
7638      !_bit_map->isMarked(addr)) {
7639    // In general, during recursive tracing, several threads
7640    // may be concurrently getting here; the first one to
7641    // "tag" it, claims it.
7642    if (_bit_map->par_mark(addr)) {
7643      bool res = _work_queue->push(obj);
7644      assert(res, "Low water mark should be much less than capacity");
7645      // Do a recursive trim in the hope that this will keep
7646      // stack usage lower, but leave some oops for potential stealers
7647      trim_queue(_low_water_mark);
7648    } // Else, another thread got there first
7649  }
7650}
7651
7652void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
7653void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7654
7655void CMSParKeepAliveClosure::trim_queue(uint max) {
7656  while (_work_queue->size() > max) {
7657    oop new_oop;
7658    if (_work_queue->pop_local(new_oop)) {
7659      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7660      assert(_bit_map->isMarked((HeapWord*)new_oop),
7661             "no white objects on this stack!");
7662      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7663      // iterate over the oops in this oop, marking and pushing
7664      // the ones in CMS heap (i.e. in _span).
7665      new_oop->oop_iterate(&_mark_and_push);
7666    }
7667  }
7668}
7669
7670CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7671                                CMSCollector* collector,
7672                                MemRegion span, CMSBitMap* bit_map,
7673                                OopTaskQueue* work_queue):
7674  _collector(collector),
7675  _span(span),
7676  _bit_map(bit_map),
7677  _work_queue(work_queue) { }
7678
7679void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7680  HeapWord* addr = (HeapWord*)obj;
7681  if (_span.contains(addr) &&
7682      !_bit_map->isMarked(addr)) {
7683    if (_bit_map->par_mark(addr)) {
7684      bool simulate_overflow = false;
7685      NOT_PRODUCT(
7686        if (CMSMarkStackOverflowALot &&
7687            _collector->par_simulate_overflow()) {
7688          // simulate a stack overflow
7689          simulate_overflow = true;
7690        }
7691      )
7692      if (simulate_overflow || !_work_queue->push(obj)) {
7693        _collector->par_push_on_overflow_list(obj);
7694        _collector->_par_kac_ovflw++;
7695      }
7696    } // Else another thread got there already
7697  }
7698}
7699
7700void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7701void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7702
7703//////////////////////////////////////////////////////////////////
7704//  CMSExpansionCause                /////////////////////////////
7705//////////////////////////////////////////////////////////////////
7706const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7707  switch (cause) {
7708    case _no_expansion:
7709      return "No expansion";
7710    case _satisfy_free_ratio:
7711      return "Free ratio";
7712    case _satisfy_promotion:
7713      return "Satisfy promotion";
7714    case _satisfy_allocation:
7715      return "allocation";
7716    case _allocate_par_lab:
7717      return "Par LAB";
7718    case _allocate_par_spooling_space:
7719      return "Par Spooling Space";
7720    case _adaptive_size_policy:
7721      return "Ergonomics";
7722    default:
7723      return "unknown";
7724  }
7725}
7726
7727void CMSDrainMarkingStackClosure::do_void() {
7728  // the max number to take from overflow list at a time
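  // (a quarter of the stack's capacity; presumably this leaves enough headroom
  // that scanning the objects taken below does not immediately overflow the stack)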
7729  const size_t num = _mark_stack->capacity()/4;
7730  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7731         "Overflow list should be NULL during concurrent phases");
7732  while (!_mark_stack->isEmpty() ||
7733         // if stack is empty, check the overflow list
7734         _collector->take_from_overflow_list(num, _mark_stack)) {
7735    oop obj = _mark_stack->pop();
7736    HeapWord* addr = (HeapWord*)obj;
7737    assert(_span.contains(addr), "Should be within span");
7738    assert(_bit_map->isMarked(addr), "Should be marked");
7739    assert(obj->is_oop(), "Should be an oop");
7740    obj->oop_iterate(_keep_alive);
7741  }
7742}
7743
7744void CMSParDrainMarkingStackClosure::do_void() {
7745  // drain queue
7746  trim_queue(0);
7747}
7748
7749// Trim our work_queue so its length is at most max on return
7750void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7751  while (_work_queue->size() > max) {
7752    oop new_oop;
7753    if (_work_queue->pop_local(new_oop)) {
7754      assert(new_oop->is_oop(), "Expected an oop");
7755      assert(_bit_map->isMarked((HeapWord*)new_oop),
7756             "no white objects on this stack!");
7757      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7758      // iterate over the oops in this oop, marking and pushing
7759      // the ones in CMS heap (i.e. in _span).
7760      new_oop->oop_iterate(&_mark_and_push);
7761    }
7762  }
7763}
7764
7765////////////////////////////////////////////////////////////////////
7766// Support for Marking Stack Overflow list handling and related code
7767////////////////////////////////////////////////////////////////////
7768// Much of the following code is similar in shape and spirit to the
7769// code used in ParNewGC. We should try to share that code
7770// as much as possible in the future.
7771
7772#ifndef PRODUCT
7773// Debugging support for CMSStackOverflowALot
7774
7775// It's OK to call this multi-threaded; the worst thing
7776// that can happen is that we'll get a bunch of closely
7777// spaced simulated overflows, but that's OK; in fact it's
7778// probably good, as it exercises the overflow code
7779// under contention.
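// In particular, the unsynchronized read-and-decrement of _overflow_counter
// below may lose updates under races; that merely changes how often an
// overflow is simulated, which is harmless for a debugging aid.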
7780bool CMSCollector::simulate_overflow() {
7781  if (_overflow_counter-- <= 0) { // just being defensive
7782    _overflow_counter = CMSMarkStackOverflowInterval;
7783    return true;
7784  } else {
7785    return false;
7786  }
7787}
7788
7789bool CMSCollector::par_simulate_overflow() {
7790  return simulate_overflow();
7791}
7792#endif
7793
7794// Single-threaded
7795bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7796  assert(stack->isEmpty(), "Expected precondition");
7797  assert(stack->capacity() > num, "Shouldn't bite off more than we can chew");
7798  size_t i = num;
7799  oop  cur = _overflow_list;
7800  const markOop proto = markOopDesc::prototype();
7801  NOT_PRODUCT(ssize_t n = 0;)
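  // Walk the list, which is threaded through the mark words, restoring each
  // object's prototype mark word before pushing it on the stack.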
7802  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7803    next = oop(cur->mark());
7804    cur->set_mark(proto);   // until proven otherwise
7805    assert(cur->is_oop(), "Should be an oop");
7806    bool res = stack->push(cur);
7807    assert(res, "Bit off more than we can chew?");
7808    NOT_PRODUCT(n++;)
7809  }
7810  _overflow_list = cur;
7811#ifndef PRODUCT
7812  assert(_num_par_pushes >= n, "Too many pops?");
7813  _num_par_pushes -= n;
7814#endif
7815  return !stack->isEmpty();
7816}
7817
7818#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
7819// (MT-safe) Get a prefix of at most "num" from the list.
7820// The overflow list is chained through the mark word of
7821// each object in the list. We fetch the entire list,
7822// break off a prefix of the right size and return the
7823// remainder. If other threads try to take objects from
7824// the overflow list at that time, they will wait for
7825// some time to see if data becomes available. If (and
7826// only if) another thread places one or more object(s)
7827// on the global list before we have returned the suffix
7828// to the global list, we will walk down our local list
7829// to find its end and append the global list to
7830// our suffix before returning it. This suffix walk can
7831// prove to be expensive (quadratic in the amount of traffic)
7832// when there are many objects in the overflow list and
7833// there is much producer-consumer contention on the list.
7834// *NOTE*: The overflow list manipulation code here and
7835// in ParNewGeneration:: are very similar in shape,
7836// except that in the ParNew case we use the old (from/eden)
7837// copy of the object to thread the list via its klass word.
7838// Because of the common code, if you make any changes in
7839// the code below, please check the ParNew version to see if
7840// similar changes might be needed.
7841// CR 6797058 has been filed to consolidate the common code.
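// In outline, the code below does the following (an illustrative summary only,
// eliding the spin/sleep loop, the empty-list cases and the assertions):
//   1. Claim the whole list:  prefix = Atomic::xchg_ptr(BUSY, &_overflow_list);
//   2. Walk at most "num" elements of the prefix; detach the rest as a suffix.
//   3. Try to cmpxchg the suffix (or NULL if there is none) back into
//      _overflow_list, expecting to still find the BUSY/NULL we left there;
//      if another thread has published new elements in the meantime, walk to
//      the suffix's tail, splice the observed list behind it, and retry.
//   4. Push the prefix elements onto work_q, restoring their prototype marks.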
7842bool CMSCollector::par_take_from_overflow_list(size_t num,
7843                                               OopTaskQueue* work_q,
7844                                               int no_of_gc_threads) {
7845  assert(work_q->size() == 0, "First empty local work queue");
7846  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7847  if (_overflow_list == NULL) {
7848    return false;
7849  }
7850  // Grab the entire list; we'll put back a suffix
7851  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7852  Thread* tid = Thread::current();
7853  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
7854  // set to ParallelGCThreads.
7855  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
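  // The sleep time scales with the number of objects requested (num/100 ms),
  // with a floor of one millisecond.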
7856  size_t sleep_time_millis = MAX2((size_t)1, num/100);
7857  // If the list is busy, we spin for a short while,
7858  // sleeping between attempts to get the list.
7859  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7860    os::sleep(tid, sleep_time_millis, false);
7861    if (_overflow_list == NULL) {
7862      // Nothing left to take
7863      return false;
7864    } else if (_overflow_list != BUSY) {
7865      // Try and grab the prefix
7866      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7867    }
7868  }
7869  // If the list was found to be empty, or we spun long
7870  // enough, we give up and return empty-handed. If we leave
7871  // the list in the BUSY state below, it must be the case that
7872  // some other thread holds the overflow list and will set it
7873  // to a non-BUSY state in the future.
7874  if (prefix == NULL || prefix == BUSY) {
7875     // Nothing to take or waited long enough
7876     if (prefix == NULL) {
7877       // Write back the NULL in case we overwrote it with BUSY above
7878       // and it is still the same value.
7879       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7880     }
7881     return false;
7882  }
7883  assert(prefix != NULL && prefix != BUSY, "Error");
7884  size_t i = num;
7885  oop cur = prefix;
7886  // Walk down the first "num" objects, unless we reach the end.
7887  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
7888  if (cur->mark() == NULL) {
7889    // We have "num" or fewer elements in the list, so there
7890    // is nothing to return to the global list.
7891    // Write back the NULL in lieu of the BUSY we wrote
7892    // above, if it is still the same value.
7893    if (_overflow_list == BUSY) {
7894      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7895    }
7896  } else {
7897    // Chop off the suffix and return it to the global list.
7898    assert(cur->mark() != BUSY, "Error");
7899    oop suffix_head = cur->mark(); // suffix will be put back on global list
7900    cur->set_mark(NULL);           // break off suffix
7901    // It's possible that the list is still in the empty (BUSY) state
7902    // we left it in a short while ago; in that case we may be
7903    // able to place back the suffix without incurring the cost
7904    // of a walk down the list.
7905    oop observed_overflow_list = _overflow_list;
7906    oop cur_overflow_list = observed_overflow_list;
7907    bool attached = false;
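    // Attempt to install the suffix directly only while the list is observed
    // to be empty (NULL) or BUSY; if another thread has already published
    // elements, fall through to the splice path below.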
7908    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7909      observed_overflow_list =
7910        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7911      if (cur_overflow_list == observed_overflow_list) {
7912        attached = true;
7913        break;
7914      } else cur_overflow_list = observed_overflow_list;
7915    }
7916    if (!attached) {
7917      // Too bad, someone else sneaked in (at least) an element; we'll need
7918      // to do a splice. Find tail of suffix so we can prepend suffix to global
7919      // list.
7920      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
7921      oop suffix_tail = cur;
7922      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
7923             "Tautology");
7924      observed_overflow_list = _overflow_list;
7925      do {
7926        cur_overflow_list = observed_overflow_list;
7927        if (cur_overflow_list != BUSY) {
7928          // Do the splice ...
7929          suffix_tail->set_mark(markOop(cur_overflow_list));
7930        } else { // cur_overflow_list == BUSY
7931          suffix_tail->set_mark(NULL);
7932        }
7933        // ... and try to place spliced list back on overflow_list ...
7934        observed_overflow_list =
7935          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7936      } while (cur_overflow_list != observed_overflow_list);
7937      // ... until we have succeeded in doing so.
7938    }
7939  }
7940
7941  // Push the prefix elements on work_q
7942  assert(prefix != NULL, "control point invariant");
7943  const markOop proto = markOopDesc::prototype();
7944  oop next;
7945  NOT_PRODUCT(ssize_t n = 0;)
7946  for (cur = prefix; cur != NULL; cur = next) {
7947    next = oop(cur->mark());
7948    cur->set_mark(proto);   // until proven otherwise
7949    assert(cur->is_oop(), "Should be an oop");
7950    bool res = work_q->push(cur);
7951    assert(res, "Bit off more than we can chew?");
7952    NOT_PRODUCT(n++;)
7953  }
7954#ifndef PRODUCT
7955  assert(_num_par_pushes >= n, "Too many pops?");
7956  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
7957#endif
7958  return true;
7959}
7960
7961// Single-threaded
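// Prepend p to the overflow list by threading the list through p's mark word;
// the original mark word is saved first if it must be preserved.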
7962void CMSCollector::push_on_overflow_list(oop p) {
7963  NOT_PRODUCT(_num_par_pushes++;)
7964  assert(p->is_oop(), "Not an oop");
7965  preserve_mark_if_necessary(p);
7966  p->set_mark((markOop)_overflow_list);
7967  _overflow_list = p;
7968}
7969
7970// Multi-threaded; use CAS to prepend to overflow list
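// The CAS loop below links p to the observed head (or to NULL when the head
// is the BUSY sentinel, so that BUSY never becomes reachable through a mark
// word) and retries until the cmpxchg succeeds in installing p as the new head.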
7971void CMSCollector::par_push_on_overflow_list(oop p) {
7972  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
7973  assert(p->is_oop(), "Not an oop");
7974  par_preserve_mark_if_necessary(p);
7975  oop observed_overflow_list = _overflow_list;
7976  oop cur_overflow_list;
7977  do {
7978    cur_overflow_list = observed_overflow_list;
7979    if (cur_overflow_list != BUSY) {
7980      p->set_mark(markOop(cur_overflow_list));
7981    } else {
7982      p->set_mark(NULL);
7983    }
7984    observed_overflow_list =
7985      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
7986  } while (cur_overflow_list != observed_overflow_list);
7987}
7988#undef BUSY
7989
7990// Single threaded
7991// General Note on GrowableArray: pushes may silently fail
7992// because we are (temporarily) out of C-heap for expanding
7993// the stack. The problem is quite ubiquitous and affects
7994// a lot of code in the JVM. The prudent thing for GrowableArray
7995// to do (for now) is to exit with an error. However, that may
7996// be too draconian in some cases because the caller may be
7997// able to recover without much harm. For such cases, we
7998// should probably introduce a "soft_push" method which returns
7999// an indication of success or failure with the assumption that
8000// the caller may be able to recover from a failure; code in
8001// the VM can then be changed, incrementally, to deal with such
8002// failures where possible, thus, incrementally hardening the VM
8003// in such low resource situations.
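// A minimal sketch of what such a (currently hypothetical) soft_push might
// look like, assuming GrowableArray's usual _data/_len/_max fields and a
// hypothetical helper try_grow() that attempts to expand the backing array
// and reports failure instead of exiting:
//
//   template <typename E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;              // caller decides how to recover
//     }
//     _data[_len++] = elem;
//     return true;
//   }
//
// Callers that can tolerate a failed push would then check the result and
// recover (for example by falling back to an overflow list), rather than the
// VM exiting with an error.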
8004void CMSCollector::preserve_mark_work(oop p, markOop m) {
8005  _preserved_oop_stack.push(p);
8006  _preserved_mark_stack.push(m);
8007  assert(m == p->mark(), "Mark word changed");
8008  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8009         "bijection");
8010}
8011
8012// Single threaded
8013void CMSCollector::preserve_mark_if_necessary(oop p) {
8014  markOop m = p->mark();
8015  if (m->must_be_preserved(p)) {
8016    preserve_mark_work(p, m);
8017  }
8018}
8019
8020void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8021  markOop m = p->mark();
8022  if (m->must_be_preserved(p)) {
8023    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8024    // Even though we read the mark word without holding
8025    // the lock, we are assured that it will not change
8026    // because we "own" this oop, so no other thread can
8027    // be trying to push it on the overflow list; see
8028    // the assertion in preserve_mark_work() that checks
8029    // that m == p->mark().
8030    preserve_mark_work(p, m);
8031  }
8032}
8033
8034// We should be able to do this multi-threaded,
8035// a chunk of stack being a task (this is
8036// correct because each oop only ever appears
8037// once in the overflow list). However, it's
8038// not very easy to completely overlap this with
8039// other operations, so it will generally not be done
8040// until all work's been completed. Because we
8041// expect the preserved oop stack (set) to be small,
8042// it's probably fine to do this single-threaded.
8043// We can explore cleverer concurrent/overlapped/parallel
8044// processing of preserved marks if we feel the
8045// need for this in the future. Stack overflow should
8046// be so rare in practice and, when it happens, its
8047// effect on performance so great that this will
8048// likely just be in the noise anyway.
8049void CMSCollector::restore_preserved_marks_if_any() {
8050  assert(SafepointSynchronize::is_at_safepoint(),
8051         "world should be stopped");
8052  assert(Thread::current()->is_ConcurrentGC_thread() ||
8053         Thread::current()->is_VM_thread(),
8054         "should be single-threaded");
8055  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8056         "bijection");
8057
8058  while (!_preserved_oop_stack.is_empty()) {
8059    oop p = _preserved_oop_stack.pop();
8060    assert(p->is_oop(), "Should be an oop");
8061    assert(_span.contains(p), "oop should be in _span");
8062    assert(p->mark() == markOopDesc::prototype(),
8063           "Set when taken from overflow list");
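    // Re-install the preserved (original) mark word, replacing the prototype
    // mark that was installed when the object was taken off the overflow list.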
8064    markOop m = _preserved_mark_stack.pop();
8065    p->set_mark(m);
8066  }
8067  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8068         "stacks were cleared above");
8069}
8070
8071#ifndef PRODUCT
8072bool CMSCollector::no_preserved_marks() const {
8073  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8074}
8075#endif
8076
8077// Transfer some number of overflown objects to usual marking
8078// stack. Return true if some objects were transferred.
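// At most a quarter of the mark stack's remaining capacity is transferred,
// capped by ParGCDesiredObjsFromOverflowList.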
8079bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8080  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8081                    (size_t)ParGCDesiredObjsFromOverflowList);
8082
8083  bool res = _collector->take_from_overflow_list(num, _mark_stack);
8084  assert(_collector->overflow_list_is_empty() || res,
8085         "If list is not empty, we should have taken something");
8086  assert(!res || !_mark_stack->isEmpty(),
8087         "If we took something, it should now be on our stack");
8088  return res;
8089}
8090
8091size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8092  size_t res = _sp->block_size_no_stall(addr, _collector);
8093  if (_sp->block_is_obj(addr)) {
8094    if (_live_bit_map->isMarked(addr)) {
8095      // It can't have been dead in a previous cycle
8096      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8097    } else {
8098      _dead_bit_map->mark(addr);      // mark the dead object
8099    }
8100  }
8101  // Could be 0, if the block size could not be computed without stalling.
8102  return res;
8103}
8104
8105TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8106
8107  switch (phase) {
8108    case CMSCollector::InitialMarking:
8109      initialize(true  /* fullGC */ ,
8110                 cause /* cause of the GC */,
8111                 true  /* recordGCBeginTime */,
8112                 true  /* recordPreGCUsage */,
8113                 false /* recordPeakUsage */,
8114                 false /* recordPostGCusage */,
8115                 true  /* recordAccumulatedGCTime */,
8116                 false /* recordGCEndTime */,
8117                 false /* countCollection */  );
8118      break;
8119
8120    case CMSCollector::FinalMarking:
8121      initialize(true  /* fullGC */ ,
8122                 cause /* cause of the GC */,
8123                 false /* recordGCBeginTime */,
8124                 false /* recordPreGCUsage */,
8125                 false /* recordPeakUsage */,
8126                 false /* recordPostGCusage */,
8127                 true  /* recordAccumulatedGCTime */,
8128                 false /* recordGCEndTime */,
8129                 false /* countCollection */  );
8130      break;
8131
8132    case CMSCollector::Sweeping:
8133      initialize(true  /* fullGC */ ,
8134                 cause /* cause of the GC */,
8135                 false /* recordGCBeginTime */,
8136                 false /* recordPreGCUsage */,
8137                 true  /* recordPeakUsage */,
8138                 true  /* recordPostGCusage */,
8139                 false /* recordAccumulatedGCTime */,
8140                 true  /* recordGCEndTime */,
8141                 true  /* countCollection */  );
8142      break;
8143
8144    default:
8145      ShouldNotReachHere();
8146  }
8147}
8148