concurrentMarkSweepGeneration.cpp revision 9181:29c399fbbf25
1/*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/classLoaderData.hpp"
27#include "classfile/stringTable.hpp"
28#include "classfile/systemDictionary.hpp"
29#include "code/codeCache.hpp"
30#include "gc/cms/cmsCollectorPolicy.hpp"
31#include "gc/cms/cmsOopClosures.inline.hpp"
32#include "gc/cms/compactibleFreeListSpace.hpp"
33#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
34#include "gc/cms/concurrentMarkSweepThread.hpp"
35#include "gc/cms/parNewGeneration.hpp"
36#include "gc/cms/vmCMSOperations.hpp"
37#include "gc/serial/genMarkSweep.hpp"
38#include "gc/serial/tenuredGeneration.hpp"
39#include "gc/shared/adaptiveSizePolicy.hpp"
40#include "gc/shared/cardGeneration.inline.hpp"
41#include "gc/shared/cardTableRS.hpp"
42#include "gc/shared/collectedHeap.inline.hpp"
43#include "gc/shared/collectorCounters.hpp"
44#include "gc/shared/collectorPolicy.hpp"
45#include "gc/shared/gcLocker.inline.hpp"
46#include "gc/shared/gcPolicyCounters.hpp"
47#include "gc/shared/gcTimer.hpp"
48#include "gc/shared/gcTrace.hpp"
49#include "gc/shared/gcTraceTime.hpp"
50#include "gc/shared/genCollectedHeap.hpp"
51#include "gc/shared/genOopClosures.inline.hpp"
52#include "gc/shared/isGCActiveMark.hpp"
53#include "gc/shared/referencePolicy.hpp"
54#include "gc/shared/strongRootsScope.hpp"
55#include "gc/shared/taskqueue.inline.hpp"
56#include "memory/allocation.hpp"
57#include "memory/iterator.inline.hpp"
58#include "memory/padded.hpp"
59#include "memory/resourceArea.hpp"
60#include "oops/oop.inline.hpp"
61#include "prims/jvmtiExport.hpp"
62#include "runtime/atomic.inline.hpp"
63#include "runtime/globals_extension.hpp"
64#include "runtime/handles.inline.hpp"
65#include "runtime/java.hpp"
66#include "runtime/orderAccess.inline.hpp"
67#include "runtime/vmThread.hpp"
68#include "services/memoryService.hpp"
69#include "services/runtimeService.hpp"
70#include "utilities/stack.inline.hpp"
71
72// statics
73CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
74bool CMSCollector::_full_gc_requested = false;
75GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
76
77//////////////////////////////////////////////////////////////////
78// In support of CMS/VM thread synchronization
79//////////////////////////////////////////////////////////////////
80// We split use of the CGC_lock into 2 "levels".
81// The low-level locking is of the usual CGC_lock monitor. We introduce
82// a higher level "token" (hereafter "CMS token") built on top of the
83// low level monitor (hereafter "CGC lock").
84// The token-passing protocol gives priority to the VM thread. The
85// CMS-lock doesn't provide any fairness guarantees, but clients
86// should ensure that it is only held for very short, bounded
87// durations.
88//
89// When either of the CMS thread or the VM thread is involved in
90// collection operations during which it does not want the other
91// thread to interfere, it obtains the CMS token.
92//
93// If either thread tries to get the token while the other has
94// it, that thread waits. However, if the VM thread and CMS thread
95// both want the token, then the VM thread gets priority while the
96// CMS thread waits. This ensures, for instance, that the "concurrent"
97// phases of the CMS thread's work do not block out the VM thread
98// for long periods of time as the CMS thread continues to hog
99// the token. (See bug 4616232).
100//
101// The baton-passing functions are, however, controlled by the
102// flags _foregroundGCShouldWait and _foregroundGCIsActive,
103// and here the low-level CMS lock, not the high level token,
104// ensures mutual exclusion.
105//
106// Two important conditions that we have to satisfy:
107// 1. if a thread does a low-level wait on the CMS lock, then it
108//    relinquishes the CMS token if it were holding that token
109//    when it acquired the low-level CMS lock.
110// 2. any low-level notifications on the low-level lock
111//    should only be sent when a thread has relinquished the token.
112//
113// In the absence of either property, we'd have potential deadlock.
114//
115// We protect each of the CMS (concurrent and sequential) phases
116// with the CMS _token_, not the CMS _lock_.
117//
118// The only code protected by CMS lock is the token acquisition code
119// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
120// baton-passing code.
121//
122// Unfortunately, I couldn't come up with a good abstraction to factor and
123// hide the naked CGC_lock manipulation in the baton-passing code
124// further below. That's something we should try to do. Also, the proof
125// of correctness of this 2-level locking scheme is far from obvious,
126// and potentially quite slippery. We have an uneasy suspicion, for instance,
127// that there may be a theoretical possibility of delay/starvation in the
128// low-level lock/wait/notify scheme used for the baton-passing because of
129// potential interference with the priority scheme embodied in the
130// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
131// invocation further below and marked with "XXX 20011219YSR".
132// Indeed, as we note elsewhere, this may become yet more slippery
133// in the presence of multiple CMS and/or multiple VM threads. XXX
134
135class CMSTokenSync: public StackObj {
136 private:
137  bool _is_cms_thread;
138 public:
139  CMSTokenSync(bool is_cms_thread):
140    _is_cms_thread(is_cms_thread) {
141    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
142           "Incorrect argument to constructor");
143    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
144  }
145
146  ~CMSTokenSync() {
147    assert(_is_cms_thread ?
148             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
149             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
150          "Incorrect state");
151    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
152  }
153};
154
155// Convenience class that does a CMSTokenSync, and then acquires
156// up to three locks.
157class CMSTokenSyncWithLocks: public CMSTokenSync {
158 private:
159  // Note: locks are acquired in textual declaration order
160  // and released in the opposite order
161  MutexLockerEx _locker1, _locker2, _locker3;
162 public:
163  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
164                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
165    CMSTokenSync(is_cms_thread),
166    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
167    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
168    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
169  { }
170};
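
// Editorial note: the usage sketch below is illustrative and not part of the
// original file; the method name is hypothetical. It shows how CMS-thread
// code might use CMSTokenSyncWithLocks to hold the CMS token plus a lock for
// a short, bounded critical section that must exclude the VM thread.
//
//   void example_token_protected_work(CMSBitMap* bit_map) {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bit_map->lock());
//     // ... short, bounded work; the locks are released in reverse order on
//     // scope exit, and the CMS token is then relinquished ...
//   }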
171
172
173//////////////////////////////////////////////////////////////////
174//  Concurrent Mark-Sweep Generation /////////////////////////////
175//////////////////////////////////////////////////////////////////
176
177NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
178
179// This struct contains per-thread things necessary to support parallel
180// young-gen collection.
181class CMSParGCThreadState: public CHeapObj<mtGC> {
182 public:
183  CFLS_LAB lab;
184  PromotionInfo promo;
185
186  // Constructor.
187  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
188    promo.setSpace(cfls);
189  }
190};
191
192ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
193     ReservedSpace rs, size_t initial_byte_size,
194     CardTableRS* ct, bool use_adaptive_freelists,
195     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
196  CardGeneration(rs, initial_byte_size, ct),
197  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
198  _did_compact(false)
199{
200  HeapWord* bottom = (HeapWord*) _virtual_space.low();
201  HeapWord* end    = (HeapWord*) _virtual_space.high();
202
203  _direct_allocated_words = 0;
204  NOT_PRODUCT(
205    _numObjectsPromoted = 0;
206    _numWordsPromoted = 0;
207    _numObjectsAllocated = 0;
208    _numWordsAllocated = 0;
209  )
210
211  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
212                                           use_adaptive_freelists,
213                                           dictionaryChoice);
214  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
215  _cmsSpace->_old_gen = this;
216
217  _gc_stats = new CMSGCStats();
218
219  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
220  // offsets match. The ability to tell free chunks from objects
221  // depends on this property.
222  debug_only(
223    FreeChunk* junk = NULL;
224    assert(UseCompressedClassPointers ||
225           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
226           "Offset of FreeChunk::_prev within FreeChunk must match"
227           "  that of OopDesc::_klass within OopDesc");
228  )
229
230  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
231  for (uint i = 0; i < ParallelGCThreads; i++) {
232    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
233  }
234
235  _incremental_collection_failed = false;
236  // The "dilatation_factor" is the expansion that can occur
237  // because the minimum object size in the CMS
238  // generation may be larger than that in, say, a contiguous young
239  // generation.
240  // Ideally, in the calculation below, we'd compute the dilatation
241  // factor as: MinChunkSize/(promoting_gen's min object size)
242  // Since we do not have such a general query interface for the
243  // promoting generation, we'll instead just use the minimum
244  // object size (which today is a header's worth of space);
245  // note that all arithmetic is in units of HeapWords.
246  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
247  assert(_dilatation_factor >= 1.0, "from previous assert");
248}
249
250
251// The field "_initiating_occupancy" represents the occupancy percentage
252// at which we trigger a new collection cycle.  Unless explicitly specified
253// via CMSInitiatingOccupancyFraction (argument "io" below), it
254// is calculated by:
255//
256//   Let "f" be MinHeapFreeRatio in
257//
258//    _initiating_occupancy = 100-f +
259//                           f * (CMSTriggerRatio/100)
260//   where CMSTriggerRatio is the argument "tr" below.
261//
262// That is, if we assume the heap is at its desired maximum occupancy at the
263// end of a collection, we let CMSTriggerRatio percent of the (purported) free
264// space be allocated before initiating a new collection cycle.
265//
266void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
267  assert(io <= 100 && tr <= 100, "Check the arguments");
268  if (io >= 0) {
269    _initiating_occupancy = (double)io / 100.0;
270  } else {
271    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
272                             (double)(tr * MinHeapFreeRatio) / 100.0)
273                            / 100.0;
274  }
275}
276
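// Editorial note (illustrative, not part of the original file): with the
// common defaults MinHeapFreeRatio=40 and CMSTriggerRatio=80 and no explicit
// CMSInitiatingOccupancyFraction (io < 0), the branch above computes
//
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100 = 0.92
//
// i.e. a concurrent cycle is considered once the generation is about 92%
// full.  -XX:CMSInitiatingOccupancyFraction=70 would instead set it to 0.70.
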
277void ConcurrentMarkSweepGeneration::ref_processor_init() {
278  assert(collector() != NULL, "no collector");
279  collector()->ref_processor_init();
280}
281
282void CMSCollector::ref_processor_init() {
283  if (_ref_processor == NULL) {
284    // Allocate and initialize a reference processor
285    _ref_processor =
286      new ReferenceProcessor(_span,                               // span
287                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
288                             ParallelGCThreads,                   // mt processing degree
289                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
290                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
291                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
292                             &_is_alive_closure);                 // closure for liveness info
293    // Initialize the _ref_processor field of CMSGen
294    _cmsGen->set_ref_processor(_ref_processor);
295
296  }
297}
298
299AdaptiveSizePolicy* CMSCollector::size_policy() {
300  GenCollectedHeap* gch = GenCollectedHeap::heap();
301  return gch->gen_policy()->size_policy();
302}
303
304void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
305
306  const char* gen_name = "old";
307  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
308  // Generation Counters - generation 1, 1 subspace
309  _gen_counters = new GenerationCounters(gen_name, 1, 1,
310      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
311
312  _space_counters = new GSpaceCounters(gen_name, 0,
313                                       _virtual_space.reserved_size(),
314                                       this, _gen_counters);
315}
316
317CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
318  _cms_gen(cms_gen)
319{
320  assert(alpha <= 100, "bad value");
321  _saved_alpha = alpha;
322
323  // Initialize the alphas to the bootstrap value of 100.
324  _gc0_alpha = _cms_alpha = 100;
325
326  _cms_begin_time.update();
327  _cms_end_time.update();
328
329  _gc0_duration = 0.0;
330  _gc0_period = 0.0;
331  _gc0_promoted = 0;
332
333  _cms_duration = 0.0;
334  _cms_period = 0.0;
335  _cms_allocated = 0;
336
337  _cms_used_at_gc0_begin = 0;
338  _cms_used_at_gc0_end = 0;
339  _allow_duty_cycle_reduction = false;
340  _valid_bits = 0;
341}
342
343double CMSStats::cms_free_adjustment_factor(size_t free) const {
344  // TBD: CR 6909490
345  return 1.0;
346}
347
348void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
349}
350
351// If promotion failure handling is on, use
352// the padded average size of promotions for each
353// young generation collection.
354double CMSStats::time_until_cms_gen_full() const {
355  size_t cms_free = _cms_gen->cmsSpace()->free();
356  GenCollectedHeap* gch = GenCollectedHeap::heap();
357  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
358                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
359  if (cms_free > expected_promotion) {
360    // Start a cms collection if there isn't enough space to promote
361    // for the next young collection.  Use the padded average as
362    // a safety factor.
363    cms_free -= expected_promotion;
364
365    // Adjust by the safety factor.
366    double cms_free_dbl = (double)cms_free;
367    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
368    // Apply a further correction factor which tries to adjust
369    // for recent occurrences of concurrent mode failures.
370    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
371    cms_free_dbl = cms_free_dbl * cms_adjustment;
372
373    if (PrintGCDetails && Verbose) {
374      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
375        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
376        cms_free, expected_promotion);
377      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
378        cms_free_dbl, cms_consumption_rate() + 1.0);
379    }
380    // Add 1 in case the consumption rate goes to zero.
381    return cms_free_dbl / (cms_consumption_rate() + 1.0);
382  }
383  return 0.0;
384}
385
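// Editorial note (illustrative numbers, not part of the original file): if
// the adjusted cms_free above is 512 units after subtracting the expected
// promotion, CMSIncrementalSafetyFactor is 10, and cms_consumption_rate() is
// 3 units/sec, time_until_cms_gen_full() returns about
// 512 * 0.90 / (3 + 1) ~= 115 seconds.
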
386// Compare the duration of the cms collection to the
387// time remaining before the cms generation is empty.
388// Note that the time from the start of the cms collection
389// to the start of the cms sweep (less than the total
390// duration of the cms collection) could be used instead.
391// That was tried, and some applications experienced
392// promotion failures early in execution, possibly
393// because the averages were not accurate enough
394// at the beginning.
395double CMSStats::time_until_cms_start() const {
396  // We add "gc0_period" to the "work" calculation
397  // below because this query is done (mostly) at the
398  // end of a scavenge, so we need to conservatively
399  // account for that much possible delay
400  // in the query so as to avoid concurrent mode failures
401  // due to starting the collection just a wee bit too
402  // late.
403  double work = cms_duration() + gc0_period();
404  double deadline = time_until_cms_gen_full();
405  // If a concurrent mode failure occurred recently, we want to be
406  // more conservative and halve our expected time_until_cms_gen_full()
407  if (work > deadline) {
408    if (Verbose && PrintGCDetails) {
409      gclog_or_tty->print(
410        " CMSCollector: collect because of anticipated promotion "
411        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
412        gc0_period(), time_until_cms_gen_full());
413    }
414    return 0.0;
415  }
416  return work - deadline;
417}
418
419#ifndef PRODUCT
420void CMSStats::print_on(outputStream *st) const {
421  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
422  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
423               gc0_duration(), gc0_period(), gc0_promoted());
424  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
425            cms_duration(), cms_period(), cms_allocated());
426  st->print(",cms_since_beg=%g,cms_since_end=%g",
427            cms_time_since_begin(), cms_time_since_end());
428  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
429            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
430
431  if (valid()) {
432    st->print(",promo_rate=%g,cms_alloc_rate=%g",
433              promotion_rate(), cms_allocation_rate());
434    st->print(",cms_consumption_rate=%g,time_until_full=%g",
435              cms_consumption_rate(), time_until_cms_gen_full());
436  }
437  st->print(" ");
438}
439#endif // #ifndef PRODUCT
440
441CMSCollector::CollectorState CMSCollector::_collectorState =
442                             CMSCollector::Idling;
443bool CMSCollector::_foregroundGCIsActive = false;
444bool CMSCollector::_foregroundGCShouldWait = false;
445
446CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
447                           CardTableRS*                   ct,
448                           ConcurrentMarkSweepPolicy*     cp):
449  _cmsGen(cmsGen),
450  _ct(ct),
451  _ref_processor(NULL),    // will be set later
452  _conc_workers(NULL),     // may be set later
453  _abort_preclean(false),
454  _start_sampling(false),
455  _between_prologue_and_epilogue(false),
456  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
457  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
458                 -1 /* lock-free */, "No_lock" /* dummy */),
459  _modUnionClosurePar(&_modUnionTable),
460  // Adjust my span to cover old (cms) gen
461  _span(cmsGen->reserved()),
462  // Construct the is_alive_closure with _span & markBitMap
463  _is_alive_closure(_span, &_markBitMap),
464  _restart_addr(NULL),
465  _overflow_list(NULL),
466  _stats(cmsGen),
467  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
468                             // verify that this lock should be acquired with safepoint check.
469                             Monitor::_safepoint_check_sometimes)),
470  _eden_chunk_array(NULL),     // may be set in ctor body
471  _eden_chunk_capacity(0),     // -- ditto --
472  _eden_chunk_index(0),        // -- ditto --
473  _survivor_plab_array(NULL),  // -- ditto --
474  _survivor_chunk_array(NULL), // -- ditto --
475  _survivor_chunk_capacity(0), // -- ditto --
476  _survivor_chunk_index(0),    // -- ditto --
477  _ser_pmc_preclean_ovflw(0),
478  _ser_kac_preclean_ovflw(0),
479  _ser_pmc_remark_ovflw(0),
480  _par_pmc_remark_ovflw(0),
481  _ser_kac_ovflw(0),
482  _par_kac_ovflw(0),
483#ifndef PRODUCT
484  _num_par_pushes(0),
485#endif
486  _collection_count_start(0),
487  _verifying(false),
488  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
489  _completed_initialization(false),
490  _collector_policy(cp),
491  _should_unload_classes(CMSClassUnloadingEnabled),
492  _concurrent_cycles_since_last_unload(0),
493  _roots_scanning_options(GenCollectedHeap::SO_None),
494  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
495  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
496  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
497  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
498  _cms_start_registered(false)
499{
500  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
501    ExplicitGCInvokesConcurrent = true;
502  }
503  // Now expand the span and allocate the collection support structures
504  // (MUT, marking bit map etc.) to cover both generations subject to
505  // collection.
506
507  // For use by dirty card to oop closures.
508  _cmsGen->cmsSpace()->set_collector(this);
509
510  // Allocate MUT and marking bit map
511  {
512    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
513    if (!_markBitMap.allocate(_span)) {
514      warning("Failed to allocate CMS Bit Map");
515      return;
516    }
517    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
518  }
519  {
520    _modUnionTable.allocate(_span);
521    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
522  }
523
524  if (!_markStack.allocate(MarkStackSize)) {
525    warning("Failed to allocate CMS Marking Stack");
526    return;
527  }
528
529  // Support for multi-threaded concurrent phases
530  if (CMSConcurrentMTEnabled) {
531    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
532      // just for now
533      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
534    }
535    if (ConcGCThreads > 1) {
536      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
537                                 ConcGCThreads, true);
538      if (_conc_workers == NULL) {
539        warning("GC/CMS: _conc_workers allocation failure: "
540              "forcing -CMSConcurrentMTEnabled");
541        CMSConcurrentMTEnabled = false;
542      } else {
543        _conc_workers->initialize_workers();
544      }
545    } else {
546      CMSConcurrentMTEnabled = false;
547    }
548  }
549  if (!CMSConcurrentMTEnabled) {
550    ConcGCThreads = 0;
551  } else {
552    // Turn off CMSCleanOnEnter optimization temporarily for
553    // the MT case where it's not fixed yet; see 6178663.
554    CMSCleanOnEnter = false;
555  }
556  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
557         "Inconsistency");
558
559  // Parallel task queues; these are shared for the
560  // concurrent and stop-world phases of CMS, but
561// are not shared with the parallel young-gen scavenge (ParNew).
562  {
563    uint i;
564    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
565
566    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
567         || ParallelRefProcEnabled)
568        && num_queues > 0) {
569      _task_queues = new OopTaskQueueSet(num_queues);
570      if (_task_queues == NULL) {
571        warning("task_queues allocation failure.");
572        return;
573      }
574      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
575      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
576      for (i = 0; i < num_queues; i++) {
577        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
578        if (q == NULL) {
579          warning("work_queue allocation failure.");
580          return;
581        }
582        _task_queues->register_queue(i, q);
583      }
584      for (i = 0; i < num_queues; i++) {
585        _task_queues->queue(i)->initialize();
586        _hash_seed[i] = 17;  // copied from ParNew
587      }
588    }
589  }
590
591  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
592
593  // Clip CMSBootstrapOccupancy between 0 and 100.
594  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
595
596  // Now tell CMS generations the identity of their collector
597  ConcurrentMarkSweepGeneration::set_collector(this);
598
599  // Create & start a CMS thread for this CMS collector
600  _cmsThread = ConcurrentMarkSweepThread::start(this);
601  assert(cmsThread() != NULL, "CMS Thread should have been created");
602  assert(cmsThread()->collector() == this,
603         "CMS Thread should refer to this gen");
604  assert(CGC_lock != NULL, "Where's the CGC_lock?");
605
606  // Support for parallelizing young gen rescan
607  GenCollectedHeap* gch = GenCollectedHeap::heap();
608  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
609  _young_gen = (ParNewGeneration*)gch->young_gen();
610  if (gch->supports_inline_contig_alloc()) {
611    _top_addr = gch->top_addr();
612    _end_addr = gch->end_addr();
613    assert(_young_gen != NULL, "no _young_gen");
614    _eden_chunk_index = 0;
615    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
616    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
617  }
618
619  // Support for parallelizing survivor space rescan
620  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
621    const size_t max_plab_samples =
622      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
623
624    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
625    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
626    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
627    _survivor_chunk_capacity = max_plab_samples;
628    for (uint i = 0; i < ParallelGCThreads; i++) {
629      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
630      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
631      assert(cur->end() == 0, "Should be 0");
632      assert(cur->array() == vec, "Should be vec");
633      assert(cur->capacity() == max_plab_samples, "Error");
634    }
635  }
636
637  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
638  _gc_counters = new CollectorCounters("CMS", 1);
639  _completed_initialization = true;
640  _inter_sweep_timer.start();  // start of time
641}
642
643const char* ConcurrentMarkSweepGeneration::name() const {
644  return "concurrent mark-sweep generation";
645}
646void ConcurrentMarkSweepGeneration::update_counters() {
647  if (UsePerfData) {
648    _space_counters->update_all();
649    _gen_counters->update_all();
650  }
651}
652
653// This is an optimized version of update_counters(). It takes the
654// used value as a parameter rather than computing it.
655//
656void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
657  if (UsePerfData) {
658    _space_counters->update_used(used);
659    _space_counters->update_capacity();
660    _gen_counters->update_all();
661  }
662}
663
664void ConcurrentMarkSweepGeneration::print() const {
665  Generation::print();
666  cmsSpace()->print();
667}
668
669#ifndef PRODUCT
670void ConcurrentMarkSweepGeneration::print_statistics() {
671  cmsSpace()->printFLCensus(0);
672}
673#endif
674
675void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
676  GenCollectedHeap* gch = GenCollectedHeap::heap();
677  if (PrintGCDetails) {
678    // I didn't want to change the logging when removing the level concept,
679    // but I guess this logging could say "old" or something instead of "1".
680    assert(gch->is_old_gen(this),
681           "The CMS generation should be the old generation");
682    uint level = 1;
683    if (Verbose) {
684      gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]",
685        level, short_name(), s, used(), capacity());
686    } else {
687      gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]",
688        level, short_name(), s, used() / K, capacity() / K);
689    }
690  }
691  if (Verbose) {
692    gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")",
693              gch->used(), gch->capacity());
694  } else {
695    gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)",
696              gch->used() / K, gch->capacity() / K);
697  }
698}
699
700size_t
701ConcurrentMarkSweepGeneration::contiguous_available() const {
702  // dld proposes an improvement in precision here. If the committed
703  // part of the space ends in a free block we should add that to
704  // uncommitted size in the calculation below. Will make this
705  // change later, staying with the approximation below for the
706  // time being. -- ysr.
707  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
708}
709
710size_t
711ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
712  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
713}
714
715size_t ConcurrentMarkSweepGeneration::max_available() const {
716  return free() + _virtual_space.uncommitted_size();
717}
718
719bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
720  size_t available = max_available();
721  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
722  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
723  if (Verbose && PrintGCDetails) {
724    gclog_or_tty->print_cr(
725      "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
726      "max_promo(" SIZE_FORMAT ")",
727      res? "":" not", available, res? ">=":"<",
728      av_promo, max_promotion_in_bytes);
729  }
730  return res;
731}
732
733// At a promotion failure, dump information on block layout in the heap
734// (cms old generation).
735void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
736  if (CMSDumpAtPromotionFailure) {
737    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
738  }
739}
740
741void ConcurrentMarkSweepGeneration::reset_after_compaction() {
742  // Clear the promotion information.  These pointers can be adjusted
743  // along with all the other pointers into the heap but
744  // compaction is expected to be a rare event with
745  // a heap using cms so don't do it without seeing the need.
746  for (uint i = 0; i < ParallelGCThreads; i++) {
747    _par_gc_thread_states[i]->promo.reset();
748  }
749}
750
751void ConcurrentMarkSweepGeneration::compute_new_size() {
752  assert_locked_or_safepoint(Heap_lock);
753
754  // If incremental collection failed, we just want to expand
755  // to the limit.
756  if (incremental_collection_failed()) {
757    clear_incremental_collection_failed();
758    grow_to_reserved();
759    return;
760  }
761
762  // The heap has been compacted but not reset yet.
763  // Any metric such as free() or used() will be incorrect.
764
765  CardGeneration::compute_new_size();
766
767  // Reset again after a possible resizing
768  if (did_compact()) {
769    cmsSpace()->reset_after_compaction();
770  }
771}
772
773void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
774  assert_locked_or_safepoint(Heap_lock);
775
776  // If incremental collection failed, we just want to expand
777  // to the limit.
778  if (incremental_collection_failed()) {
779    clear_incremental_collection_failed();
780    grow_to_reserved();
781    return;
782  }
783
784  double free_percentage = ((double) free()) / capacity();
785  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
786  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
787
788  // compute expansion delta needed for reaching desired free percentage
789  if (free_percentage < desired_free_percentage) {
790    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
791    assert(desired_capacity >= capacity(), "invalid expansion size");
792    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
793    if (PrintGCDetails && Verbose) {
794      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
795      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
796      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
797      gclog_or_tty->print_cr("  Desired free fraction %f", desired_free_percentage);
798      gclog_or_tty->print_cr("  Maximum free fraction %f", maximum_free_percentage);
799      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity() / 1000);
800      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
801      GenCollectedHeap* gch = GenCollectedHeap::heap();
802      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
803      size_t young_size = gch->young_gen()->capacity();
804      gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
805      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
806      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
807      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
808    }
809    // safe if expansion fails
810    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
811    if (PrintGCDetails && Verbose) {
812      gclog_or_tty->print_cr("  Expanded free fraction %f", ((double) free()) / capacity());
813    }
814  } else {
815    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
816    assert(desired_capacity <= capacity(), "invalid expansion size");
817    size_t shrink_bytes = capacity() - desired_capacity;
818    // Don't shrink unless the delta is greater than the minimum shrink we want
819    if (shrink_bytes >= MinHeapDeltaBytes) {
820      shrink_free_list_by(shrink_bytes);
821    }
822  }
823}
824
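// Editorial note (illustrative numbers, not part of the original file): in
// compute_new_size_free_list() above, with used() = 600M, capacity() = 800M
// and MinHeapFreeRatio = 40, the free fraction is 0.25 < 0.40, so
//
//   desired_capacity = 600M / (1 - 0.40) = 1000M
//   expand_bytes     = MAX2(1000M - 800M, MinHeapDeltaBytes) = 200M
//
// and the generation expands by roughly 200M with cause _satisfy_free_ratio.
// When the free fraction already exceeds the target, the analogous
// desired_capacity bounds how much the free list may shrink.
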
825Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
826  return cmsSpace()->freelistLock();
827}
828
829HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
830  CMSSynchronousYieldRequest yr;
831  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
832  return have_lock_and_allocate(size, tlab);
833}
834
835HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
836                                                                bool   tlab /* ignored */) {
837  assert_lock_strong(freelistLock());
838  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
839  HeapWord* res = cmsSpace()->allocate(adjustedSize);
840  // Allocate the object live (grey) if the background collector has
841  // started marking. This is necessary because the marker may
842  // have passed this address and consequently this object will
843  // not otherwise be greyed and would be incorrectly swept up.
844  // Note that if this object contains references, the writing
845  // of those references will dirty the card containing this object
846  // allowing the object to be blackened (and its references scanned)
847  // either during a preclean phase or at the final checkpoint.
848  if (res != NULL) {
849    // We may block here with an uninitialized object with
850    // its mark-bit or P-bits not yet set. Such objects need
851    // to be safely navigable by block_start().
852    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
853    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
854    collector()->direct_allocated(res, adjustedSize);
855    _direct_allocated_words += adjustedSize;
856    // allocation counters
857    NOT_PRODUCT(
858      _numObjectsAllocated++;
859      _numWordsAllocated += (int)adjustedSize;
860    )
861  }
862  return res;
863}
864
865// In the case of direct allocation by mutators in a generation that
866// is being concurrently collected, the object must be allocated
867// live (grey) if the background collector has started marking.
868// This is necessary because the marker may
869// have passed this address and consequently this object will
870// not otherwise be greyed and would be incorrectly swept up.
871// Note that if this object contains references, the writing
872// of those references will dirty the card containing this object
873// allowing the object to be blackened (and its references scanned)
874// either during a preclean phase or at the final checkpoint.
875void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
876  assert(_markBitMap.covers(start, size), "Out of bounds");
877  if (_collectorState >= Marking) {
878    MutexLockerEx y(_markBitMap.lock(),
879                    Mutex::_no_safepoint_check_flag);
880    // [see comments preceding SweepClosure::do_blk() below for details]
881    //
882    // Can the P-bits be deleted now?  JJJ
883    //
884    // 1. need to mark the object as live so it isn't collected
885    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
886    // 3. need to mark the end of the object so marking, precleaning or sweeping
887    //    can skip over uninitialized or unparsable objects. An allocated
888    //    object is considered uninitialized for our purposes as long as
889    //    its klass word is NULL.  All old gen objects are parsable
890    //    as soon as they are initialized.
891    _markBitMap.mark(start);          // object is live
892    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
893    _markBitMap.mark(start + size - 1);
894                                      // mark end of object
895  }
896  // check that oop looks uninitialized
897  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
898}
899
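// Editorial note (illustrative, not part of the original file): for a
// directly allocated block of, say, 8 words starting at 'start', the code
// above sets three bits in the one-bit-per-word mark bit map:
//
//   start            -> block is live
//   start + 1        -> block may still be uninitialized (P-bit convention)
//   start + 8 - 1    -> marks the last word so scanners can skip the block
//
// even though the klass word is still NULL at this point.
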
900void CMSCollector::promoted(bool par, HeapWord* start,
901                            bool is_obj_array, size_t obj_size) {
902  assert(_markBitMap.covers(start), "Out of bounds");
903  // See comment in direct_allocated() about when objects should
904  // be allocated live.
905  if (_collectorState >= Marking) {
906    // we already hold the marking bit map lock, taken in
907    // the prologue
908    if (par) {
909      _markBitMap.par_mark(start);
910    } else {
911      _markBitMap.mark(start);
912    }
913    // We don't need to mark the object as uninitialized (as
914    // in direct_allocated above) because this is being done with the
915    // world stopped and the object will be initialized by the
916    // time the marking, precleaning or sweeping get to look at it.
917    // But see the code for copying objects into the CMS generation,
918    // where we need to ensure that concurrent readers of the
919    // block offset table are able to safely navigate a block that
920    // is in flux from being free to being allocated (and in
921    // transition while being copied into) and subsequently
922    // becoming a bona-fide object when the copy/promotion is complete.
923    assert(SafepointSynchronize::is_at_safepoint(),
924           "expect promotion only at safepoints");
925
926    if (_collectorState < Sweeping) {
927      // Mark the appropriate cards in the modUnionTable, so that
928      // this object gets scanned before the sweep. If this is
929      // not done, CMS generation references in the object might
930      // not get marked.
931      // For the case of arrays, which are otherwise precisely
932      // marked, we need to dirty the entire array, not just its head.
933      if (is_obj_array) {
934        // The [par_]mark_range() method expects mr.end() below to
935        // be aligned to the granularity of a bit's representation
936        // in the heap. In the case of the MUT below, that's a
937        // card size.
938        MemRegion mr(start,
939                     (HeapWord*)round_to((intptr_t)(start + obj_size),
940                        CardTableModRefBS::card_size /* bytes */));
941        if (par) {
942          _modUnionTable.par_mark_range(mr);
943        } else {
944          _modUnionTable.mark_range(mr);
945        }
946      } else {  // not an obj array; we can just mark the head
947        if (par) {
948          _modUnionTable.par_mark(start);
949        } else {
950          _modUnionTable.mark(start);
951        }
952      }
953    }
954  }
955}
956
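// Editorial note (illustrative, not part of the original file): the
// [par_]mark_range() calls in promoted() above dirty mod-union bits at card
// granularity.  With the usual 512-byte cards on a 64-bit VM (8-byte
// HeapWords), an object array of 100 words promoted to address 0x1000 is
// covered by [0x1000, round_to(0x1000 + 100*8, 512)) = [0x1000, 0x1400),
// i.e. two cards.
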
957oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
958  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
959  // allocate, copy and if necessary update promoinfo --
960  // delegate to underlying space.
961  assert_lock_strong(freelistLock());
962
963#ifndef PRODUCT
964  if (GenCollectedHeap::heap()->promotion_should_fail()) {
965    return NULL;
966  }
967#endif  // #ifndef PRODUCT
968
969  oop res = _cmsSpace->promote(obj, obj_size);
970  if (res == NULL) {
971    // expand and retry
972    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
973    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
974    // Since this is the old generation, we don't try to promote
975    // into a more senior generation.
976    res = _cmsSpace->promote(obj, obj_size);
977  }
978  if (res != NULL) {
979    // See comment in allocate() about when objects should
980    // be allocated live.
981    assert(obj->is_oop(), "Will dereference klass pointer below");
982    collector()->promoted(false,           // Not parallel
983                          (HeapWord*)res, obj->is_objArray(), obj_size);
984    // promotion counters
985    NOT_PRODUCT(
986      _numObjectsPromoted++;
987      _numWordsPromoted +=
988        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
989    )
990  }
991  return res;
992}
993
994
995// IMPORTANT: Notes on object size recognition in CMS.
996// ---------------------------------------------------
997// A block of storage in the CMS generation is always in
998// one of three states: a free block (FREE), an allocated
999// object (OBJECT) whose size() method reports the correct size,
1000// or an intermediate state (TRANSIENT) in which its size cannot
1001// be accurately determined.
1002// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1003// -----------------------------------------------------
1004// FREE:      klass_word & 1 == 1; mark_word holds block size
1005//
1006// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1007//            obj->size() computes correct size
1008//
1009// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1010//
1011// STATE IDENTIFICATION: (64 bit+COOPS)
1012// ------------------------------------
1013// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1014//
1015// OBJECT:    klass_word installed; klass_word != 0;
1016//            obj->size() computes correct size
1017//
1018// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1019//
1020//
1021// STATE TRANSITION DIAGRAM
1022//
1023//        mut / parnew                     mut  /  parnew
1024// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1025//  ^                                                                   |
1026//  |------------------------ DEAD <------------------------------------|
1027//         sweep                            mut
1028//
1029// While a block is in TRANSIENT state its size cannot be determined
1030// so readers will either need to come back later or stall until
1031// the size can be determined. Note that for the case of direct
1032// allocation, P-bits, when available, may be used to determine the
1033// size of an object that may not yet have been initialized.
1034
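// Editorial note: the sketch below is illustrative, not part of the original
// file, and assumes the 32-bit / 64-bit-without-COOPS encoding described
// above; the helper name is hypothetical.
//
//   // True iff the block at 'p' is currently a FREE block.
//   static bool block_looks_free(HeapWord* p) {
//     return ((FreeChunk*)p)->is_free();    // tests the free-bit encoding
//   }
//
//   // A reader needing obj->size() must first observe a non-NULL klass
//   // word, i.e. the block has left the TRANSIENT state:
//   //   if (oop(p)->klass_or_null() != NULL) { size_t sz = oop(p)->size(); }
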
1035// Things to support parallel young-gen collection.
1036oop
1037ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1038                                           oop old, markOop m,
1039                                           size_t word_sz) {
1040#ifndef PRODUCT
1041  if (GenCollectedHeap::heap()->promotion_should_fail()) {
1042    return NULL;
1043  }
1044#endif  // #ifndef PRODUCT
1045
1046  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1047  PromotionInfo* promoInfo = &ps->promo;
1048  // If we are tracking promotions, then first ensure space for
1049  // promotion (including spooling space for saving the header if necessary),
1050  // then allocate and copy, then track promoted info if needed.
1051  // When tracking (see PromotionInfo::track()), the mark word may
1052  // be displaced and in this case restoration of the mark word
1053  // occurs in the (oop_since_save_marks_)iterate phase.
1054  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1055    // Out of space for allocating spooling buffers;
1056    // try expanding and allocating spooling buffers.
1057    if (!expand_and_ensure_spooling_space(promoInfo)) {
1058      return NULL;
1059    }
1060  }
1061  assert(promoInfo->has_spooling_space(), "Control point invariant");
1062  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1063  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1064  if (obj_ptr == NULL) {
1065     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1066     if (obj_ptr == NULL) {
1067       return NULL;
1068     }
1069  }
1070  oop obj = oop(obj_ptr);
1071  OrderAccess::storestore();
1072  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1073  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1074  // IMPORTANT: See note on object initialization for CMS above.
1075  // Otherwise, copy the object.  Here we must be careful to insert the
1076  // klass pointer last, since this marks the block as an allocated object.
1077  // Except with compressed oops it's the mark word.
1078  HeapWord* old_ptr = (HeapWord*)old;
1079  // Restore the mark word copied above.
1080  obj->set_mark(m);
1081  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1082  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1083  OrderAccess::storestore();
1084
1085  if (UseCompressedClassPointers) {
1086    // Copy gap missed by (aligned) header size calculation below
1087    obj->set_klass_gap(old->klass_gap());
1088  }
1089  if (word_sz > (size_t)oopDesc::header_size()) {
1090    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1091                                 obj_ptr + oopDesc::header_size(),
1092                                 word_sz - oopDesc::header_size());
1093  }
1094
1095  // Now we can track the promoted object, if necessary.  We take care
1096  // to delay the transition from uninitialized to full object
1097  // (i.e., insertion of klass pointer) until after, so that it
1098  // atomically becomes a promoted object.
1099  if (promoInfo->tracking()) {
1100    promoInfo->track((PromotedObject*)obj, old->klass());
1101  }
1102  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1103  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1104  assert(old->is_oop(), "Will use and dereference old klass ptr below");
1105
1106  // Finally, install the klass pointer (this should be volatile).
1107  OrderAccess::storestore();
1108  obj->set_klass(old->klass());
1109  // We should now be able to calculate the right size for this object
1110  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1111
1112  collector()->promoted(true,          // parallel
1113                        obj_ptr, old->is_objArray(), word_sz);
1114
1115  NOT_PRODUCT(
1116    Atomic::inc_ptr(&_numObjectsPromoted);
1117    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1118  )
1119
1120  return obj;
1121}
1122
1123void
1124ConcurrentMarkSweepGeneration::
1125par_promote_alloc_done(int thread_num) {
1126  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1127  ps->lab.retire(thread_num);
1128}
1129
1130void
1131ConcurrentMarkSweepGeneration::
1132par_oop_since_save_marks_iterate_done(int thread_num) {
1133  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1134  ParScanWithoutBarrierClosure* dummy_cl = NULL;
1135  ps->promo.promoted_oops_iterate_nv(dummy_cl);
1136}
1137
1138bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1139                                                   size_t size,
1140                                                   bool   tlab)
1141{
1142  // We allow an STW collection only if a full
1143  // collection was requested.
1144  return full || should_allocate(size, tlab); // FIX ME !!!
1145  // This and promotion failure handling are connected at the
1146  // hip and should be fixed by untying them.
1147}
1148
1149bool CMSCollector::shouldConcurrentCollect() {
1150  if (_full_gc_requested) {
1151    if (Verbose && PrintGCDetails) {
1152      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1153                             " gc request (or gc_locker)");
1154    }
1155    return true;
1156  }
1157
1158  FreelistLocker x(this);
1159  // ------------------------------------------------------------------
1160  // Print out lots of information which affects the initiation of
1161  // a collection.
1162  if (PrintCMSInitiationStatistics && stats().valid()) {
1163    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1164    gclog_or_tty->stamp();
1165    gclog_or_tty->cr();
1166    stats().print_on(gclog_or_tty);
1167    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1168      stats().time_until_cms_gen_full());
1169    gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free());
1170    gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT,
1171                           _cmsGen->contiguous_available());
1172    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1173    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1174    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1175    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1176    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1177    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1178    gclog_or_tty->print_cr("metadata initialized %d",
1179      MetaspaceGC::should_concurrent_collect());
1180  }
1181  // ------------------------------------------------------------------
1182
1183  // If the estimated time to complete a cms collection (cms_duration())
1184  // is less than the estimated time remaining until the cms generation
1185  // is full, start a collection.
1186  if (!UseCMSInitiatingOccupancyOnly) {
1187    if (stats().valid()) {
1188      if (stats().time_until_cms_start() == 0.0) {
1189        return true;
1190      }
1191    } else {
1192      // We want to conservatively collect somewhat early in order
1193      // to try and "bootstrap" our CMS/promotion statistics;
1194      // this branch will not fire after the first successful CMS
1195      // collection because the stats should then be valid.
1196      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1197        if (Verbose && PrintGCDetails) {
1198          gclog_or_tty->print_cr(
1199            " CMSCollector: collect for bootstrapping statistics:"
1200            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1201            _bootstrap_occupancy);
1202        }
1203        return true;
1204      }
1205    }
1206  }
1207
1208  // Otherwise, we start a collection cycle if the
1209  // old gen wants a collection cycle started. Each may use
1210  // an appropriate criterion for making this decision.
1211  // XXX We need to make sure that the gen expansion
1212  // criterion dovetails well with this. XXX NEED TO FIX THIS
1213  if (_cmsGen->should_concurrent_collect()) {
1214    if (Verbose && PrintGCDetails) {
1215      gclog_or_tty->print_cr("CMS old gen initiated");
1216    }
1217    return true;
1218  }
1219
1220  // We start a collection if we believe an incremental collection may fail;
1221  // this is not likely to be productive in practice because it's probably too
1222  // late anyway.
1223  GenCollectedHeap* gch = GenCollectedHeap::heap();
1224  assert(gch->collector_policy()->is_generation_policy(),
1225         "You may want to check the correctness of the following");
1226  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1227    if (Verbose && PrintGCDetails) {
1228      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1229    }
1230    return true;
1231  }
1232
1233  if (MetaspaceGC::should_concurrent_collect()) {
1234    if (Verbose && PrintGCDetails) {
1235      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1236    }
1237    return true;
1238  }
1239
1240  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1241  if (CMSTriggerInterval >= 0) {
1242    if (CMSTriggerInterval == 0) {
1243      // Trigger always
1244      return true;
1245    }
1246
1247    // Check the CMS time since begin (we do not check the stats validity
1248    // as we want to be able to trigger the first CMS cycle as well)
1249    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1250      if (Verbose && PrintGCDetails) {
1251        if (stats().valid()) {
1252          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1253                                 stats().cms_time_since_begin());
1254        } else {
1255          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1256        }
1257      }
1258      return true;
1259    }
1260  }
1261
1262  return false;
1263}
1264
1265void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1266
1267// Clear _expansion_cause fields of constituent generations
1268void CMSCollector::clear_expansion_cause() {
1269  _cmsGen->clear_expansion_cause();
1270}
1271
1272// We should be conservative in starting a collection cycle.  Starting
1273// too eagerly runs the risk of collecting too often in the
1274// extreme.  Collecting too rarely falls back on full collections,
1275// which works, even if not optimal in terms of concurrent work.
1276// As a workaround for collecting too eagerly, use the flag
1277// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1278// giving the user an easily understandable way of controlling the
1279// collections.
1280// We want to start a new collection cycle if any of the following
1281// conditions hold:
1282// . our current occupancy exceeds the configured initiating occupancy
1283//   for this generation, or
1284// . we recently needed to expand this space and have not, since that
1285//   expansion, done a collection of this generation, or
1286// . the underlying space believes that it may be a good idea to initiate
1287//   a concurrent collection (this may be based on criteria such as the
1288//   following: the space uses linear allocation and linear allocation is
1289//   going to fail, or there is believed to be excessive fragmentation in
1290//   the generation, etc... or ...
1291// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1292//   the case of the old generation; see CR 6543076):
1293//   we may be approaching a point at which allocation requests may fail because
1294//   we will be out of sufficient free space given allocation rate estimates.]
1295bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1296
1297  assert_lock_strong(freelistLock());
1298  if (occupancy() > initiating_occupancy()) {
1299    if (PrintGCDetails && Verbose) {
1300      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1301        short_name(), occupancy(), initiating_occupancy());
1302    }
1303    return true;
1304  }
1305  if (UseCMSInitiatingOccupancyOnly) {
1306    return false;
1307  }
1308  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1309    if (PrintGCDetails && Verbose) {
1310      gclog_or_tty->print(" %s: collect because expanded for allocation ",
1311        short_name());
1312    }
1313    return true;
1314  }
1315  if (_cmsSpace->should_concurrent_collect()) {
1316    if (PrintGCDetails && Verbose) {
1317      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1318        short_name());
1319    }
1320    return true;
1321  }
1322  return false;
1323}
1324
1325void ConcurrentMarkSweepGeneration::collect(bool   full,
1326                                            bool   clear_all_soft_refs,
1327                                            size_t size,
1328                                            bool   tlab)
1329{
1330  collector()->collect(full, clear_all_soft_refs, size, tlab);
1331}
1332
1333void CMSCollector::collect(bool   full,
1334                           bool   clear_all_soft_refs,
1335                           size_t size,
1336                           bool   tlab)
1337{
1338  // The following "if" branch is present for defensive reasons.
1339  // In the current uses of this interface, it can be replaced with:
1340  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1341  // But I am not placing that assert here to allow future
1342  // generality in invoking this interface.
1343  if (GC_locker::is_active()) {
1344    // A consistency test for GC_locker
1345    assert(GC_locker::needs_gc(), "Should have been set already");
1346    // Skip this foreground collection, instead
1347    // expanding the heap if necessary.
1348    // Need the free list locks for the call to free() in compute_new_size()
1349    compute_new_size();
1350    return;
1351  }
1352  acquire_control_and_collect(full, clear_all_soft_refs);
1353}
1354
1355void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1356  GenCollectedHeap* gch = GenCollectedHeap::heap();
1357  unsigned int gc_count = gch->total_full_collections();
1358  if (gc_count == full_gc_count) {
1359    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1360    _full_gc_requested = true;
1361    _full_gc_cause = cause;
1362    CGC_lock->notify();   // nudge CMS thread
1363  } else {
1364    assert(gc_count > full_gc_count, "Error: causal loop");
1365  }
1366}
1367
1368bool CMSCollector::is_external_interruption() {
1369  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1370  return GCCause::is_user_requested_gc(cause) ||
1371         GCCause::is_serviceability_requested_gc(cause);
1372}
1373
1374void CMSCollector::report_concurrent_mode_interruption() {
1375  if (is_external_interruption()) {
1376    if (PrintGCDetails) {
1377      gclog_or_tty->print(" (concurrent mode interrupted)");
1378    }
1379  } else {
1380    if (PrintGCDetails) {
1381      gclog_or_tty->print(" (concurrent mode failure)");
1382    }
1383    _gc_tracer_cm->report_concurrent_mode_failure();
1384  }
1385}
1386
1387
1388// The foreground and background collectors need to coordinate in order
1389// to make sure that they do not mutually interfere with CMS collections.
1390// When a background collection is active,
1391// the foreground collector may need to take over (preempt) and
1392// synchronously complete an ongoing collection. Depending on the
1393// frequency of the background collections and the heap usage
1394// of the application, this preemption can be seldom or frequent.
1395// There are only certain
1396// points in the background collection at which the "collection-baton"
1397// can be passed to the foreground collector.
1398//
1399// The foreground collector will wait for the baton before
1400// starting any part of the collection.  The foreground collector
1401// will only wait at one location.
1402//
1403// The background collector will yield the baton before starting a new
1404// phase of the collection (e.g., before initial marking, marking from roots,
1405// precleaning, final re-mark, sweep etc.)  This is normally done at the head
1406// of the loop which switches the phases. The background collector does some
1407// of the phases (initial mark, final re-mark) with the world stopped.
1408// Because of locking involved in stopping the world,
1409// the foreground collector should not block waiting for the background
1410// collector when it is doing a stop-the-world phase.  The background
1411// collector will yield the baton at an additional point just before
1412// it enters a stop-the-world phase.  Once the world is stopped, the
1413// background collector checks the phase of the collection.  If the
1414// phase has not changed, it proceeds with the collection.  If the
1415// phase has changed, it skips that phase of the collection.  See
1416// the comments on the use of the Heap_lock in collect_in_background().
1417//
1418// Variable used in baton passing.
1419//   _foregroundGCIsActive - Set to true by the foreground collector when
1420//      it wants the baton.  The foreground clears it when it has finished
1421//      the collection.
1422//   _foregroundGCShouldWait - Set to true by the background collector
1423//        when it is running.  The foreground collector waits while
1424//      _foregroundGCShouldWait is true.
1425//  CGC_lock - monitor used to protect access to the above variables
1426//      and to notify the foreground and background collectors.
1427//  _collectorState - current state of the CMS collection.
1428//
1429// The foreground collector
1430//   acquires the CGC_lock
1431//   sets _foregroundGCIsActive
1432//   waits on the CGC_lock for _foregroundGCShouldWait to be false
1433//     various locks acquired in preparation for the collection
1434//     are released so as not to block the background collector
1435//     that is in the midst of a collection
1436//   proceeds with the collection
1437//   clears _foregroundGCIsActive
1438//   returns
1439//
1440// The background collector in a loop iterating on the phases of the
1441//      collection
1442//   acquires the CGC_lock
1443//   sets _foregroundGCShouldWait
1444//   if _foregroundGCIsActive is set
1445//     clears _foregroundGCShouldWait, notifies CGC_lock
1446//     waits on CGC_lock for _foregroundGCIsActive to become false
1447//     and exits the loop.
1448//   otherwise
1449//     proceed with that phase of the collection
1450//     if the phase is a stop-the-world phase,
1451//       yield the baton once more just before enqueueing
1452//       the stop-world CMS operation (executed by the VM thread).
1453//   returns after all phases of the collection are done
1454//
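// In the code below, the foreground half of this protocol is implemented by
// acquire_control_and_collect(), and the background half by
// collect_in_background() together with waitForForegroundGC() and the
// ReleaseForegroundGC helper class.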
1455
1456void CMSCollector::acquire_control_and_collect(bool full,
1457        bool clear_all_soft_refs) {
1458  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1459  assert(!Thread::current()->is_ConcurrentGC_thread(),
1460         "shouldn't try to acquire control from self!");
1461
1462  // Start the protocol for acquiring control of the
1463  // collection from the background collector (aka CMS thread).
1464  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1465         "VM thread should have CMS token");
1466  // Remember the possibly interrupted state of an ongoing
1467  // concurrent collection
1468  CollectorState first_state = _collectorState;
1469
1470  // Signal to a possibly ongoing concurrent collection that
1471  // we want to do a foreground collection.
1472  _foregroundGCIsActive = true;
1473
1474  // release locks and wait for a notify from the background collector
1475  // releasing the locks is only necessary for phases that
1476  // yield to improve the granularity of the collection.
1477  assert_lock_strong(bitMapLock());
1478  // We need to lock the Free list lock for the space that we are
1479  // currently collecting.
1480  assert(haveFreelistLocks(), "Must be holding free list locks");
1481  bitMapLock()->unlock();
1482  releaseFreelistLocks();
1483  {
1484    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1485    if (_foregroundGCShouldWait) {
1486      // We are going to be waiting for action for the CMS thread;
1487      // it had better not be gone (for instance at shutdown)!
1488      assert(ConcurrentMarkSweepThread::cmst() != NULL,
1489             "CMS thread must be running");
1490      // Wait here until the background collector gives us the go-ahead
1491      ConcurrentMarkSweepThread::clear_CMS_flag(
1492        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1493      // Get a possibly blocked CMS thread going:
1494      //   Note that we set _foregroundGCIsActive true above,
1495      //   without protection of the CGC_lock.
1496      CGC_lock->notify();
1497      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1498             "Possible deadlock");
1499      while (_foregroundGCShouldWait) {
1500        // wait for notification
1501        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1502        // Possibility of delay/starvation here, since CMS token does
1503        // not know to give priority to VM thread? Actually, I think
1504        // there wouldn't be any delay/starvation, but the proof of
1505        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1506      }
1507      ConcurrentMarkSweepThread::set_CMS_flag(
1508        ConcurrentMarkSweepThread::CMS_vm_has_token);
1509    }
1510  }
1511  // The CMS_token is already held.  Get back the other locks.
1512  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1513         "VM thread should have CMS token");
1514  getFreelistLocks();
1515  bitMapLock()->lock_without_safepoint_check();
1516  if (TraceCMSState) {
1517    gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1518      INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
1519    gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1520  }
1521
1522  // Inform cms gen if this was due to partial collection failing.
1523  // The CMS gen may use this fact to determine its expansion policy.
1524  GenCollectedHeap* gch = GenCollectedHeap::heap();
1525  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1526    assert(!_cmsGen->incremental_collection_failed(),
1527           "Should have been noticed, reacted to and cleared");
1528    _cmsGen->set_incremental_collection_failed();
1529  }
1530
1531  if (first_state > Idling) {
1532    report_concurrent_mode_interruption();
1533  }
1534
1535  set_did_compact(true);
1536
1537  // If the collection is being acquired from the background
1538  // collector, there may be references on the discovered
1539  // references lists.  Abandon those references, since some
1540  // of them may have become unreachable after concurrent
1541  // discovery; the STW compacting collector will redo discovery
1542  // more precisely, without being subject to floating garbage.
1543  // Leaving otherwise unreachable references in the discovered
1544  // lists would require special handling.
1545  ref_processor()->disable_discovery();
1546  ref_processor()->abandon_partial_discovery();
1547  ref_processor()->verify_no_references_recorded();
1548
1549  if (first_state > Idling) {
1550    save_heap_summary();
1551  }
1552
1553  do_compaction_work(clear_all_soft_refs);
1554
1555  // Has the GC time limit been exceeded?
1556  size_t max_eden_size = _young_gen->max_capacity() -
1557                         _young_gen->to()->capacity() -
1558                         _young_gen->from()->capacity();
1559  GCCause::Cause gc_cause = gch->gc_cause();
1560  size_policy()->check_gc_overhead_limit(_young_gen->used(),
1561                                         _young_gen->eden()->used(),
1562                                         _cmsGen->max_capacity(),
1563                                         max_eden_size,
1564                                         full,
1565                                         gc_cause,
1566                                         gch->collector_policy());
1567
1568  // Reset the expansion cause, now that we just completed
1569  // a collection cycle.
1570  clear_expansion_cause();
1571  _foregroundGCIsActive = false;
1572  return;
1573}
1574
1575// Resize the tenured generation
1576// after obtaining the free list locks for the
1577// two generations.
1578void CMSCollector::compute_new_size() {
1579  assert_locked_or_safepoint(Heap_lock);
1580  FreelistLocker z(this);
1581  MetaspaceGC::compute_new_size();
1582  _cmsGen->compute_new_size_free_list();
1583}
1584
1585// A work method used by the foreground collector to do
1586// a mark-sweep-compact.
1587void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1588  GenCollectedHeap* gch = GenCollectedHeap::heap();
1589
1590  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1591  gc_timer->register_gc_start();
1592
1593  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1594  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1595
1596  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
1597
1598  // Temporarily widen the span of the weak reference processing to
1599  // the entire heap.
1600  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1601  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1602  // Temporarily, clear the "is_alive_non_header" field of the
1603  // reference processor.
1604  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1605  // Temporarily make reference _processing_ single threaded (non-MT).
1606  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1607  // Temporarily make refs discovery atomic
1608  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1609  // Temporarily make reference _discovery_ single threaded (non-MT)
1610  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
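  // Note that the rp_mut_* objects above are stack-allocated RAII "mutators":
  // their destructors restore the original ReferenceProcessor settings when
  // this method returns.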
1611
1612  ref_processor()->set_enqueuing_is_done(false);
1613  ref_processor()->enable_discovery();
1614  ref_processor()->setup_policy(clear_all_soft_refs);
1615  // If an asynchronous collection finishes, the _modUnionTable is
1616  // all clear.  If we are taking over the collection from an asynchronous
1617  // collection, clear the _modUnionTable.
1618  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1619    "_modUnionTable should be clear if the baton was not passed");
1620  _modUnionTable.clear_all();
1621  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1622    "mod union for klasses should be clear if the baton was passed");
1623  _ct->klass_rem_set()->clear_mod_union();
1624
1625  // We must adjust the allocation statistics being maintained
1626  // in the free list space. We do so by reading and clearing
1627  // the sweep timer and updating the block flux rate estimates below.
1628  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1629  if (_inter_sweep_timer.is_active()) {
1630    _inter_sweep_timer.stop();
1631    // Note that we do not use this sample to update the _inter_sweep_estimate.
1632    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1633                                            _inter_sweep_estimate.padded_average(),
1634                                            _intra_sweep_estimate.padded_average());
1635  }
1636
1637  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1638  #ifdef ASSERT
1639    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1640    size_t free_size = cms_space->free();
1641    assert(free_size ==
1642           pointer_delta(cms_space->end(), cms_space->compaction_top())
1643           * HeapWordSize,
1644      "All the free space should be compacted into one chunk at top");
1645    assert(cms_space->dictionary()->total_chunk_size(
1646                                      debug_only(cms_space->freelistLock())) == 0 ||
1647           cms_space->totalSizeInIndexedFreeLists() == 0,
1648      "All the free space should be in a single chunk");
1649    size_t num = cms_space->totalCount();
1650    assert((free_size == 0 && num == 0) ||
1651           (free_size > 0  && (num == 1 || num == 2)),
1652         "There should be at most 2 free chunks after compaction");
1653  #endif // ASSERT
1654  _collectorState = Resetting;
1655  assert(_restart_addr == NULL,
1656         "Should have been NULL'd before baton was passed");
1657  reset_stw();
1658  _cmsGen->reset_after_compaction();
1659  _concurrent_cycles_since_last_unload = 0;
1660
1661  // Clear any data recorded in the PLAB chunk arrays.
1662  if (_survivor_plab_array != NULL) {
1663    reset_survivor_plab_arrays();
1664  }
1665
1666  // Adjust the per-size allocation stats for the next epoch.
1667  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1668  // Restart the "inter sweep timer" for the next epoch.
1669  _inter_sweep_timer.reset();
1670  _inter_sweep_timer.start();
1671
1672  gc_timer->register_gc_end();
1673
1674  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1675
1676  // For a mark-sweep-compact, compute_new_size() will be called
1677  // in the heap's do_collection() method.
1678}
1679
1680void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1681  ContiguousSpace* eden_space = _young_gen->eden();
1682  ContiguousSpace* from_space = _young_gen->from();
1683  ContiguousSpace* to_space   = _young_gen->to();
1684  // Eden
1685  if (_eden_chunk_array != NULL) {
1686    gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1687                           p2i(eden_space->bottom()), p2i(eden_space->top()),
1688                           p2i(eden_space->end()), eden_space->capacity());
1689    gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1690                           "_eden_chunk_capacity=" SIZE_FORMAT,
1691                           _eden_chunk_index, _eden_chunk_capacity);
1692    for (size_t i = 0; i < _eden_chunk_index; i++) {
1693      gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1694                             i, p2i(_eden_chunk_array[i]));
1695    }
1696  }
1697  // Survivor
1698  if (_survivor_chunk_array != NULL) {
1699    gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1700                           p2i(from_space->bottom()), p2i(from_space->top()),
1701                           p2i(from_space->end()), from_space->capacity());
1702    gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1703                           "_survivor_chunk_capacity=" SIZE_FORMAT,
1704                           _survivor_chunk_index, _survivor_chunk_capacity);
1705    for (size_t i = 0; i < _survivor_chunk_index; i++) {
1706      gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1707                             i, p2i(_survivor_chunk_array[i]));
1708    }
1709  }
1710}
1711
1712void CMSCollector::getFreelistLocks() const {
1713  // Get locks for all free lists in all generations that this
1714  // collector is responsible for
1715  _cmsGen->freelistLock()->lock_without_safepoint_check();
1716}
1717
1718void CMSCollector::releaseFreelistLocks() const {
1719  // Release locks for all free lists in all generations that this
1720  // collector is responsible for
1721  _cmsGen->freelistLock()->unlock();
1722}
1723
1724bool CMSCollector::haveFreelistLocks() const {
1725  // Check locks for all free lists in all generations that this
1726  // collector is responsible for
1727  assert_lock_strong(_cmsGen->freelistLock());
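  // This check is only meant to be used from within asserts; in a product
  // build the call should have been compiled away, so reaching it is an error.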
1728  PRODUCT_ONLY(ShouldNotReachHere());
1729  return true;
1730}
1731
1732// A utility class that is used by the CMS collector to
1733// temporarily "release" the foreground collector from its
1734// usual obligation to wait for the background collector to
1735// complete an ongoing phase before proceeding.
1736class ReleaseForegroundGC: public StackObj {
1737 private:
1738  CMSCollector* _c;
1739 public:
1740  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1741    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1742    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1743    // allow a potentially blocked foreground collector to proceed
1744    _c->_foregroundGCShouldWait = false;
1745    if (_c->_foregroundGCIsActive) {
1746      CGC_lock->notify();
1747    }
1748    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1749           "Possible deadlock");
1750  }
1751
1752  ~ReleaseForegroundGC() {
1753    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1754    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1755    _c->_foregroundGCShouldWait = true;
1756  }
1757};
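// Illustrative use (see the InitialMarking and FinalMarking cases in
// collect_in_background() below):
//   {
//     ReleaseForegroundGC x(this);    // let a waiting foreground GC proceed
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }   // destructor re-asserts _foregroundGCShouldWait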
1758
1759void CMSCollector::collect_in_background(GCCause::Cause cause) {
1760  assert(Thread::current()->is_ConcurrentGC_thread(),
1761    "A CMS asynchronous collection is only allowed on a CMS thread.");
1762
1763  GenCollectedHeap* gch = GenCollectedHeap::heap();
1764  {
1765    bool safepoint_check = Mutex::_no_safepoint_check_flag;
1766    MutexLockerEx hl(Heap_lock, safepoint_check);
1767    FreelistLocker fll(this);
1768    MutexLockerEx x(CGC_lock, safepoint_check);
1769    if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
1770      // The foreground collector is active or we're
1771      // not using asynchronous collections.  Skip this
1772      // background collection.
1773      assert(!_foregroundGCShouldWait, "Should be clear");
1774      return;
1775    } else {
1776      assert(_collectorState == Idling, "Should be idling before start.");
1777      _collectorState = InitialMarking;
1778      register_gc_start(cause);
1779      // Reset the expansion cause, now that we are about to begin
1780      // a new cycle.
1781      clear_expansion_cause();
1782
1783      // Clear the MetaspaceGC flag since a concurrent collection
1784      // is starting but also clear it after the collection.
1785      MetaspaceGC::set_should_concurrent_collect(false);
1786    }
1787    // Decide if we want to enable class unloading as part of the
1788    // ensuing concurrent GC cycle.
1789    update_should_unload_classes();
1790    _full_gc_requested = false;           // acks all outstanding full gc requests
1791    _full_gc_cause = GCCause::_no_gc;
1792    // Signal that we are about to start a collection
1793    gch->increment_total_full_collections();  // ... starting a collection cycle
1794    _collection_count_start = gch->total_full_collections();
1795  }
1796
1797  // Used for PrintGC
1798  size_t prev_used;
1799  if (PrintGC && Verbose) {
1800    prev_used = _cmsGen->used();
1801  }
1802
1803  // The change of the collection state is normally done at this level;
1804  // the exceptions are phases that are executed while the world is
1805  // stopped.  For those phases the change of state is done while the
1806  // world is stopped.  For baton passing purposes this allows the
1807  // background collector to finish the phase and change state atomically.
1808  // The foreground collector cannot wait on a phase that is done
1809  // while the world is stopped because the foreground collector already
1810  // has the world stopped and would deadlock.
1811  while (_collectorState != Idling) {
1812    if (TraceCMSState) {
1813      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1814        p2i(Thread::current()), _collectorState);
1815    }
1816    // The foreground collector
1817    //   holds the Heap_lock throughout its collection.
1818    //   holds the CMS token (but not the lock)
1819    //     except while it is waiting for the background collector to yield.
1820    //
1821    // The foreground collector should be blocked (not for long)
1822    //   if the background collector is about to start a phase
1823    //   executed with world stopped.  If the background
1824    //   collector has already started such a phase, the
1825    //   foreground collector is blocked waiting for the
1826    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1827    //   are executed in the VM thread.
1828    //
1829    // The locking order is
1830    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1831    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1832    //   CMS token  (claimed in
1833    //                stop_world_and_do() -->
1834    //                  safepoint_synchronize() -->
1835    //                    CMSThread::synchronize())
1836
1837    {
1838      // Check if the FG collector wants us to yield.
1839      CMSTokenSync x(true); // is cms thread
1840      if (waitForForegroundGC()) {
1841        // We yielded to a foreground GC, nothing more to be
1842        // done this round.
1843        assert(_foregroundGCShouldWait == false, "We set it to false in "
1844               "waitForForegroundGC()");
1845        if (TraceCMSState) {
1846          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1847            " exiting collection CMS state %d",
1848            p2i(Thread::current()), _collectorState);
1849        }
1850        return;
1851      } else {
1852        // The background collector can run but check to see if the
1853        // foreground collector has done a collection while the
1854        // background collector was waiting to get the CGC_lock
1855        // above.  If yes, break so that _foregroundGCShouldWait
1856        // is cleared before returning.
1857        if (_collectorState == Idling) {
1858          break;
1859        }
1860      }
1861    }
1862
1863    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1864      "should be waiting");
1865
1866    switch (_collectorState) {
1867      case InitialMarking:
1868        {
1869          ReleaseForegroundGC x(this);
1870          stats().record_cms_begin();
1871          VM_CMS_Initial_Mark initial_mark_op(this);
1872          VMThread::execute(&initial_mark_op);
1873        }
1874        // The collector state may be any legal state at this point
1875        // since the background collector may have yielded to the
1876        // foreground collector.
1877        break;
1878      case Marking:
1879        // initial marking in checkpointRootsInitialWork has been completed
1880        if (markFromRoots()) { // we were successful
1881          assert(_collectorState == Precleaning, "Collector state should "
1882            "have changed");
1883        } else {
1884          assert(_foregroundGCIsActive, "Internal state inconsistency");
1885        }
1886        break;
1887      case Precleaning:
1888        // marking from roots in markFromRoots has been completed
1889        preclean();
1890        assert(_collectorState == AbortablePreclean ||
1891               _collectorState == FinalMarking,
1892               "Collector state should have changed");
1893        break;
1894      case AbortablePreclean:
1895        abortable_preclean();
1896        assert(_collectorState == FinalMarking, "Collector state should "
1897          "have changed");
1898        break;
1899      case FinalMarking:
1900        {
1901          ReleaseForegroundGC x(this);
1902
1903          VM_CMS_Final_Remark final_remark_op(this);
1904          VMThread::execute(&final_remark_op);
1905        }
1906        assert(_foregroundGCShouldWait, "block post-condition");
1907        break;
1908      case Sweeping:
1909        // final marking in checkpointRootsFinal has been completed
1910        sweep();
1911        assert(_collectorState == Resizing, "Collector state change "
1912          "to Resizing must be done under the free_list_lock");
1913
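        // Note: no break here -- after a successful sweep the collector is
        // already in the Resizing state, so we deliberately fall through.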
1914      case Resizing: {
1915        // Sweeping has been completed...
1916        // At this point the background collection has completed.
1917        // Don't move the call to compute_new_size() down
1918        // into code that might be executed if the background
1919        // collection was preempted.
1920        {
1921          ReleaseForegroundGC x(this);   // unblock FG collection
1922          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1923          CMSTokenSync        z(true);   // not strictly needed.
1924          if (_collectorState == Resizing) {
1925            compute_new_size();
1926            save_heap_summary();
1927            _collectorState = Resetting;
1928          } else {
1929            assert(_collectorState == Idling, "The state should only change"
1930                   " because the foreground collector has finished the collection");
1931          }
1932        }
1933        break;
1934      }
1935      case Resetting:
1936        // CMS heap resizing has been completed
1937        reset_concurrent();
1938        assert(_collectorState == Idling, "Collector state should "
1939          "have changed");
1940
1941        MetaspaceGC::set_should_concurrent_collect(false);
1942
1943        stats().record_cms_end();
1944        // Don't move the concurrent_phases_end() and compute_new_size()
1945        // calls to here because a preempted background collection
1946        // has its state set to "Resetting".
1947        break;
1948      case Idling:
1949      default:
1950        ShouldNotReachHere();
1951        break;
1952    }
1953    if (TraceCMSState) {
1954      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1955        p2i(Thread::current()), _collectorState);
1956    }
1957    assert(_foregroundGCShouldWait, "block post-condition");
1958  }
1959
1960  // Should this be in gc_epilogue?
1961  collector_policy()->counters()->update_counters();
1962
1963  {
1964    // Clear _foregroundGCShouldWait and, in the event that the
1965    // foreground collector is waiting, notify it, before
1966    // returning.
1967    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1968    _foregroundGCShouldWait = false;
1969    if (_foregroundGCIsActive) {
1970      CGC_lock->notify();
1971    }
1972    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1973           "Possible deadlock");
1974  }
1975  if (TraceCMSState) {
1976    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1977      " exiting collection CMS state %d",
1978      p2i(Thread::current()), _collectorState);
1979  }
1980  if (PrintGC && Verbose) {
1981    _cmsGen->print_heap_change(prev_used);
1982  }
1983}
1984
1985void CMSCollector::register_gc_start(GCCause::Cause cause) {
1986  _cms_start_registered = true;
1987  _gc_timer_cm->register_gc_start();
1988  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1989}
1990
1991void CMSCollector::register_gc_end() {
1992  if (_cms_start_registered) {
1993    report_heap_summary(GCWhen::AfterGC);
1994
1995    _gc_timer_cm->register_gc_end();
1996    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1997    _cms_start_registered = false;
1998  }
1999}
2000
2001void CMSCollector::save_heap_summary() {
2002  GenCollectedHeap* gch = GenCollectedHeap::heap();
2003  _last_heap_summary = gch->create_heap_summary();
2004  _last_metaspace_summary = gch->create_metaspace_summary();
2005}
2006
2007void CMSCollector::report_heap_summary(GCWhen::Type when) {
2008  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2009  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2010}
2011
2012bool CMSCollector::waitForForegroundGC() {
2013  bool res = false;
2014  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2015         "CMS thread should have CMS token");
2016  // Block the foreground collector until the
2017  // background collector decides whether to
2018  // yield.
2019  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2020  _foregroundGCShouldWait = true;
2021  if (_foregroundGCIsActive) {
2022    // The background collector yields to the
2023    // foreground collector and returns a value
2024    // indicating that it has yielded.  The foreground
2025    // collector can proceed.
2026    res = true;
2027    _foregroundGCShouldWait = false;
2028    ConcurrentMarkSweepThread::clear_CMS_flag(
2029      ConcurrentMarkSweepThread::CMS_cms_has_token);
2030    ConcurrentMarkSweepThread::set_CMS_flag(
2031      ConcurrentMarkSweepThread::CMS_cms_wants_token);
2032    // Get a possibly blocked foreground thread going
2033    CGC_lock->notify();
2034    if (TraceCMSState) {
2035      gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2036        p2i(Thread::current()), _collectorState);
2037    }
2038    while (_foregroundGCIsActive) {
2039      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2040    }
2041    ConcurrentMarkSweepThread::set_CMS_flag(
2042      ConcurrentMarkSweepThread::CMS_cms_has_token);
2043    ConcurrentMarkSweepThread::clear_CMS_flag(
2044      ConcurrentMarkSweepThread::CMS_cms_wants_token);
2045  }
2046  if (TraceCMSState) {
2047    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2048      p2i(Thread::current()), _collectorState);
2049  }
2050  return res;
2051}
2052
2053// Because of the need to lock the free lists and other structures in
2054// the collector, common to all the generations that the collector is
2055// collecting, we need the gc_prologues of individual CMS generations
2056// to delegate to their collector. It may have been simpler had the
2057// current infrastructure allowed one to call a prologue on a
2058// collector. In the absence of that we have the generation's
2059// prologue delegate to the collector, which delegates back
2060// some "local" work to a worker method in the individual generations
2061// that it's responsible for collecting, while itself doing any
2062// work common to all generations it's responsible for. A similar
2063// comment applies to the gc_epilogues().
2064// The role of the variable _between_prologue_and_epilogue is to
2065// enforce the invocation protocol.
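// The resulting call sequence for each collection pause is therefore (sketched):
//   ConcurrentMarkSweepGeneration::gc_prologue(full)
//     -> CMSCollector::gc_prologue(full)       // claims locks, once per pause
//          -> ConcurrentMarkSweepGeneration::gc_prologue_work(full, ...)
// and symmetrically for gc_epilogue()/gc_epilogue_work().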
2066void CMSCollector::gc_prologue(bool full) {
2067  // Call gc_prologue_work() for the CMSGen
2068  // we are responsible for.
2069
2070  // The following locking discipline assumes that we are only called
2071  // when the world is stopped.
2072  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2073
2074  // The CMSCollector prologue must call the gc_prologues for the
2075  // "generations" that it's responsible
2076  // for.
2077
2078  assert(   Thread::current()->is_VM_thread()
2079         || (   CMSScavengeBeforeRemark
2080             && Thread::current()->is_ConcurrentGC_thread()),
2081         "Incorrect thread type for prologue execution");
2082
2083  if (_between_prologue_and_epilogue) {
2084    // We have already been invoked; this is a gc_prologue delegation
2085    // from yet another CMS generation that we are responsible for, just
2086    // ignore it since all relevant work has already been done.
2087    return;
2088  }
2089
2090  // set a bit saying prologue has been called; cleared in epilogue
2091  _between_prologue_and_epilogue = true;
2092  // Claim locks for common data structures, then call gc_prologue_work()
2093  // for each CMSGen.
2094
2095  getFreelistLocks();   // gets free list locks on constituent spaces
2096  bitMapLock()->lock_without_safepoint_check();
2097
2098  // Should call gc_prologue_work() for all cms gens we are responsible for
2099  bool duringMarking =    _collectorState >= Marking
2100                         && _collectorState < Sweeping;
2101
2102  // The young collections clear the modified oops state, which tells if
2103  // there are any modified oops in the class. The remark phase also needs
2104  // that information. Tell the young collection to save the union of all
2105  // modified klasses.
2106  if (duringMarking) {
2107    _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2108  }
2109
2110  bool registerClosure = duringMarking;
2111
2112  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2113
2114  if (!full) {
2115    stats().record_gc0_begin();
2116  }
2117}
2118
2119void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2120
2121  _capacity_at_prologue = capacity();
2122  _used_at_prologue = used();
2123
2124  // Delegate to CMScollector which knows how to coordinate between
2125  // this and any other CMS generations that it is responsible for
2126  // collecting.
2127  collector()->gc_prologue(full);
2128}
2129
2130// This is a "private" interface for use by this generation's CMSCollector.
2131// Not to be called directly by any other entity (for instance,
2132// GenCollectedHeap, which calls the "public" gc_prologue method above).
2133void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2134  bool registerClosure, ModUnionClosure* modUnionClosure) {
2135  assert(!incremental_collection_failed(), "Shouldn't be set yet");
2136  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2137    "Should be NULL");
2138  if (registerClosure) {
2139    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2140  }
2141  cmsSpace()->gc_prologue();
2142  // Clear stat counters
2143  NOT_PRODUCT(
2144    assert(_numObjectsPromoted == 0, "check");
2145    assert(_numWordsPromoted   == 0, "check");
2146    if (Verbose && PrintGC) {
2147      gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
2148                          SIZE_FORMAT " bytes concurrently",
2149      _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2150    }
2151    _numObjectsAllocated = 0;
2152    _numWordsAllocated   = 0;
2153  )
2154}
2155
2156void CMSCollector::gc_epilogue(bool full) {
2157  // The following locking discipline assumes that we are only called
2158  // when the world is stopped.
2159  assert(SafepointSynchronize::is_at_safepoint(),
2160         "world is stopped assumption");
2161
2162  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2163  // if linear allocation blocks need to be appropriately marked to allow
2164  // the blocks to be parsable. We also check here whether we need to nudge the
2165  // CMS collector thread to start a new cycle (if it's not already active).
2166  assert(   Thread::current()->is_VM_thread()
2167         || (   CMSScavengeBeforeRemark
2168             && Thread::current()->is_ConcurrentGC_thread()),
2169         "Incorrect thread type for epilogue execution");
2170
2171  if (!_between_prologue_and_epilogue) {
2172    // We have already been invoked; this is a gc_epilogue delegation
2173    // from yet another CMS generation that we are responsible for, just
2174    // ignore it since all relevant work has already been done.
2175    return;
2176  }
2177  assert(haveFreelistLocks(), "must have freelist locks");
2178  assert_lock_strong(bitMapLock());
2179
2180  _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2181
2182  _cmsGen->gc_epilogue_work(full);
2183
2184  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2185    // in case sampling was not already enabled, enable it
2186    _start_sampling = true;
2187  }
2188  // reset _eden_chunk_array so sampling starts afresh
2189  _eden_chunk_index = 0;
2190
2191  size_t cms_used   = _cmsGen->cmsSpace()->used();
2192
2193  // update performance counters - this uses a special version of
2194  // update_counters() that allows the utilization to be passed as a
2195  // parameter, avoiding multiple calls to used().
2196  //
2197  _cmsGen->update_counters(cms_used);
2198
2199  bitMapLock()->unlock();
2200  releaseFreelistLocks();
2201
2202  if (!CleanChunkPoolAsync) {
2203    Chunk::clean_chunk_pool();
2204  }
2205
2206  set_did_compact(false);
2207  _between_prologue_and_epilogue = false;  // ready for next cycle
2208}
2209
2210void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2211  collector()->gc_epilogue(full);
2212
2213  // Also reset promotion tracking in par gc thread states.
2214  for (uint i = 0; i < ParallelGCThreads; i++) {
2215    _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2216  }
2217}
2218
2219void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2220  assert(!incremental_collection_failed(), "Should have been cleared");
2221  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2222  cmsSpace()->gc_epilogue();
2223    // Print stat counters
2224  NOT_PRODUCT(
2225    assert(_numObjectsAllocated == 0, "check");
2226    assert(_numWordsAllocated == 0, "check");
2227    if (Verbose && PrintGC) {
2228      gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
2229                          SIZE_FORMAT " bytes",
2230                 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2231    }
2232    _numObjectsPromoted = 0;
2233    _numWordsPromoted   = 0;
2234  )
2235
2236  if (PrintGC && Verbose) {
2237    // The call down the chain in contiguous_available() needs the freelistLock,
2238    // so print this out before releasing the freelistLock.
2239    gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2240                        contiguous_available());
2241  }
2242}
2243
2244#ifndef PRODUCT
2245bool CMSCollector::have_cms_token() {
2246  Thread* thr = Thread::current();
2247  if (thr->is_VM_thread()) {
2248    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2249  } else if (thr->is_ConcurrentGC_thread()) {
2250    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2251  } else if (thr->is_GC_task_thread()) {
2252    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2253           ParGCRareEvent_lock->owned_by_self();
2254  }
2255  return false;
2256}
2257#endif
2258
2259// Check reachability of the given heap address in CMS generation,
2260// treating all other generations as roots.
2261bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2262  // We could "guarantee" below, rather than assert, but I'll
2263  // leave these as "asserts" so that an adventurous debugger
2264  // could try this in the product build provided some subset of
2265  // the conditions were met, and provided they were interested in the
2266  // results and knew that the computation below wouldn't interfere
2267  // with other concurrent computations mutating the structures
2268  // being read or written.
2269  assert(SafepointSynchronize::is_at_safepoint(),
2270         "Else mutations in object graph will make answer suspect");
2271  assert(have_cms_token(), "Should hold cms token");
2272  assert(haveFreelistLocks(), "must hold free list locks");
2273  assert_lock_strong(bitMapLock());
2274
2275  // Clear the marking bit map array before starting, but, just
2276  // for kicks, first report if the given address is already marked
2277  gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2278                _markBitMap.isMarked(addr) ? "" : " not");
2279
2280  if (verify_after_remark()) {
2281    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2282    bool result = verification_mark_bm()->isMarked(addr);
2283    gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2284                           result ? "IS" : "is NOT");
2285    return result;
2286  } else {
2287    gclog_or_tty->print_cr("Could not compute result");
2288    return false;
2289  }
2290}
2291
2292
2293void
2294CMSCollector::print_on_error(outputStream* st) {
2295  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2296  if (collector != NULL) {
2297    CMSBitMap* bitmap = &collector->_markBitMap;
2298    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2299    bitmap->print_on_error(st, " Bits: ");
2300
2301    st->cr();
2302
2303    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2304    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2305    mut_bitmap->print_on_error(st, " Bits: ");
2306  }
2307}
2308
2309////////////////////////////////////////////////////////
2310// CMS Verification Support
2311////////////////////////////////////////////////////////
2312// Following the remark phase, the following invariant should hold --
2313// each object in the CMS heap which is marked in the verification_mark_bm()
2314// should also be marked in markBitMap().
2315
2316class VerifyMarkedClosure: public BitMapClosure {
2317  CMSBitMap* _marks;
2318  bool       _failed;
2319
2320 public:
2321  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2322
2323  bool do_bit(size_t offset) {
2324    HeapWord* addr = _marks->offsetToHeapWord(offset);
2325    if (!_marks->isMarked(addr)) {
2326      oop(addr)->print_on(gclog_or_tty);
2327      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2328      _failed = true;
2329    }
2330    return true;
2331  }
2332
2333  bool failed() { return _failed; }
2334};
2335
2336bool CMSCollector::verify_after_remark(bool silent) {
2337  if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2338  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2339  static bool init = false;
2340
2341  assert(SafepointSynchronize::is_at_safepoint(),
2342         "Else mutations in object graph will make answer suspect");
2343  assert(have_cms_token(),
2344         "Else there may be mutual interference in use of "
2345         " verification data structures");
2346  assert(_collectorState > Marking && _collectorState <= Sweeping,
2347         "Else marking info checked here may be obsolete");
2348  assert(haveFreelistLocks(), "must hold free list locks");
2349  assert_lock_strong(bitMapLock());
2350
2351
2352  // Allocate marking bit map if not already allocated
2353  if (!init) { // first time
2354    if (!verification_mark_bm()->allocate(_span)) {
2355      return false;
2356    }
2357    init = true;
2358  }
2359
2360  assert(verification_mark_stack()->isEmpty(), "Should be empty");
2361
2362  // Turn off refs discovery -- so we will be tracing through refs.
2363  // This is as intended, because by this time
2364  // GC must already have cleared any refs that need to be cleared,
2365  // and traced those that need to be marked; moreover,
2366  // the marking done here is not going to interfere in any
2367  // way with the marking information used by GC.
2368  NoRefDiscovery no_discovery(ref_processor());
2369
2370#if defined(COMPILER2) || INCLUDE_JVMCI
2371  DerivedPointerTableDeactivate dpt_deact;
2372#endif
2373
2374  // Clear any marks from a previous round
2375  verification_mark_bm()->clear_all();
2376  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2377  verify_work_stacks_empty();
2378
2379  GenCollectedHeap* gch = GenCollectedHeap::heap();
2380  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2381  // Update the saved marks which may affect the root scans.
2382  gch->save_marks();
2383
2384  if (CMSRemarkVerifyVariant == 1) {
2385    // In this first variant of verification, we complete
2386    // all marking, then check if the new marks-vector is
2387    // a subset of the CMS marks-vector.
2388    verify_after_remark_work_1();
2389  } else if (CMSRemarkVerifyVariant == 2) {
2390    // In this second variant of verification, we flag an error
2391    // (i.e. an object reachable in the new marks-vector not reachable
2392    // in the CMS marks-vector) immediately, also indicating the
2393    // identify of an object (A) that references the unmarked object (B) --
2394    // presumably, a mutation to A failed to be picked up by preclean/remark?
2395    verify_after_remark_work_2();
2396  } else {
2397    warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2398            CMSRemarkVerifyVariant);
2399  }
2400  if (!silent) gclog_or_tty->print(" done] ");
2401  return true;
2402}
2403
2404void CMSCollector::verify_after_remark_work_1() {
2405  ResourceMark rm;
2406  HandleMark  hm;
2407  GenCollectedHeap* gch = GenCollectedHeap::heap();
2408
2409  // Get a clear set of claim bits for the roots processing to work with.
2410  ClassLoaderDataGraph::clear_claimed_marks();
2411
2412  // Mark from roots one level into CMS
2413  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2414  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2415
2416  {
2417    StrongRootsScope srs(1);
2418
2419    gch->gen_process_roots(&srs,
2420                           GenCollectedHeap::OldGen,
2421                           true,   // young gen as roots
2422                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2423                           should_unload_classes(),
2424                           &notOlder,
2425                           NULL,
2426                           NULL);
2427  }
2428
2429  // Now mark from the roots
2430  MarkFromRootsClosure markFromRootsClosure(this, _span,
2431    verification_mark_bm(), verification_mark_stack(),
2432    false /* don't yield */, true /* verifying */);
2433  assert(_restart_addr == NULL, "Expected pre-condition");
2434  verification_mark_bm()->iterate(&markFromRootsClosure);
2435  while (_restart_addr != NULL) {
2436    // Deal with stack overflow: by restarting at the indicated
2437    // address.
2438    HeapWord* ra = _restart_addr;
2439    markFromRootsClosure.reset(ra);
2440    _restart_addr = NULL;
2441    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2442  }
2443  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2444  verify_work_stacks_empty();
2445
2446  // Marking completed -- now verify that each bit marked in
2447  // verification_mark_bm() is also marked in markBitMap(); flag all
2448  // errors by printing corresponding objects.
2449  VerifyMarkedClosure vcl(markBitMap());
2450  verification_mark_bm()->iterate(&vcl);
2451  if (vcl.failed()) {
2452    gclog_or_tty->print("Verification failed");
2453    gch->print_on(gclog_or_tty);
2454    fatal("CMS: failed marking verification after remark");
2455  }
2456}
2457
2458class VerifyKlassOopsKlassClosure : public KlassClosure {
2459  class VerifyKlassOopsClosure : public OopClosure {
2460    CMSBitMap* _bitmap;
2461   public:
2462    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2463    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2464    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2465  } _oop_closure;
2466 public:
2467  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2468  void do_klass(Klass* k) {
2469    k->oops_do(&_oop_closure);
2470  }
2471};
2472
2473void CMSCollector::verify_after_remark_work_2() {
2474  ResourceMark rm;
2475  HandleMark  hm;
2476  GenCollectedHeap* gch = GenCollectedHeap::heap();
2477
2478  // Get a clear set of claim bits for the roots processing to work with.
2479  ClassLoaderDataGraph::clear_claimed_marks();
2480
2481  // Mark from roots one level into CMS
2482  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2483                                     markBitMap());
2484  CLDToOopClosure cld_closure(&notOlder, true);
2485
2486  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2487
2488  {
2489    StrongRootsScope srs(1);
2490
2491    gch->gen_process_roots(&srs,
2492                           GenCollectedHeap::OldGen,
2493                           true,   // young gen as roots
2494                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2495                           should_unload_classes(),
2496                           &notOlder,
2497                           NULL,
2498                           &cld_closure);
2499  }
2500
2501  // Now mark from the roots
2502  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2503    verification_mark_bm(), markBitMap(), verification_mark_stack());
2504  assert(_restart_addr == NULL, "Expected pre-condition");
2505  verification_mark_bm()->iterate(&markFromRootsClosure);
2506  while (_restart_addr != NULL) {
2507    // Deal with stack overflow: by restarting at the indicated
2508    // address.
2509    HeapWord* ra = _restart_addr;
2510    markFromRootsClosure.reset(ra);
2511    _restart_addr = NULL;
2512    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2513  }
2514  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2515  verify_work_stacks_empty();
2516
2517  VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2518  ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2519
2520  // Marking completed -- now verify that each bit marked in
2521  // verification_mark_bm() is also marked in markBitMap(); flag all
2522  // errors by printing corresponding objects.
2523  VerifyMarkedClosure vcl(markBitMap());
2524  verification_mark_bm()->iterate(&vcl);
2525  assert(!vcl.failed(), "Else verification above should not have succeeded");
2526}
2527
2528void ConcurrentMarkSweepGeneration::save_marks() {
2529  // delegate to CMS space
2530  cmsSpace()->save_marks();
2531  for (uint i = 0; i < ParallelGCThreads; i++) {
2532    _par_gc_thread_states[i]->promo.startTrackingPromotions();
2533  }
2534}
2535
2536bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2537  return cmsSpace()->no_allocs_since_save_marks();
2538}
2539
2540#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2541                                                                \
2542void ConcurrentMarkSweepGeneration::                            \
2543oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2544  cl->set_generation(this);                                     \
2545  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2546  cl->reset_generation();                                       \
2547  save_marks();                                                 \
2548}
2549
2550ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2551
2552void
2553ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2554  if (freelistLock()->owned_by_self()) {
2555    Generation::oop_iterate(cl);
2556  } else {
2557    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2558    Generation::oop_iterate(cl);
2559  }
2560}
2561
2562void
2563ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2564  if (freelistLock()->owned_by_self()) {
2565    Generation::object_iterate(cl);
2566  } else {
2567    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2568    Generation::object_iterate(cl);
2569  }
2570}
2571
2572void
2573ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2574  if (freelistLock()->owned_by_self()) {
2575    Generation::safe_object_iterate(cl);
2576  } else {
2577    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2578    Generation::safe_object_iterate(cl);
2579  }
2580}
2581
2582void
2583ConcurrentMarkSweepGeneration::post_compact() {
2584}
2585
2586void
2587ConcurrentMarkSweepGeneration::prepare_for_verify() {
2588  // Fix the linear allocation blocks to look like free blocks.
2589
2590  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2591  // are not called when the heap is verified during universe initialization and
2592  // at vm shutdown.
2593  if (freelistLock()->owned_by_self()) {
2594    cmsSpace()->prepare_for_verify();
2595  } else {
2596    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2597    cmsSpace()->prepare_for_verify();
2598  }
2599}
2600
2601void
2602ConcurrentMarkSweepGeneration::verify() {
2603  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2604  // are not called when the heap is verified during universe initialization and
2605  // at vm shutdown.
2606  if (freelistLock()->owned_by_self()) {
2607    cmsSpace()->verify();
2608  } else {
2609    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2610    cmsSpace()->verify();
2611  }
2612}
2613
2614void CMSCollector::verify() {
2615  _cmsGen->verify();
2616}
2617
2618#ifndef PRODUCT
2619bool CMSCollector::overflow_list_is_empty() const {
2620  assert(_num_par_pushes >= 0, "Inconsistency");
2621  if (_overflow_list == NULL) {
2622    assert(_num_par_pushes == 0, "Inconsistency");
2623  }
2624  return _overflow_list == NULL;
2625}
2626
2627// The methods verify_work_stacks_empty() and verify_overflow_empty()
2628// merely consolidate assertion checks that appear to occur together frequently.
2629void CMSCollector::verify_work_stacks_empty() const {
2630  assert(_markStack.isEmpty(), "Marking stack should be empty");
2631  assert(overflow_list_is_empty(), "Overflow list should be empty");
2632}
2633
2634void CMSCollector::verify_overflow_empty() const {
2635  assert(overflow_list_is_empty(), "Overflow list should be empty");
2636  assert(no_preserved_marks(), "No preserved marks");
2637}
2638#endif // PRODUCT
2639
2640// Decide if we want to enable class unloading as part of the
2641// ensuing concurrent GC cycle. We will collect and
2642// unload classes if it's the case that:
2643// (1) an explicit gc request has been made and the flag
2644//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2645// (2) (a) class unloading is enabled at the command line, and
2646//     (b) the class-unloading interval has elapsed, or old gen is getting really full
2647// NOTE: Provided there is no change in the state of the heap between
2648// calls to this method, it should have idempotent results. Moreover,
2649// its results should be monotonically increasing (i.e. going from 0 to 1,
2650// but not 1 to 0) between successive calls between which the heap was
2651// not collected. For the implementation below, it must thus rely on
2652// the property that concurrent_cycles_since_last_unload()
2653// will not decrease unless a collection cycle happened and that
2654// _cmsGen->is_too_full() are
2655// themselves also monotonic in that sense. See check_monotonicity()
2656// below.
2657void CMSCollector::update_should_unload_classes() {
2658  _should_unload_classes = false;
2659  // Condition 1 above
2660  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2661    _should_unload_classes = true;
2662  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2663    // Disjuncts of condition 2.b above
2664    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2665                              CMSClassUnloadingMaxInterval)
2666                           || _cmsGen->is_too_full();
2667  }
2668}
2669
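// Rough numeric sketch of the check below (percentage assumed for
// illustration): with CMSIsTooFullPercentage == 98, a generation for which
// should_concurrent_collect() is true and whose occupancy() is 0.985 (98.5%)
// is considered too full, while one at 0.97 is not.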
2670bool ConcurrentMarkSweepGeneration::is_too_full() const {
2671  bool res = should_concurrent_collect();
2672  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2673  return res;
2674}
2675
2676void CMSCollector::setup_cms_unloading_and_verification_state() {
2677  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2678                             || VerifyBeforeExit;
2679  const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2680
2681  // We set the proper root for this CMS cycle here.
2682  if (should_unload_classes()) {   // Should unload classes this cycle
2683    remove_root_scanning_option(rso);  // Shrink the root set appropriately
2684    set_verifying(should_verify);    // Set verification state for this cycle
2685    return;                            // Nothing else needs to be done at this time
2686  }
2687
2688  // Not unloading classes this cycle
2689  assert(!should_unload_classes(), "Inconsistency!");
2690
2691  // If we are not unloading classes then add SO_AllCodeCache to root
2692  // scanning options.
2693  add_root_scanning_option(rso);
2694
2695  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2696    set_verifying(true);
2697  } else if (verifying() && !should_verify) {
2698    // We were verifying, but some verification flags got disabled.
2699    set_verifying(false);
2700    // Exclude symbols, strings and code cache elements from root scanning to
2701    // reduce IM and RM pauses.
2702    remove_root_scanning_option(rso);
2703  }
2704}
2705
2706
2707#ifndef PRODUCT
2708HeapWord* CMSCollector::block_start(const void* p) const {
2709  const HeapWord* addr = (HeapWord*)p;
2710  if (_span.contains(p)) {
2711    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2712      return _cmsGen->cmsSpace()->block_start(p);
2713    }
2714  }
2715  return NULL;
2716}
2717#endif
2718
2719HeapWord*
2720ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2721                                                   bool   tlab,
2722                                                   bool   parallel) {
2723  CMSSynchronousYieldRequest yr;
2724  assert(!tlab, "Can't deal with TLAB allocation");
2725  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2726  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2727  if (GCExpandToAllocateDelayMillis > 0) {
2728    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2729  }
2730  return have_lock_and_allocate(word_size, tlab);
2731}
2732
2733void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2734    size_t bytes,
2735    size_t expand_bytes,
2736    CMSExpansionCause::Cause cause)
2737{
2738
2739  bool success = expand(bytes, expand_bytes);
2740
2741  // remember why we expanded; this information is used
2742  // by shouldConcurrentCollect() when making decisions on whether to start
2743  // a new CMS cycle.
2744  if (success) {
2745    set_expansion_cause(cause);
2746    if (PrintGCDetails && Verbose) {
2747      gclog_or_tty->print_cr("Expanded CMS gen for %s",
2748        CMSExpansionCause::to_string(cause));
2749    }
2750  }
2751}
2752
2753HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2754  HeapWord* res = NULL;
2755  MutexLocker x(ParGCRareEvent_lock);
2756  while (true) {
2757    // Expansion by some other thread might make alloc OK now:
2758    res = ps->lab.alloc(word_sz);
2759    if (res != NULL) return res;
2760    // If there's not enough expansion space available, give up.
2761    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2762      return NULL;
2763    }
2764    // Otherwise, we try expansion.
2765    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2766    // Now go around the loop and try alloc again;
2767    // A competing par_promote might beat us to the expansion space,
2768    // so we may go around the loop again if promotion fails again.
2769    if (GCExpandToAllocateDelayMillis > 0) {
2770      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2771    }
2772  }
2773}
2774
2775
2776bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2777  PromotionInfo* promo) {
2778  MutexLocker x(ParGCRareEvent_lock);
2779  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2780  while (true) {
2781    // Expansion by some other thread might make alloc OK now:
2782    if (promo->ensure_spooling_space()) {
2783      assert(promo->has_spooling_space(),
2784             "Post-condition of successful ensure_spooling_space()");
2785      return true;
2786    }
2787    // If there's not enough expansion space available, give up.
2788    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2789      return false;
2790    }
2791    // Otherwise, we try expansion.
2792    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2793    // Now go around the loop and try alloc again;
2794    // A competing allocation might beat us to the expansion space,
2795    // so we may go around the loop again if allocation fails again.
2796    if (GCExpandToAllocateDelayMillis > 0) {
2797      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2798    }
2799  }
2800}
2801
2802void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2803  // Only shrink if a compaction was done so that all the free space
2804  // in the generation is in a contiguous block at the end.
2805  if (did_compact()) {
2806    CardGeneration::shrink(bytes);
2807  }
2808}
2809
2810void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2811  assert_locked_or_safepoint(Heap_lock);
2812}
2813
2814void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2815  assert_locked_or_safepoint(Heap_lock);
2816  assert_lock_strong(freelistLock());
2817  if (PrintGCDetails && Verbose) {
2818    warning("Shrinking of CMS not yet implemented");
2819  }
2820  return;
2821}
2822
2823
2824// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2825// phases.
2826class CMSPhaseAccounting: public StackObj {
2827 public:
2828  CMSPhaseAccounting(CMSCollector *collector,
2829                     const char *phase,
2830                     bool print_cr = true);
2831  ~CMSPhaseAccounting();
2832
2833 private:
2834  CMSCollector *_collector;
2835  const char *_phase;
2836  elapsedTimer _wallclock;
2837  bool _print_cr;
2838
2839 public:
2840  // Not MT-safe; so do not pass around these StackObj's
2841  // where they may be accessed by other threads.
2842  jlong wallclock_millis() {
2843    assert(_wallclock.is_active(), "Wall clock should not stop");
2844    _wallclock.stop();  // to record time
2845    jlong ret = _wallclock.milliseconds();
2846    _wallclock.start(); // restart
2847    return ret;
2848  }
2849};
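// Typical use (see markFromRoots() and preclean() below): a stack-allocated
// guard such as
//   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
// logs the "[...-concurrent-mark-start]" line and starts both the collector's
// phase timer and a wall clock in its constructor, then logs the two elapsed
// times (and, with PrintCMSStatistics, the yield count) when it goes out of
// scope at the end of the phase.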
2850
2851CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2852                                       const char *phase,
2853                                       bool print_cr) :
2854  _collector(collector), _phase(phase), _print_cr(print_cr) {
2855
2856  if (PrintCMSStatistics != 0) {
2857    _collector->resetYields();
2858  }
2859  if (PrintGCDetails) {
2860    gclog_or_tty->gclog_stamp();
2861    gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2862      _collector->cmsGen()->short_name(), _phase);
2863  }
2864  _collector->resetTimer();
2865  _wallclock.start();
2866  _collector->startTimer();
2867}
2868
2869CMSPhaseAccounting::~CMSPhaseAccounting() {
2870  assert(_wallclock.is_active(), "Wall clock should not have stopped");
2871  _collector->stopTimer();
2872  _wallclock.stop();
2873  if (PrintGCDetails) {
2874    gclog_or_tty->gclog_stamp();
2875    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2876                 _collector->cmsGen()->short_name(),
2877                 _phase, _collector->timerValue(), _wallclock.seconds());
2878    if (_print_cr) {
2879      gclog_or_tty->cr();
2880    }
2881    if (PrintCMSStatistics != 0) {
2882      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2883                    _collector->yields());
2884    }
2885  }
2886}
2887
2888// CMS work
2889
2890// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2891class CMSParMarkTask : public AbstractGangTask {
2892 protected:
2893  CMSCollector*     _collector;
2894  uint              _n_workers;
2895  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2896      AbstractGangTask(name),
2897      _collector(collector),
2898      _n_workers(n_workers) {}
2899  // Work method in support of parallel rescan ... of young gen spaces
2900  void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2901                             ContiguousSpace* space,
2902                             HeapWord** chunk_array, size_t chunk_top);
2903  void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2904};
2905
2906// Parallel initial mark task
2907class CMSParInitialMarkTask: public CMSParMarkTask {
2908  StrongRootsScope* _strong_roots_scope;
2909 public:
2910  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2911      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2912      _strong_roots_scope(strong_roots_scope) {}
2913  void work(uint worker_id);
2914};
2915
2916// Checkpoint the roots into this generation from outside
2917// this generation. [Note this initial checkpoint need only
2918// be approximate -- we'll do a catch up phase subsequently.]
2919void CMSCollector::checkpointRootsInitial() {
2920  assert(_collectorState == InitialMarking, "Wrong collector state");
2921  check_correct_thread_executing();
2922  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
2923
2924  save_heap_summary();
2925  report_heap_summary(GCWhen::BeforeGC);
2926
2927  ReferenceProcessor* rp = ref_processor();
2928  assert(_restart_addr == NULL, "Control point invariant");
2929  {
2930    // acquire locks for subsequent manipulations
2931    MutexLockerEx x(bitMapLock(),
2932                    Mutex::_no_safepoint_check_flag);
2933    checkpointRootsInitialWork();
2934    // enable ("weak") refs discovery
2935    rp->enable_discovery();
2936    _collectorState = Marking;
2937  }
2938}
2939
2940void CMSCollector::checkpointRootsInitialWork() {
2941  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2942  assert(_collectorState == InitialMarking, "just checking");
2943
2944  // Already have locks.
2945  assert_lock_strong(bitMapLock());
2946  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2947
2948  // Setup the verification and class unloading state for this
2949  // CMS collection cycle.
2950  setup_cms_unloading_and_verification_state();
2951
2952  NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2953    PrintGCDetails && Verbose, true, _gc_timer_cm);)
2954
2955  // Reset all the PLAB chunk arrays if necessary.
2956  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2957    reset_survivor_plab_arrays();
2958  }
2959
2960  ResourceMark rm;
2961  HandleMark  hm;
2962
2963  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2964  GenCollectedHeap* gch = GenCollectedHeap::heap();
2965
2966  verify_work_stacks_empty();
2967  verify_overflow_empty();
2968
2969  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2970  // Update the saved marks which may affect the root scans.
2971  gch->save_marks();
2972
2973  // weak reference processing has not started yet.
2974  ref_processor()->set_enqueuing_is_done(false);
2975
2976  // Need to remember all newly created CLDs,
2977  // so that we can guarantee that the remark finds them.
2978  ClassLoaderDataGraph::remember_new_clds(true);
2979
2980  // Whenever a CLD is found, it will be claimed before proceeding to mark
2981  // the klasses. The claimed marks need to be cleared before marking starts.
2982  ClassLoaderDataGraph::clear_claimed_marks();
2983
2984  if (CMSPrintEdenSurvivorChunks) {
2985    print_eden_and_survivor_chunk_arrays();
2986  }
2987
2988  {
2989#if defined(COMPILER2) || INCLUDE_JVMCI
2990    DerivedPointerTableDeactivate dpt_deact;
2991#endif
2992    if (CMSParallelInitialMarkEnabled) {
2993      // The parallel version.
2994      WorkGang* workers = gch->workers();
2995      assert(workers != NULL, "Need parallel worker threads.");
2996      uint n_workers = workers->active_workers();
2997
2998      StrongRootsScope srs(n_workers);
2999
3000      CMSParInitialMarkTask tsk(this, &srs, n_workers);
3001      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3002      if (n_workers > 1) {
3003        workers->run_task(&tsk);
3004      } else {
3005        tsk.work(0);
3006      }
3007    } else {
3008      // The serial version.
3009      CLDToOopClosure cld_closure(&notOlder, true);
3010      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3011
3012      StrongRootsScope srs(1);
3013
3014      gch->gen_process_roots(&srs,
3015                             GenCollectedHeap::OldGen,
3016                             true,   // young gen as roots
3017                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
3018                             should_unload_classes(),
3019                             &notOlder,
3020                             NULL,
3021                             &cld_closure);
3022    }
3023  }
3024
3025  // Clear mod-union table; it will be dirtied in the prologue of
3026  // CMS generation per each young generation collection.
3027
3028  assert(_modUnionTable.isAllClear(),
3029       "Was cleared in most recent final checkpoint phase"
3030       " or no bits are set in the gc_prologue before the start of the next "
3031       "subsequent marking phase.");
3032
3033  assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3034
3035  // Save the end of the used_region of the constituent generations
3036  // to be used to limit the extent of sweep in each generation.
3037  save_sweep_limits();
3038  verify_overflow_empty();
3039}
3040
3041bool CMSCollector::markFromRoots() {
3042  // we might be tempted to assert that:
3043  // assert(!SafepointSynchronize::is_at_safepoint(),
3044  //        "inconsistent argument?");
3045  // However that wouldn't be right, because it's possible that
3046  // a safepoint is indeed in progress as a young generation
3047  // stop-the-world GC happens even as we mark in this generation.
3048  assert(_collectorState == Marking, "inconsistent state?");
3049  check_correct_thread_executing();
3050  verify_overflow_empty();
3051
3052  // Weak ref discovery note: We may be discovering weak
3053  // refs in this generation concurrent (but interleaved) with
3054  // weak ref discovery by the young generation collector.
3055
3056  CMSTokenSyncWithLocks ts(true, bitMapLock());
3057  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3058  CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3059  bool res = markFromRootsWork();
3060  if (res) {
3061    _collectorState = Precleaning;
3062  } else { // We failed and a foreground collection wants to take over
3063    assert(_foregroundGCIsActive, "internal state inconsistency");
3064    assert(_restart_addr == NULL,  "foreground will restart from scratch");
3065    if (PrintGCDetails) {
3066      gclog_or_tty->print_cr("bailing out to foreground collection");
3067    }
3068  }
3069  verify_overflow_empty();
3070  return res;
3071}
3072
3073bool CMSCollector::markFromRootsWork() {
3074  // iterate over marked bits in bit map, doing a full scan and mark
3075  // from these roots using the following algorithm:
3076  // . if oop is to the right of the current scan pointer,
3077  //   mark corresponding bit (we'll process it later)
3078  // . else (oop is to left of current scan pointer)
3079  //   push oop on marking stack
3080  // . drain the marking stack
3081
3082  // Note that when we do a marking step we need to hold the
3083  // bit map lock -- recall that direct allocation (by mutators)
3084  // and promotion (by the young generation collector) is also
3085  // marking the bit map. [the so-called allocate live policy.]
3086  // Because the implementation of bit map marking is not
3087  // robust wrt simultaneous marking of bits in the same word,
3088  // we need to make sure that there is no such interference
3089  // between concurrent such updates.
3090
3091  // already have locks
3092  assert_lock_strong(bitMapLock());
3093
3094  verify_work_stacks_empty();
3095  verify_overflow_empty();
3096  bool result = false;
3097  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3098    result = do_marking_mt();
3099  } else {
3100    result = do_marking_st();
3101  }
3102  return result;
3103}
3104
3105// Forward decl
3106class CMSConcMarkingTask;
3107
3108class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3109  CMSCollector*       _collector;
3110  CMSConcMarkingTask* _task;
3111 public:
3112  virtual void yield();
3113
3114  // "n_threads" is the number of threads to be terminated.
3115  // "queue_set" is a set of work queues of other threads.
3116  // "collector" is the CMS collector associated with this task terminator.
3117  // "yield" indicates whether we need the gang as a whole to yield.
3118  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3119    ParallelTaskTerminator(n_threads, queue_set),
3120    _collector(collector) { }
3121
3122  void set_task(CMSConcMarkingTask* task) {
3123    _task = task;
3124  }
3125};
3126
3127class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3128  CMSConcMarkingTask* _task;
3129 public:
3130  bool should_exit_termination();
3131  void set_task(CMSConcMarkingTask* task) {
3132    _task = task;
3133  }
3134};
3135
3136// MT Concurrent Marking Task
3137class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3138  CMSCollector* _collector;
3139  uint          _n_workers;       // requested/desired # workers
3140  bool          _result;
3141  CompactibleFreeListSpace*  _cms_space;
3142  char          _pad_front[64];   // padding to ...
3143  HeapWord*     _global_finger;   // ... avoid sharing cache line
3144  char          _pad_back[64];
3145  HeapWord*     _restart_addr;
3146
3147  //  Exposed here for yielding support
3148  Mutex* const _bit_map_lock;
3149
3150  // The per thread work queues, available here for stealing
3151  OopTaskQueueSet*  _task_queues;
3152
3153  // Termination (and yielding) support
3154  CMSConcMarkingTerminator _term;
3155  CMSConcMarkingTerminatorTerminator _term_term;
3156
3157 public:
3158  CMSConcMarkingTask(CMSCollector* collector,
3159                 CompactibleFreeListSpace* cms_space,
3160                 YieldingFlexibleWorkGang* workers,
3161                 OopTaskQueueSet* task_queues):
3162    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3163    _collector(collector),
3164    _cms_space(cms_space),
3165    _n_workers(0), _result(true),
3166    _task_queues(task_queues),
3167    _term(_n_workers, task_queues, _collector),
3168    _bit_map_lock(collector->bitMapLock())
3169  {
3170    _requested_size = _n_workers;
3171    _term.set_task(this);
3172    _term_term.set_task(this);
3173    _restart_addr = _global_finger = _cms_space->bottom();
3174  }
3175
3176
3177  OopTaskQueueSet* task_queues()  { return _task_queues; }
3178
3179  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3180
3181  HeapWord** global_finger_addr() { return &_global_finger; }
3182
3183  CMSConcMarkingTerminator* terminator() { return &_term; }
3184
3185  virtual void set_for_termination(uint active_workers) {
3186    terminator()->reset_for_reuse(active_workers);
3187  }
3188
3189  void work(uint worker_id);
3190  bool should_yield() {
3191    return    ConcurrentMarkSweepThread::should_yield()
3192           && !_collector->foregroundGCIsActive();
3193  }
3194
3195  virtual void coordinator_yield();  // stuff done by coordinator
3196  bool result() { return _result; }
3197
3198  void reset(HeapWord* ra) {
3199    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3200    _restart_addr = _global_finger = ra;
3201    _term.reset_for_reuse();
3202  }
3203
3204  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3205                                           OopTaskQueue* work_q);
3206
3207 private:
3208  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3209  void do_work_steal(int i);
3210  void bump_global_finger(HeapWord* f);
3211};
3212
3213bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3214  assert(_task != NULL, "Error");
3215  return _task->yielding();
3216  // Note that we do not need the disjunct || _task->should_yield() above
3217  // because we want terminating threads to yield only if the task
3218  // is already in the midst of yielding, which happens only after at least one
3219  // thread has yielded.
3220}
3221
3222void CMSConcMarkingTerminator::yield() {
3223  if (_task->should_yield()) {
3224    _task->yield();
3225  } else {
3226    ParallelTaskTerminator::yield();
3227  }
3228}
3229
3230////////////////////////////////////////////////////////////////
3231// Concurrent Marking Algorithm Sketch
3232////////////////////////////////////////////////////////////////
3233// Until all tasks exhausted (both spaces):
3234// -- claim next available chunk
3235// -- bump global finger via CAS
3236// -- find first object that starts in this chunk
3237//    and start scanning bitmap from that position
3238// -- scan marked objects for oops
3239// -- CAS-mark target, and if successful:
3240//    . if target oop is above global finger (volatile read)
3241//      nothing to do
3242//    . if target oop is in chunk and above local finger
3243//        then nothing to do
3244//    . else push on work-queue
3245// -- Deal with possible overflow issues:
3246//    . local work-queue overflow causes stuff to be pushed on
3247//      global (common) overflow queue
3248//    . always first empty local work queue
3249//    . then get a batch of oops from global work queue if any
3250//    . then do work stealing
3251// -- When all tasks claimed (both spaces)
3252//    and local work queue empty,
3253//    then in a loop do:
3254//    . check global overflow stack; steal a batch of oops and trace
3255//    . try to steal from other threads if GOS is empty
3256//    . if neither is available, offer termination
3257// -- Terminate and return result
3258//
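// In the implementation below, the chunk claiming, global finger bumping and
// bitmap scanning are done by do_scan_and_mark(), while the overflow-stack
// draining, work stealing and termination protocol are done by
// do_work_steal(); work() simply runs the two in sequence for each worker.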
3259void CMSConcMarkingTask::work(uint worker_id) {
3260  elapsedTimer _timer;
3261  ResourceMark rm;
3262  HandleMark hm;
3263
3264  DEBUG_ONLY(_collector->verify_overflow_empty();)
3265
3266  // Before we begin work, our work queue should be empty
3267  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3268  // Scan the bitmap covering _cms_space, tracing through grey objects.
3269  _timer.start();
3270  do_scan_and_mark(worker_id, _cms_space);
3271  _timer.stop();
3272  if (PrintCMSStatistics != 0) {
3273    gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3274      worker_id, _timer.seconds());
3275      // XXX: need xxx/xxx type of notation, two timers
3276  }
3277
3278  // ... do work stealing
3279  _timer.reset();
3280  _timer.start();
3281  do_work_steal(worker_id);
3282  _timer.stop();
3283  if (PrintCMSStatistics != 0) {
3284    gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3285      worker_id, _timer.seconds());
3286      // XXX: need xxx/xxx type of notation, two timers
3287  }
3288  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3289  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3290  // Note that under the current task protocol, the
3291  // following assertion is true even of the spaces
3292  // expanded since the completion of the concurrent
3293  // marking. XXX This will likely change under a strict
3294  // ABORT semantics.
3295  // After perm removal the comparison was changed to
3296  // greater than or equal to from strictly greater than.
3297  // Before perm removal the highest address sweep would
3298  // have been at the end of perm gen but now is at the
3299  // end of the tenured gen.
3300  assert(_global_finger >=  _cms_space->end(),
3301         "All tasks have been completed");
3302  DEBUG_ONLY(_collector->verify_overflow_empty();)
3303}
3304
3305void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3306  HeapWord* read = _global_finger;
3307  HeapWord* cur  = read;
3308  while (f > read) {
3309    cur = read;
3310    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3311    if (cur == read) {
3312      // our cas succeeded
3313      assert(_global_finger >= f, "protocol consistency");
3314      break;
3315    }
3316  }
3317}
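// Worked example of the CAS loop above (addresses illustrative only): suppose
// _global_finger is 0x1000 and two workers try to bump it to 0x2000 and
// 0x3000 respectively. If the 0x3000 CAS wins first, the other worker's
// cmpxchg fails and re-reads 0x3000; since 0x2000 is no longer greater than
// the value read, its loop exits without writing, leaving the finger at the
// larger value; the finger only ever moves forward.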
3318
3319// This is really inefficient, and should be redone by
3320// using (not yet available) block-read and -write interfaces to the
3321// stack and the work_queue. XXX FIX ME !!!
3322bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3323                                                      OopTaskQueue* work_q) {
3324  // Fast lock-free check
3325  if (ovflw_stk->length() == 0) {
3326    return false;
3327  }
3328  assert(work_q->size() == 0, "Shouldn't steal");
3329  MutexLockerEx ml(ovflw_stk->par_lock(),
3330                   Mutex::_no_safepoint_check_flag);
3331  // Grab up to 1/4 the size of the work queue
3332  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3333                    (size_t)ParGCDesiredObjsFromOverflowList);
3334  num = MIN2(num, ovflw_stk->length());
3335  for (int i = (int) num; i > 0; i--) {
3336    oop cur = ovflw_stk->pop();
3337    assert(cur != NULL, "Counted wrong?");
3338    work_q->push(cur);
3339  }
3340  return num > 0;
3341}
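// Example of the batch-size computation above (flag value assumed for the
// sake of the example): with an empty work queue whose max_elems() is 16K and
// ParGCDesiredObjsFromOverflowList == 20, we take min(16K/4, 20) == 20 oops,
// further capped by the overflow stack's current length, per refill.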
3342
3343void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3344  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3345  int n_tasks = pst->n_tasks();
3346  // We allow that there may be no tasks to do here because
3347  // we are restarting after a stack overflow.
3348  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3349  uint nth_task = 0;
3350
3351  HeapWord* aligned_start = sp->bottom();
3352  if (sp->used_region().contains(_restart_addr)) {
3353    // Align down to a card boundary for the start of 0th task
3354    // for this space.
3355    aligned_start =
3356      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3357                                 CardTableModRefBS::card_size);
3358  }
3359
3360  size_t chunk_size = sp->marking_task_size();
3361  while (!pst->is_task_claimed(/* reference */ nth_task)) {
3362    // Having claimed the nth task in this space,
3363    // compute the chunk that it corresponds to:
3364    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3365                               aligned_start + (nth_task+1)*chunk_size);
3366    // Try and bump the global finger via a CAS;
3367    // note that we need to do the global finger bump
3368    // _before_ taking the intersection below, because
3369    // the task corresponding to that region will be
3370    // deemed done even if the used_region() expands
3371    // because of allocation -- as it almost certainly will
3372    // during start-up while the threads yield in the
3373    // closure below.
3374    HeapWord* finger = span.end();
3375    bump_global_finger(finger);   // atomically
3376    // There are null tasks here corresponding to chunks
3377    // beyond the "top" address of the space.
3378    span = span.intersection(sp->used_region());
3379    if (!span.is_empty()) {  // Non-null task
3380      HeapWord* prev_obj;
3381      assert(!span.contains(_restart_addr) || nth_task == 0,
3382             "Inconsistency");
3383      if (nth_task == 0) {
3384        // For the 0th task, we'll not need to compute a block_start.
3385        if (span.contains(_restart_addr)) {
3386          // In the case of a restart because of stack overflow,
3387          // we might additionally skip a chunk prefix.
3388          prev_obj = _restart_addr;
3389        } else {
3390          prev_obj = span.start();
3391        }
3392      } else {
3393        // We want to skip the first object because
3394        // the protocol is to scan any object in its entirety
3395        // that _starts_ in this span; a fortiori, any
3396        // object starting in an earlier span is scanned
3397        // as part of an earlier claimed task.
3398        // Below we use the "careful" version of block_start
3399        // so we do not try to navigate uninitialized objects.
3400        prev_obj = sp->block_start_careful(span.start());
3401        // Below we use a variant of block_size that uses the
3402        // Printezis bits to avoid waiting for allocated
3403        // objects to become initialized/parsable.
3404        while (prev_obj < span.start()) {
3405          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3406          if (sz > 0) {
3407            prev_obj += sz;
3408          } else {
3409            // In this case we may end up doing a bit of redundant
3410            // scanning, but that appears unavoidable, short of
3411            // locking the free list locks; see bug 6324141.
3412            break;
3413          }
3414        }
3415      }
3416      if (prev_obj < span.end()) {
3417        MemRegion my_span = MemRegion(prev_obj, span.end());
3418        // Do the marking work within a non-empty span --
3419        // the last argument to the constructor indicates whether the
3420        // iteration should be incremental with periodic yields.
3421        Par_MarkFromRootsClosure cl(this, _collector, my_span,
3422                                    &_collector->_markBitMap,
3423                                    work_queue(i),
3424                                    &_collector->_markStack);
3425        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3426      } // else nothing to do for this task
3427    }   // else nothing to do for this task
3428  }
3429  // We'd be tempted to assert here that since there are no
3430  // more tasks left to claim in this space, the global_finger
3431  // must exceed space->top() and a fortiori space->end(). However,
3432  // that would not quite be correct because the bumping of
3433  // global_finger occurs strictly after the claiming of a task,
3434  // so by the time we reach here the global finger may not yet
3435  // have been bumped up by the thread that claimed the last
3436  // task.
3437  pst->all_tasks_completed();
3438}
3439
3440class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
3441 private:
3442  CMSCollector* _collector;
3443  CMSConcMarkingTask* _task;
3444  MemRegion     _span;
3445  CMSBitMap*    _bit_map;
3446  CMSMarkStack* _overflow_stack;
3447  OopTaskQueue* _work_queue;
3448 protected:
3449  DO_OOP_WORK_DEFN
3450 public:
3451  Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3452                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3453    MetadataAwareOopClosure(collector->ref_processor()),
3454    _collector(collector),
3455    _task(task),
3456    _span(collector->_span),
3457    _work_queue(work_queue),
3458    _bit_map(bit_map),
3459    _overflow_stack(overflow_stack)
3460  { }
3461  virtual void do_oop(oop* p);
3462  virtual void do_oop(narrowOop* p);
3463
3464  void trim_queue(size_t max);
3465  void handle_stack_overflow(HeapWord* lost);
3466  void do_yield_check() {
3467    if (_task->should_yield()) {
3468      _task->yield();
3469    }
3470  }
3471};
3472
3473// Grey object scanning during work stealing phase --
3474// the salient assumption here is that any references
3475// that are in these stolen objects being scanned must
3476// already have been initialized (else they would not have
3477// been published), so we do not need to check for
3478// uninitialized objects before pushing here.
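// In tri-color terms, the closure below treats an unmarked object inside
// _span as white; a successful par_mark() turns it grey and publishes it on
// the work queue (or, failing that, on the shared overflow stack); it becomes
// black once its fields have been scanned via oop_iterate() from trim_queue()
// or do_work_steal().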
3479void Par_ConcMarkingClosure::do_oop(oop obj) {
3480  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3481  HeapWord* addr = (HeapWord*)obj;
3482  // Check if oop points into the CMS generation
3483  // and is not marked
3484  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3485    // a white object ...
3486    // If we manage to "claim" the object, by being the
3487    // first thread to mark it, then we push it on our
3488    // marking stack
3489    if (_bit_map->par_mark(addr)) {     // ... now grey
3490      // push on work queue (grey set)
3491      bool simulate_overflow = false;
3492      NOT_PRODUCT(
3493        if (CMSMarkStackOverflowALot &&
3494            _collector->simulate_overflow()) {
3495          // simulate a stack overflow
3496          simulate_overflow = true;
3497        }
3498      )
3499      if (simulate_overflow ||
3500          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3501        // stack overflow
3502        if (PrintCMSStatistics != 0) {
3503          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3504                                 SIZE_FORMAT, _overflow_stack->capacity());
3505        }
3506        // We cannot assert that the overflow stack is full because
3507        // it may have been emptied since.
3508        assert(simulate_overflow ||
3509               _work_queue->size() == _work_queue->max_elems(),
3510              "Else push should have succeeded");
3511        handle_stack_overflow(addr);
3512      }
3513    } // Else, some other thread got there first
3514    do_yield_check();
3515  }
3516}
3517
3518void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
3519void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3520
3521void Par_ConcMarkingClosure::trim_queue(size_t max) {
3522  while (_work_queue->size() > max) {
3523    oop new_oop;
3524    if (_work_queue->pop_local(new_oop)) {
3525      assert(new_oop->is_oop(), "Should be an oop");
3526      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3527      assert(_span.contains((HeapWord*)new_oop), "Not in span");
3528      new_oop->oop_iterate(this);  // do_oop() above
3529      do_yield_check();
3530    }
3531  }
3532}
3533
3534// Upon stack overflow, we discard (part of) the stack,
3535// remembering the least address amongst those discarded
3536// in CMSCollector's _restart_address.
3537void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3538  // We need to do this under a mutex to prevent other
3539  // workers from interfering with the work done below.
3540  MutexLockerEx ml(_overflow_stack->par_lock(),
3541                   Mutex::_no_safepoint_check_flag);
3542  // Remember the least grey address discarded
3543  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3544  _collector->lower_restart_addr(ra);
3545  _overflow_stack->reset();  // discard stack contents
3546  _overflow_stack->expand(); // expand the stack if possible
3547}
3548
3549
3550void CMSConcMarkingTask::do_work_steal(int i) {
3551  OopTaskQueue* work_q = work_queue(i);
3552  oop obj_to_scan;
3553  CMSBitMap* bm = &(_collector->_markBitMap);
3554  CMSMarkStack* ovflw = &(_collector->_markStack);
3555  int* seed = _collector->hash_seed(i);
3556  Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3557  while (true) {
3558    cl.trim_queue(0);
3559    assert(work_q->size() == 0, "Should have been emptied above");
3560    if (get_work_from_overflow_stack(ovflw, work_q)) {
3561      // Can't assert below because the work obtained from the
3562      // overflow stack may already have been stolen from us.
3563      // assert(work_q->size() > 0, "Work from overflow stack");
3564      continue;
3565    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3566      assert(obj_to_scan->is_oop(), "Should be an oop");
3567      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3568      obj_to_scan->oop_iterate(&cl);
3569    } else if (terminator()->offer_termination(&_term_term)) {
3570      assert(work_q->size() == 0, "Impossible!");
3571      break;
3572    } else if (yielding() || should_yield()) {
3573      yield();
3574    }
3575  }
3576}
3577
3578// This is run by the CMS (coordinator) thread.
3579void CMSConcMarkingTask::coordinator_yield() {
3580  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3581         "CMS thread should hold CMS token");
3582  // First give up the locks, then yield, then re-lock
3583  // We should probably use a constructor/destructor idiom to
3584  // do this unlock/lock or modify the MutexUnlocker class to
3585  // serve our purpose. XXX
3586  assert_lock_strong(_bit_map_lock);
3587  _bit_map_lock->unlock();
3588  ConcurrentMarkSweepThread::desynchronize(true);
3589  _collector->stopTimer();
3590  if (PrintCMSStatistics != 0) {
3591    _collector->incrementYields();
3592  }
3593
3594  // It is possible for whichever thread initiated the yield request
3595  // not to get a chance to wake up and take the bitmap lock between
3596  // this thread releasing it and reacquiring it. So, while the
3597  // should_yield() flag is on, let's sleep for a bit to give the
3598  // other thread a chance to wake up. The limit imposed on the number
3599  // of iterations is defensive, to avoid any unforeseen circumstances
3600  // putting us into an infinite loop. Since it's always been this
3601  // (coordinator_yield()) method that was observed to cause the
3602  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3603  // which is by default non-zero. For the other seven methods that
3604  // also perform the yield operation, as are using a different
3605  // parameter (CMSYieldSleepCount) which is by default zero. This way we
3606  // can enable the sleeping for those methods too, if necessary.
3607  // See 6442774.
3608  //
3609  // We really need to reconsider the synchronization between the GC
3610  // thread and the yield-requesting threads in the future and we
3611  // should really use wait/notify, which is the recommended
3612  // way of doing this type of interaction. Additionally, we should
3613  // consolidate the eight methods that do the yield operation and they
3614  // are almost identical into one for better maintainability and
3615  // readability. See 6445193.
3616  //
3617  // Tony 2006.06.29
3618  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3619                   ConcurrentMarkSweepThread::should_yield() &&
3620                   !CMSCollector::foregroundGCIsActive(); ++i) {
3621    os::sleep(Thread::current(), 1, false);
3622  }
3623
3624  ConcurrentMarkSweepThread::synchronize(true);
3625  _bit_map_lock->lock_without_safepoint_check();
3626  _collector->startTimer();
3627}
3628
3629bool CMSCollector::do_marking_mt() {
3630  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3631  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3632                                                                  conc_workers()->active_workers(),
3633                                                                  Threads::number_of_non_daemon_threads());
3634  conc_workers()->set_active_workers(num_workers);
3635
3636  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3637
3638  CMSConcMarkingTask tsk(this,
3639                         cms_space,
3640                         conc_workers(),
3641                         task_queues());
3642
3643  // Since the actual number of workers we get may be different
3644  // from the number we requested above, do we need to do anything different
3645  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3646  // class?? XXX
3647  cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
3648
3649  // Refs discovery is already non-atomic.
3650  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3651  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3652  conc_workers()->start_task(&tsk);
3653  while (tsk.yielded()) {
3654    tsk.coordinator_yield();
3655    conc_workers()->continue_task(&tsk);
3656  }
3657  // If the task was aborted, _restart_addr will be non-NULL
3658  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3659  while (_restart_addr != NULL) {
3660    // XXX For now we do not make use of ABORTED state and have not
3661    // yet implemented the right abort semantics (even in the original
3662    // single-threaded CMS case). That needs some more investigation
3663    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3664    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3665    // If _restart_addr is non-NULL, a marking stack overflow
3666    // occurred; we need to do a fresh marking iteration from the
3667    // indicated restart address.
3668    if (_foregroundGCIsActive) {
3669      // We may be running into repeated stack overflows, having
3670      // reached the limit of the stack size, while making very
3671      // slow forward progress. It may be best to bail out and
3672      // let the foreground collector do its job.
3673      // Clear _restart_addr, so that foreground GC
3674      // works from scratch. This avoids the headache of
3675      // a "rescan" which would otherwise be needed because
3676      // of the dirty mod union table & card table.
3677      _restart_addr = NULL;
3678      return false;
3679    }
3680    // Adjust the task to restart from _restart_addr
3681    tsk.reset(_restart_addr);
3682    cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
3683                  _restart_addr);
3684    _restart_addr = NULL;
3685    // Get the workers going again
3686    conc_workers()->start_task(&tsk);
3687    while (tsk.yielded()) {
3688      tsk.coordinator_yield();
3689      conc_workers()->continue_task(&tsk);
3690    }
3691  }
3692  assert(tsk.completed(), "Inconsistency");
3693  assert(tsk.result() == true, "Inconsistency");
3694  return true;
3695}
3696
3697bool CMSCollector::do_marking_st() {
3698  ResourceMark rm;
3699  HandleMark   hm;
3700
3701  // Temporarily make refs discovery single threaded (non-MT)
3702  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3703  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3704    &_markStack, CMSYield);
3705  // the last argument to iterate indicates whether the iteration
3706  // should be incremental with periodic yields.
3707  _markBitMap.iterate(&markFromRootsClosure);
3708  // If _restart_addr is non-NULL, a marking stack overflow
3709  // occurred; we need to do a fresh iteration from the
3710  // indicated restart address.
3711  while (_restart_addr != NULL) {
3712    if (_foregroundGCIsActive) {
3713      // We may be running into repeated stack overflows, having
3714      // reached the limit of the stack size, while making very
3715      // slow forward progress. It may be best to bail out and
3716      // let the foreground collector do its job.
3717      // Clear _restart_addr, so that foreground GC
3718      // works from scratch. This avoids the headache of
3719      // a "rescan" which would otherwise be needed because
3720      // of the dirty mod union table & card table.
3721      _restart_addr = NULL;
3722      return false;  // indicating failure to complete marking
3723    }
3724    // Deal with stack overflow:
3725    // we restart marking from _restart_addr
3726    HeapWord* ra = _restart_addr;
3727    markFromRootsClosure.reset(ra);
3728    _restart_addr = NULL;
3729    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3730  }
3731  return true;
3732}
3733
3734void CMSCollector::preclean() {
3735  check_correct_thread_executing();
3736  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3737  verify_work_stacks_empty();
3738  verify_overflow_empty();
3739  _abort_preclean = false;
3740  if (CMSPrecleaningEnabled) {
3741    if (!CMSEdenChunksRecordAlways) {
3742      _eden_chunk_index = 0;
3743    }
3744    size_t used = get_eden_used();
3745    size_t capacity = get_eden_capacity();
3746    // Don't start sampling unless we will get sufficiently
3747    // many samples.
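    // Rough numeric sketch of the test below (flag values assumed for
    // illustration): with CMSScheduleRemarkSamplingRatio == 5 and
    // CMSScheduleRemarkEdenPenetration == 50, sampling starts only if eden is
    // currently below capacity/(5*100)*50, i.e. below 10% of capacity, so
    // that there is still room to collect a useful number of samples before
    // the target occupancy is reached.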
3748    if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3749                * CMSScheduleRemarkEdenPenetration)) {
3750      _start_sampling = true;
3751    } else {
3752      _start_sampling = false;
3753    }
3754    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3755    CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
3756    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3757  }
3758  CMSTokenSync x(true); // is cms thread
3759  if (CMSPrecleaningEnabled) {
3760    sample_eden();
3761    _collectorState = AbortablePreclean;
3762  } else {
3763    _collectorState = FinalMarking;
3764  }
3765  verify_work_stacks_empty();
3766  verify_overflow_empty();
3767}
3768
3769// Try and schedule the remark such that young gen
3770// occupancy is CMSScheduleRemarkEdenPenetration %.
3771void CMSCollector::abortable_preclean() {
3772  check_correct_thread_executing();
3773  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3774  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3775
3776  // If Eden's current occupancy is below this threshold,
3777  // immediately schedule the remark; else preclean
3778  // past the next scavenge in an effort to
3779  // schedule the pause as described above. By choosing
3780  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3781  // we will never do an actual abortable preclean cycle.
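  // For example (threshold value assumed, not necessarily the default): with
  // CMSScheduleRemarkEdenSizeThreshold == 2M, an eden already holding more
  // than 2M of objects enters the abortable preclean loop below, while an
  // emptier eden skips it and proceeds directly to scheduling the remark.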
3782  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3783    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3784    CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
3785    // We need more smarts in the abortable preclean
3786    // loop below to deal with cases where allocation
3787    // in young gen is very very slow, and our precleaning
3788    // is running a losing race against a horde of
3789    // mutators intent on flooding us with CMS updates
3790    // (dirty cards).
3791    // One, admittedly dumb, strategy is to give up
3792    // after a certain number of abortable precleaning loops
3793    // or after a certain maximum time. We want to make
3794    // this smarter in the next iteration.
3795    // XXX FIX ME!!! YSR
3796    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3797    while (!(should_abort_preclean() ||
3798             ConcurrentMarkSweepThread::should_terminate())) {
3799      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3800      cumworkdone += workdone;
3801      loops++;
3802      // Voluntarily terminate abortable preclean phase if we have
3803      // been at it for too long.
3804      if ((CMSMaxAbortablePrecleanLoops != 0) &&
3805          loops >= CMSMaxAbortablePrecleanLoops) {
3806        if (PrintGCDetails) {
3807          gclog_or_tty->print(" CMS: abort preclean due to loops ");
3808        }
3809        break;
3810      }
3811      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3812        if (PrintGCDetails) {
3813          gclog_or_tty->print(" CMS: abort preclean due to time ");
3814        }
3815        break;
3816      }
3817      // If we are doing little work each iteration, we should
3818      // take a short break.
3819      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3820        // Sleep for some time, waiting for work to accumulate
3821        stopTimer();
3822        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3823        startTimer();
3824        waited++;
3825      }
3826    }
3827    if (PrintCMSStatistics > 0) {
3828      gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3829                          loops, waited, cumworkdone);
3830    }
3831  }
3832  CMSTokenSync x(true); // is cms thread
3833  if (_collectorState != Idling) {
3834    assert(_collectorState == AbortablePreclean,
3835           "Spontaneous state transition?");
3836    _collectorState = FinalMarking;
3837  } // Else, a foreground collection completed this CMS cycle.
3838  return;
3839}
3840
3841// Respond to an Eden sampling opportunity
3842void CMSCollector::sample_eden() {
3843  // Make sure a young gc cannot sneak in between our
3844  // reading and recording of a sample.
3845  assert(Thread::current()->is_ConcurrentGC_thread(),
3846         "Only the cms thread may collect Eden samples");
3847  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3848         "Should collect samples while holding CMS token");
3849  if (!_start_sampling) {
3850    return;
3851  }
3852  // When CMSEdenChunksRecordAlways is true, the eden chunk array
3853  // is populated by the young generation.
3854  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3855    if (_eden_chunk_index < _eden_chunk_capacity) {
3856      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3857      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3858             "Unexpected state of Eden");
3859      // We'd like to check that what we just sampled is an oop-start address;
3860      // however, we cannot do that here since the object may not yet have been
3861      // initialized. So we'll instead do the check when we _use_ this sample
3862      // later.
3863      if (_eden_chunk_index == 0 ||
3864          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3865                         _eden_chunk_array[_eden_chunk_index-1])
3866           >= CMSSamplingGrain)) {
3867        _eden_chunk_index++;  // commit sample
3868      }
3869    }
3870  }
3871  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3872    size_t used = get_eden_used();
3873    size_t capacity = get_eden_capacity();
3874    assert(used <= capacity, "Unexpected state of Eden");
3875    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3876      _abort_preclean = true;
3877    }
3878  }
3879}
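// Numeric sketch of the abort test above (penetration value assumed for
// illustration): with CMSScheduleRemarkEdenPenetration == 50 and an eden
// capacity of 512M, _abort_preclean is set once eden usage exceeds
// 512M/100*50 == 256M; the should_abort_preclean() check in
// abortable_preclean() then ends the abortable preclean loop so that the
// remark can be scheduled near the desired eden occupancy.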
3880
3881
3882size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3883  assert(_collectorState == Precleaning ||
3884         _collectorState == AbortablePreclean, "incorrect state");
3885  ResourceMark rm;
3886  HandleMark   hm;
3887
3888  // Precleaning is currently not MT but the reference processor
3889  // may be set for MT.  Disable it temporarily here.
3890  ReferenceProcessor* rp = ref_processor();
3891  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3892
3893  // Do one pass of scrubbing the discovered reference lists
3894  // to remove any reference objects with strongly-reachable
3895  // referents.
3896  if (clean_refs) {
3897    CMSPrecleanRefsYieldClosure yield_cl(this);
3898    assert(rp->span().equals(_span), "Spans should be equal");
3899    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3900                                   &_markStack, true /* preclean */);
3901    CMSDrainMarkingStackClosure complete_trace(this,
3902                                   _span, &_markBitMap, &_markStack,
3903                                   &keep_alive, true /* preclean */);
3904
3905    // We don't want this step to interfere with a young
3906    // collection because we don't want to take CPU
3907    // or memory bandwidth away from the young GC threads
3908    // (which may be as many as there are CPUs).
3909    // Note that we don't need to protect ourselves from
3910    // interference with mutators because they can't
3911    // manipulate the discovered reference lists nor affect
3912    // the computed reachability of the referents, the
3913    // only properties manipulated by the precleaning
3914    // of these reference lists.
3915    stopTimer();
3916    CMSTokenSyncWithLocks x(true /* is cms thread */,
3917                            bitMapLock());
3918    startTimer();
3919    sample_eden();
3920
3921    // The following will yield to allow foreground
3922    // collection to proceed promptly. XXX YSR:
3923    // The code in this method may need further
3924    // tweaking for better performance and some restructuring
3925    // for cleaner interfaces.
3926    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3927    rp->preclean_discovered_references(
3928          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3929          gc_timer);
3930  }
3931
3932  if (clean_survivor) {  // preclean the active survivor space(s)
3933    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3934                             &_markBitMap, &_modUnionTable,
3935                             &_markStack, true /* precleaning phase */);
3936    stopTimer();
3937    CMSTokenSyncWithLocks ts(true /* is cms thread */,
3938                             bitMapLock());
3939    startTimer();
3940    unsigned int before_count =
3941      GenCollectedHeap::heap()->total_collections();
3942    SurvivorSpacePrecleanClosure
3943      sss_cl(this, _span, &_markBitMap, &_markStack,
3944             &pam_cl, before_count, CMSYield);
3945    _young_gen->from()->object_iterate_careful(&sss_cl);
3946    _young_gen->to()->object_iterate_careful(&sss_cl);
3947  }
3948  MarkRefsIntoAndScanClosure
3949    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3950             &_markStack, this, CMSYield,
3951             true /* precleaning phase */);
3952  // CAUTION: The following closure has persistent state that may need to
3953  // be reset upon a decrease in the sequence of addresses it
3954  // processes.
3955  ScanMarkedObjectsAgainCarefullyClosure
3956    smoac_cl(this, _span,
3957      &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3958
3959  // Preclean dirty cards in ModUnionTable and CardTable using
3960  // appropriate convergence criterion;
3961  // repeat CMSPrecleanIter times unless we find that
3962  // we are losing.
3963  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3964  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3965         "Bad convergence multiplier");
3966  assert(CMSPrecleanThreshold >= 100,
3967         "Unreasonably low CMSPrecleanThreshold");
3968
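  // Worked illustration of the convergence test in the loop below
  // (assuming, say, a CMSPrecleanNumerator/CMSPrecleanDenominator ratio
  // of 2/3 and CMSPrecleanThreshold == 1000): we give up and go to remark
  // either when an iteration finds at most 1000 dirty cards, or when the
  // count fails to shrink by at least a third, e.g.
  //   lastNumCards = 9000, curNumCards = 7000:
  //   7000 * 3 > 9000 * 2, so precleaning is no longer paying off.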
3969  size_t numIter, cumNumCards, lastNumCards, curNumCards;
3970  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3971       numIter < CMSPrecleanIter;
3972       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3973    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3974    if (Verbose && PrintGCDetails) {
3975      gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3976    }
3977    // Either there are very few dirty cards, so re-mark
3978    // pause will be small anyway, or our pre-cleaning isn't
3979    // that much faster than the rate at which cards are being
3980    // dirtied, so we might as well stop and re-mark since
3981    // precleaning won't improve our re-mark time by much.
3982    if (curNumCards <= CMSPrecleanThreshold ||
3983        (numIter > 0 &&
3984         (curNumCards * CMSPrecleanDenominator >
3985         lastNumCards * CMSPrecleanNumerator))) {
3986      numIter++;
3987      cumNumCards += curNumCards;
3988      break;
3989    }
3990  }
3991
3992  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3993
3994  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3995  cumNumCards += curNumCards;
3996  if (PrintGCDetails && PrintCMSStatistics != 0) {
3997    gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3998                  curNumCards, cumNumCards, numIter);
3999  }
4000  return cumNumCards;   // as a measure of useful work done
4001}
4002
4003// PRECLEANING NOTES:
4004// Precleaning involves:
4005// . reading the bits of the modUnionTable and clearing the set bits.
4006// . For the cards corresponding to the set bits, we scan the
4007//   objects on those cards. This means we need the free_list_lock
4008//   so that we can safely iterate over the CMS space when scanning
4009//   for oops.
4010// . When we scan the objects, we'll be both reading and setting
4011//   marks in the marking bit map, so we'll need the marking bit map.
4012// . For protecting _collector_state transitions, we take the CGC_lock.
4013//   Note that any races in the reading of card table entries by the
4014//   CMS thread on the one hand and the clearing of those entries by the
4015//   VM thread or the setting of those entries by the mutator threads on the
4016//   other are quite benign. However, for efficiency it makes sense to keep
4017//   the VM thread from racing with the CMS thread while the latter is
4018//   transferring dirty card info to the modUnionTable. We therefore also use the
4019//   CGC_lock to protect the reading of the card table and the mod union
4020//   table by the CMS thread.
4021// . We run concurrently with mutator updates, so scanning
4022//   needs to be done carefully  -- we should not try to scan
4023//   potentially uninitialized objects.
4024//
4025// Locking strategy: While holding the CGC_lock, we scan over and
4026// reset a maximal dirty range of the mod union / card tables, then lock
4027// the free_list_lock and bitmap lock to do a full marking, then
4028// release these locks; and repeat the cycle. This allows for a
4029// certain amount of fairness in the sharing of these locks between
4030// the CMS collector on the one hand, and the VM thread and the
4031// mutators on the other.
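// Schematically, the cycle implemented by the two preclean methods below
// (a sketch of the existing code, not additional logic):
//
//   while (there may be more dirty cards) {
//     { CMSTokenSync x(true);                  // CGC_lock-based handshake
//       dirtyRegion = get and reset a maximal dirty range
//                     of the mod union / card table; }
//     { CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
//       scan and mark the objects on dirtyRegion; }  // locks released on exit
//   }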
4032
4033// NOTE: preclean_mod_union_table() and preclean_card_table()
4034// further below are largely identical; if you need to modify
4035// one of these methods, please check the other method too.
4036
4037size_t CMSCollector::preclean_mod_union_table(
4038  ConcurrentMarkSweepGeneration* old_gen,
4039  ScanMarkedObjectsAgainCarefullyClosure* cl) {
4040  verify_work_stacks_empty();
4041  verify_overflow_empty();
4042
4043  // strategy: starting with the first card, accumulate contiguous
4044  // ranges of dirty cards; clear these cards, then scan the region
4045  // covered by these cards.
4046
4047  // Since all of the MUT is committed ahead, we can just use
4048  // that, in case the generations expand while we are precleaning.
4049  // It might also be fine to just use the committed part of the
4050  // generation, but we might potentially miss cards when the
4051  // generation is rapidly expanding while we are in the midst
4052  // of precleaning.
4053  HeapWord* startAddr = old_gen->reserved().start();
4054  HeapWord* endAddr   = old_gen->reserved().end();
4055
4056  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4057
4058  size_t numDirtyCards, cumNumDirtyCards;
4059  HeapWord *nextAddr, *lastAddr;
4060  for (cumNumDirtyCards = numDirtyCards = 0,
4061       nextAddr = lastAddr = startAddr;
4062       nextAddr < endAddr;
4063       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4064
4065    ResourceMark rm;
4066    HandleMark   hm;
4067
4068    MemRegion dirtyRegion;
4069    {
4070      stopTimer();
4071      // Potential yield point
4072      CMSTokenSync ts(true);
4073      startTimer();
4074      sample_eden();
4075      // Get dirty region starting at nextAddr (inclusive),
4076      // simultaneously clearing it.
4077      dirtyRegion =
4078        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4079      assert(dirtyRegion.start() >= nextAddr,
4080             "returned region inconsistent?");
4081    }
4082    // Remember where the next search should begin.
4083    // The returned region (if non-empty) is a right open interval,
4084    // so lastAddr is obtained from the right end of that
4085    // interval.
4086    lastAddr = dirtyRegion.end();
4087    // Should do something more transparent and less hacky XXX
4088    numDirtyCards =
4089      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4090
4091    // We'll scan the cards in the dirty region (with periodic
4092    // yields for foreground GC as needed).
4093    if (!dirtyRegion.is_empty()) {
4094      assert(numDirtyCards > 0, "consistency check");
4095      HeapWord* stop_point = NULL;
4096      stopTimer();
4097      // Potential yield point
4098      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
4099                               bitMapLock());
4100      startTimer();
4101      {
4102        verify_work_stacks_empty();
4103        verify_overflow_empty();
4104        sample_eden();
4105        stop_point =
4106          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4107      }
4108      if (stop_point != NULL) {
4109        // The careful iteration stopped early either because it found an
4110        // uninitialized object, or because we were in the midst of an
4111        // "abortable preclean", which should now be aborted. Redirty
4112        // the bits corresponding to the partially-scanned or unscanned
4113        // cards. We'll either restart at the next block boundary or
4114        // abort the preclean.
4115        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4116               "Should only be AbortablePreclean.");
4117        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4118        if (should_abort_preclean()) {
4119          break; // out of preclean loop
4120        } else {
4121          // Compute the next address at which preclean should pick up;
4122          // might need bitMapLock in order to read P-bits.
4123          lastAddr = next_card_start_after_block(stop_point);
4124        }
4125      }
4126    } else {
4127      assert(lastAddr == endAddr, "consistency check");
4128      assert(numDirtyCards == 0, "consistency check");
4129      break;
4130    }
4131  }
4132  verify_work_stacks_empty();
4133  verify_overflow_empty();
4134  return cumNumDirtyCards;
4135}
4136
4137// NOTE: preclean_mod_union_table() above and preclean_card_table()
4138// below are largely identical; if you need to modify
4139// one of these methods, please check the other method too.
4140
4141size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
4142  ScanMarkedObjectsAgainCarefullyClosure* cl) {
4143  // strategy: it's similar to preclean_mod_union_table above, in that
4144  // we accumulate contiguous ranges of dirty cards, mark these cards
4145  // precleaned, then scan the region covered by these cards.
4146  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
4147  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
4148
4149  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4150
4151  size_t numDirtyCards, cumNumDirtyCards;
4152  HeapWord *lastAddr, *nextAddr;
4153
4154  for (cumNumDirtyCards = numDirtyCards = 0,
4155       nextAddr = lastAddr = startAddr;
4156       nextAddr < endAddr;
4157       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4158
4159    ResourceMark rm;
4160    HandleMark   hm;
4161
4162    MemRegion dirtyRegion;
4163    {
4164      // See comments in "Precleaning notes" above on why we
4165      // do this locking. XXX Could the locking overheads be
4166      // too high when dirty cards are sparse? [I don't think so.]
4167      stopTimer();
4168      CMSTokenSync x(true); // is cms thread
4169      startTimer();
4170      sample_eden();
4171      // Get and clear dirty region from card table
4172      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4173                                    MemRegion(nextAddr, endAddr),
4174                                    true,
4175                                    CardTableModRefBS::precleaned_card_val());
4176
4177      assert(dirtyRegion.start() >= nextAddr,
4178             "returned region inconsistent?");
4179    }
4180    lastAddr = dirtyRegion.end();
4181    numDirtyCards =
4182      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4183
4184    if (!dirtyRegion.is_empty()) {
4185      stopTimer();
4186      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4187      startTimer();
4188      sample_eden();
4189      verify_work_stacks_empty();
4190      verify_overflow_empty();
4191      HeapWord* stop_point =
4192        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4193      if (stop_point != NULL) {
4194        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4195               "Should only be AbortablePreclean.");
4196        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4197        if (should_abort_preclean()) {
4198          break; // out of preclean loop
4199        } else {
4200          // Compute the next address at which preclean should pick up.
4201          lastAddr = next_card_start_after_block(stop_point);
4202        }
4203      }
4204    } else {
4205      break;
4206    }
4207  }
4208  verify_work_stacks_empty();
4209  verify_overflow_empty();
4210  return cumNumDirtyCards;
4211}
4212
4213class PrecleanKlassClosure : public KlassClosure {
4214  KlassToOopClosure _cm_klass_closure;
4215 public:
4216  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4217  void do_klass(Klass* k) {
4218    if (k->has_accumulated_modified_oops()) {
4219      k->clear_accumulated_modified_oops();
4220
4221      _cm_klass_closure.do_klass(k);
4222    }
4223  }
4224};
4225
4226// The freelist lock is needed to prevent asserts; is it really needed?
4227void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4228
4229  cl->set_freelistLock(freelistLock);
4230
4231  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4232
4233  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4234  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4235  PrecleanKlassClosure preclean_klass_closure(cl);
4236  ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4237
4238  verify_work_stacks_empty();
4239  verify_overflow_empty();
4240}
4241
4242void CMSCollector::checkpointRootsFinal() {
4243  assert(_collectorState == FinalMarking, "incorrect state transition?");
4244  check_correct_thread_executing();
4245  // world is stopped at this checkpoint
4246  assert(SafepointSynchronize::is_at_safepoint(),
4247         "world should be stopped");
4248  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4249
4250  verify_work_stacks_empty();
4251  verify_overflow_empty();
4252
4253  if (PrintGCDetails) {
4254    gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4255                        _young_gen->used() / K,
4256                        _young_gen->capacity() / K);
4257  }
4258  {
4259    if (CMSScavengeBeforeRemark) {
4260      GenCollectedHeap* gch = GenCollectedHeap::heap();
4261      // Temporarily set the flag to false; GCH->do_collection
4262      // expects it to be false and will set it to true.
4263      FlagSetting fl(gch->_is_gc_active, false);
4264      NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4265        PrintGCDetails && Verbose, true, _gc_timer_cm);)
4266      gch->do_collection(true,                      // full (i.e. force, see below)
4267                         false,                     // !clear_all_soft_refs
4268                         0,                         // size
4269                         false,                     // is_tlab
4270                         GenCollectedHeap::YoungGen // type
4271        );
4272    }
4273    FreelistLocker x(this);
4274    MutexLockerEx y(bitMapLock(),
4275                    Mutex::_no_safepoint_check_flag);
4276    checkpointRootsFinalWork();
4277  }
4278  verify_work_stacks_empty();
4279  verify_overflow_empty();
4280}
4281
4282void CMSCollector::checkpointRootsFinalWork() {
4283  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
4284
4285  assert(haveFreelistLocks(), "must have free list locks");
4286  assert_lock_strong(bitMapLock());
4287
4288  ResourceMark rm;
4289  HandleMark   hm;
4290
4291  GenCollectedHeap* gch = GenCollectedHeap::heap();
4292
4293  if (should_unload_classes()) {
4294    CodeCache::gc_prologue();
4295  }
4296  assert(haveFreelistLocks(), "must have free list locks");
4297  assert_lock_strong(bitMapLock());
4298
4299  // We might assume that we need not fill TLAB's when
4300  // CMSScavengeBeforeRemark is set, because we may have just done
4301  // a scavenge which would have filled all TLAB's -- and besides
4302  // Eden would be empty. This however may not always be the case --
4303  // for instance although we asked for a scavenge, it may not have
4304  // happened because of a JNI critical section. We probably need
4305  // a policy for deciding whether we can in that case wait until
4306  // the critical section releases and then do the remark following
4307  // the scavenge, and skip it here. In the absence of that policy,
4308  // or of an indication of whether the scavenge did indeed occur,
4309  // we cannot rely on TLAB's having been filled and must do
4310  // so here just in case a scavenge did not happen.
4311  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4312  // Update the saved marks which may affect the root scans.
4313  gch->save_marks();
4314
4315  if (CMSPrintEdenSurvivorChunks) {
4316    print_eden_and_survivor_chunk_arrays();
4317  }
4318
4319  {
4320#if defined(COMPILER2) || INCLUDE_JVMCI
4321    DerivedPointerTableDeactivate dpt_deact;
4322#endif
4323
4324    // Note on the role of the mod union table:
4325    // Since the marker in "markFromRoots" marks concurrently with
4326    // mutators, it is possible for some reachable objects not to have been
4327    // scanned. For instance, an only reference to an object A was
4328    // placed in object B after the marker scanned B. Unless B is rescanned,
4329    // A would be collected. Such updates to references in marked objects
4330    // are detected via the mod union table which is the set of all cards
4331    // dirtied since the first checkpoint in this GC cycle and prior to
4332    // the most recent young generation GC, minus those cleaned up by the
4333    // concurrent precleaning.
4334    if (CMSParallelRemarkEnabled) {
4335      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
4336      do_remark_parallel();
4337    } else {
4338      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);
4339      do_remark_non_parallel();
4340    }
4341  }
4342  verify_work_stacks_empty();
4343  verify_overflow_empty();
4344
4345  {
4346    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
4347    refProcessingWork();
4348  }
4349  verify_work_stacks_empty();
4350  verify_overflow_empty();
4351
4352  if (should_unload_classes()) {
4353    CodeCache::gc_epilogue();
4354  }
4355  JvmtiExport::gc_epilogue();
4356
4357  // If we encountered any (marking stack / work queue) overflow
4358  // events during the current CMS cycle, take appropriate
4359  // remedial measures, where possible, so as to try and avoid
4360  // recurrence of that condition.
4361  assert(_markStack.isEmpty(), "No grey objects");
4362  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4363                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4364  if (ser_ovflw > 0) {
4365    if (PrintCMSStatistics != 0) {
4366      gclog_or_tty->print_cr("Marking stack overflow (benign) "
4367        "(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT
4368        ", kac_preclean=" SIZE_FORMAT ")",
4369        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4370        _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4371    }
4372    _markStack.expand();
4373    _ser_pmc_remark_ovflw = 0;
4374    _ser_pmc_preclean_ovflw = 0;
4375    _ser_kac_preclean_ovflw = 0;
4376    _ser_kac_ovflw = 0;
4377  }
4378  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4379    if (PrintCMSStatistics != 0) {
4380      gclog_or_tty->print_cr("Work queue overflow (benign) "
4381        "(pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4382        _par_pmc_remark_ovflw, _par_kac_ovflw);
4383    }
4384    _par_pmc_remark_ovflw = 0;
4385    _par_kac_ovflw = 0;
4386  }
4387  if (PrintCMSStatistics != 0) {
4388     if (_markStack._hit_limit > 0) {
4389       gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4390                              _markStack._hit_limit);
4391     }
4392     if (_markStack._failed_double > 0) {
4393       gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT "),"
4394                              " current capacity " SIZE_FORMAT,
4395                              _markStack._failed_double,
4396                              _markStack.capacity());
4397     }
4398  }
4399  _markStack._hit_limit = 0;
4400  _markStack._failed_double = 0;
4401
4402  if ((VerifyAfterGC || VerifyDuringGC) &&
4403      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4404    verify_after_remark();
4405  }
4406
4407  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4408
4409  // Change under the freelistLocks.
4410  _collectorState = Sweeping;
4411  // Call isAllClear() under bitMapLock
4412  assert(_modUnionTable.isAllClear(),
4413      "Should be clear by end of the final marking");
4414  assert(_ct->klass_rem_set()->mod_union_is_clear(),
4415      "Should be clear by end of the final marking");
4416}
4417
4418void CMSParInitialMarkTask::work(uint worker_id) {
4419  elapsedTimer _timer;
4420  ResourceMark rm;
4421  HandleMark   hm;
4422
4423  // ---------- scan from roots --------------
4424  _timer.start();
4425  GenCollectedHeap* gch = GenCollectedHeap::heap();
4426  Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4427
4428  // ---------- young gen roots --------------
4429  {
4430    work_on_young_gen_roots(worker_id, &par_mri_cl);
4431    _timer.stop();
4432    if (PrintCMSStatistics != 0) {
4433      gclog_or_tty->print_cr(
4434        "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4435        worker_id, _timer.seconds());
4436    }
4437  }
4438
4439  // ---------- remaining roots --------------
4440  _timer.reset();
4441  _timer.start();
4442
4443  CLDToOopClosure cld_closure(&par_mri_cl, true);
4444
4445  gch->gen_process_roots(_strong_roots_scope,
4446                         GenCollectedHeap::OldGen,
4447                         false,     // yg was scanned above
4448                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4449                         _collector->should_unload_classes(),
4450                         &par_mri_cl,
4451                         NULL,
4452                         &cld_closure);
4453  assert(_collector->should_unload_classes()
4454         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4455         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4456  _timer.stop();
4457  if (PrintCMSStatistics != 0) {
4458    gclog_or_tty->print_cr(
4459      "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4460      worker_id, _timer.seconds());
4461  }
4462}
4463
4464// Parallel remark task
4465class CMSParRemarkTask: public CMSParMarkTask {
4466  CompactibleFreeListSpace* _cms_space;
4467
4468  // The per-thread work queues, available here for stealing.
4469  OopTaskQueueSet*       _task_queues;
4470  ParallelTaskTerminator _term;
4471  StrongRootsScope*      _strong_roots_scope;
4472
4473 public:
4474  // A value of 0 passed to n_workers will cause the number of
4475  // workers to be taken from the active workers in the work gang.
4476  CMSParRemarkTask(CMSCollector* collector,
4477                   CompactibleFreeListSpace* cms_space,
4478                   uint n_workers, WorkGang* workers,
4479                   OopTaskQueueSet* task_queues,
4480                   StrongRootsScope* strong_roots_scope):
4481    CMSParMarkTask("Rescan roots and grey objects in parallel",
4482                   collector, n_workers),
4483    _cms_space(cms_space),
4484    _task_queues(task_queues),
4485    _term(n_workers, task_queues),
4486    _strong_roots_scope(strong_roots_scope) { }
4487
4488  OopTaskQueueSet* task_queues() { return _task_queues; }
4489
4490  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4491
4492  ParallelTaskTerminator* terminator() { return &_term; }
4493  uint n_workers() { return _n_workers; }
4494
4495  void work(uint worker_id);
4496
4497 private:
4498  // ... of dirty cards in old space
4499  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4500                                  Par_MarkRefsIntoAndScanClosure* cl);
4501
4502  // ... work stealing for the above
4503  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4504};
4505
4506class RemarkKlassClosure : public KlassClosure {
4507  KlassToOopClosure _cm_klass_closure;
4508 public:
4509  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4510  void do_klass(Klass* k) {
4511    // Check if we have modified any oops in the Klass during the concurrent marking.
4512    if (k->has_accumulated_modified_oops()) {
4513      k->clear_accumulated_modified_oops();
4514
4515    // We could have transferred the current modified marks to the accumulated marks,
4516      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4517    } else if (k->has_modified_oops()) {
4518      // Don't clear anything, this info is needed by the next young collection.
4519    } else {
4520      // No modified oops in the Klass.
4521      return;
4522    }
4523
4524    // The klass has modified fields, need to scan the klass.
4525    _cm_klass_closure.do_klass(k);
4526  }
4527};
4528
4529void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4530  ParNewGeneration* young_gen = _collector->_young_gen;
4531  ContiguousSpace* eden_space = young_gen->eden();
4532  ContiguousSpace* from_space = young_gen->from();
4533  ContiguousSpace* to_space   = young_gen->to();
4534
4535  HeapWord** eca = _collector->_eden_chunk_array;
4536  size_t     ect = _collector->_eden_chunk_index;
4537  HeapWord** sca = _collector->_survivor_chunk_array;
4538  size_t     sct = _collector->_survivor_chunk_index;
4539
4540  assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4541  assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4542
4543  do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4544  do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4545  do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4546}
4547
4548// work_queue(i) is passed to the closure
4549// Par_MarkRefsIntoAndScanClosure.  The "i" parameter
4550// also is passed to do_dirty_card_rescan_tasks() and to
4551// do_work_steal() to select the i-th task_queue.
4552
4553void CMSParRemarkTask::work(uint worker_id) {
4554  elapsedTimer _timer;
4555  ResourceMark rm;
4556  HandleMark   hm;
4557
4558  // ---------- rescan from roots --------------
4559  _timer.start();
4560  GenCollectedHeap* gch = GenCollectedHeap::heap();
4561  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4562    _collector->_span, _collector->ref_processor(),
4563    &(_collector->_markBitMap),
4564    work_queue(worker_id));
4565
4566  // Rescan young gen roots first since these are likely
4567  // coarsely partitioned and may, on that account, constitute
4568  // the critical path; thus, it's best to start off that
4569  // work first.
4570  // ---------- young gen roots --------------
4571  {
4572    work_on_young_gen_roots(worker_id, &par_mrias_cl);
4573    _timer.stop();
4574    if (PrintCMSStatistics != 0) {
4575      gclog_or_tty->print_cr(
4576        "Finished young gen rescan work in %dth thread: %3.3f sec",
4577        worker_id, _timer.seconds());
4578    }
4579  }
4580
4581  // ---------- remaining roots --------------
4582  _timer.reset();
4583  _timer.start();
4584  gch->gen_process_roots(_strong_roots_scope,
4585                         GenCollectedHeap::OldGen,
4586                         false,     // yg was scanned above
4587                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4588                         _collector->should_unload_classes(),
4589                         &par_mrias_cl,
4590                         NULL,
4591                         NULL);     // The dirty klasses will be handled below
4592
4593  assert(_collector->should_unload_classes()
4594         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4595         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4596  _timer.stop();
4597  if (PrintCMSStatistics != 0) {
4598    gclog_or_tty->print_cr(
4599      "Finished remaining root rescan work in %dth thread: %3.3f sec",
4600      worker_id, _timer.seconds());
4601  }
4602
4603  // ---------- unhandled CLD scanning ----------
4604  if (worker_id == 0) { // Single threaded at the moment.
4605    _timer.reset();
4606    _timer.start();
4607
4608    // Scan all new class loader data objects and new dependencies that were
4609    // introduced during concurrent marking.
4610    ResourceMark rm;
4611    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4612    for (int i = 0; i < array->length(); i++) {
4613      par_mrias_cl.do_cld_nv(array->at(i));
4614    }
4615
4616    // We don't need to keep track of new CLDs anymore.
4617    ClassLoaderDataGraph::remember_new_clds(false);
4618
4619    _timer.stop();
4620    if (PrintCMSStatistics != 0) {
4621      gclog_or_tty->print_cr(
4622          "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
4623          worker_id, _timer.seconds());
4624    }
4625  }
4626
4627  // ---------- dirty klass scanning ----------
4628  if (worker_id == 0) { // Single threaded at the moment.
4629    _timer.reset();
4630    _timer.start();
4631
4632    // Scan all classes that were dirtied during the concurrent marking phase.
4633    RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4634    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4635
4636    _timer.stop();
4637    if (PrintCMSStatistics != 0) {
4638      gclog_or_tty->print_cr(
4639          "Finished dirty klass scanning work in %dth thread: %3.3f sec",
4640          worker_id, _timer.seconds());
4641    }
4642  }
4643
4644  // We might have added oops to ClassLoaderData::_handles during the
4645  // concurrent marking phase. These oops point to newly allocated objects
4646  // that are guaranteed to be kept alive. Either by the direct allocation
4647  // code, or when the young collector processes the roots. Hence,
4648  // we don't have to revisit the _handles block during the remark phase.
4649
4650  // ---------- rescan dirty cards ------------
4651  _timer.reset();
4652  _timer.start();
4653
4654  // Do the rescan tasks for each of the two spaces
4655  // (cms_space) in turn.
4656  // "worker_id" is passed to select the task_queue for "worker_id"
4657  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4658  _timer.stop();
4659  if (PrintCMSStatistics != 0) {
4660    gclog_or_tty->print_cr(
4661      "Finished dirty card rescan work in %dth thread: %3.3f sec",
4662      worker_id, _timer.seconds());
4663  }
4664
4665  // ---------- steal work from other threads ...
4666  // ---------- ... and drain overflow list.
4667  _timer.reset();
4668  _timer.start();
4669  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4670  _timer.stop();
4671  if (PrintCMSStatistics != 0) {
4672    gclog_or_tty->print_cr(
4673      "Finished work stealing in %dth thread: %3.3f sec",
4674      worker_id, _timer.seconds());
4675  }
4676}
4677
4678// Note that the worker_id parameter is not used.
4679void
4680CMSParMarkTask::do_young_space_rescan(uint worker_id,
4681  OopsInGenClosure* cl, ContiguousSpace* space,
4682  HeapWord** chunk_array, size_t chunk_top) {
4683  // Until all tasks completed:
4684  // . claim an unclaimed task
4685  // . compute region boundaries corresponding to task claimed
4686  //   using chunk_array
4687  // . par_oop_iterate(cl) over that region
4688
4689  ResourceMark rm;
4690  HandleMark   hm;
4691
4692  SequentialSubTasksDone* pst = space->par_seq_tasks();
4693
4694  uint nth_task = 0;
4695  uint n_tasks  = pst->n_tasks();
4696
4697  if (n_tasks > 0) {
4698    assert(pst->valid(), "Uninitialized use?");
4699    HeapWord *start, *end;
4700    while (!pst->is_task_claimed(/* reference */ nth_task)) {
4701      // We claimed task # nth_task; compute its boundaries.
4702      if (chunk_top == 0) {  // no samples were taken
4703        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4704        start = space->bottom();
4705        end   = space->top();
4706      } else if (nth_task == 0) {
4707        start = space->bottom();
4708        end   = chunk_array[nth_task];
4709      } else if (nth_task < (uint)chunk_top) {
4710        assert(nth_task >= 1, "Control point invariant");
4711        start = chunk_array[nth_task - 1];
4712        end   = chunk_array[nth_task];
4713      } else {
4714        assert(nth_task == (uint)chunk_top, "Control point invariant");
4715        start = chunk_array[chunk_top - 1];
4716        end   = space->top();
4717      }
4718      MemRegion mr(start, end);
4719      // Verify that mr is in space
4720      assert(mr.is_empty() || space->used_region().contains(mr),
4721             "Should be in space");
4722      // Verify that "start" is an object boundary
4723      assert(mr.is_empty() || oop(mr.start())->is_oop(),
4724             "Should be an oop");
4725      space->par_oop_iterate(mr, cl);
4726    }
4727    pst->all_tasks_completed();
4728  }
4729}
4730
4731void
4732CMSParRemarkTask::do_dirty_card_rescan_tasks(
4733  CompactibleFreeListSpace* sp, int i,
4734  Par_MarkRefsIntoAndScanClosure* cl) {
4735  // Until all tasks completed:
4736  // . claim an unclaimed task
4737  // . compute region boundaries corresponding to task claimed
4738  // . transfer dirty bits ct->mut for that region
4739  // . apply rescanclosure to dirty mut bits for that region
4740
4741  ResourceMark rm;
4742  HandleMark   hm;
4743
4744  OopTaskQueue* work_q = work_queue(i);
4745  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4746  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4747  // CAUTION: This closure has state that persists across calls to
4748  // the work method dirty_range_iterate_clear() in that it has
4749  // embedded in it a (subtype of) UpwardsObjectClosure. The
4750  // use of that state in the embedded UpwardsObjectClosure instance
4751  // assumes that the cards are always iterated (even if in parallel
4752  // by several threads) in monotonically increasing order per each
4753  // thread. This is true of the implementation below which picks
4754  // card ranges (chunks) in monotonically increasing order globally
4755  // and, a-fortiori, in monotonically increasing order per thread
4756  // (the latter order being a subsequence of the former).
4757  // If the work code below is ever reorganized into a more chaotic
4758  // work-partitioning form than the current "sequential tasks"
4759  // paradigm, the use of that persistent state will have to be
4760  // revisited and modified appropriately. See also related
4761  // bug 4756801 work on which should examine this code to make
4762  // sure that the changes there do not run counter to the
4763  // assumptions made here and necessary for correctness and
4764  // efficiency. Note also that this code might yield inefficient
4765  // behavior in the case of very large objects that span one or
4766  // more work chunks. Such objects would potentially be scanned
4767  // several times redundantly. Work on 4756801 should try and
4768  // address that performance anomaly if at all possible. XXX
4769  MemRegion  full_span  = _collector->_span;
4770  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4771  MarkFromDirtyCardsClosure
4772    greyRescanClosure(_collector, full_span, // entire span of interest
4773                      sp, bm, work_q, cl);
4774
4775  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4776  assert(pst->valid(), "Uninitialized use?");
4777  uint nth_task = 0;
4778  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4779  MemRegion span = sp->used_region();
4780  HeapWord* start_addr = span.start();
4781  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4782                                           alignment);
4783  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4784  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4785         start_addr, "Check alignment");
4786  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4787         chunk_size, "Check alignment");
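  // Illustrative arithmetic (assuming a 64-bit build with 512-byte cards):
  // alignment = 512 * 64 = 32K bytes = 64 cards, which is exactly the span
  // covered by one 64-bit word of the mod union table bitmap, so chunks
  // aligned this way never share a MUT word between workers.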
4788
4789  while (!pst->is_task_claimed(/* reference */ nth_task)) {
4790    // Having claimed the nth_task, compute corresponding mem-region,
4791    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4792    // The alignment restriction ensures that we do not need any
4793    // synchronization with other gang-workers while setting or
4794    // clearing bits in this chunk of the MUT.
4795    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4796                                    start_addr + (nth_task+1)*chunk_size);
4797    // The last chunk's end might be way beyond end of the
4798    // used region. In that case pull back appropriately.
4799    if (this_span.end() > end_addr) {
4800      this_span.set_end(end_addr);
4801      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4802    }
4803    // Iterate over the dirty cards covering this chunk, marking them
4804    // precleaned, and setting the corresponding bits in the mod union
4805    // table. Since we have been careful to partition at Card and MUT-word
4806    // boundaries, no synchronization is needed between parallel threads.
4807    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4808                                                 &modUnionClosure);
4809
4810    // Having transferred these marks into the modUnionTable,
4811    // rescan the marked objects on the dirty cards in the modUnionTable.
4812    // Even if this is at a synchronous collection, the initial marking
4813    // may have been done during an asynchronous collection so there
4814    // may be dirty bits in the mod-union table.
4815    _collector->_modUnionTable.dirty_range_iterate_clear(
4816                  this_span, &greyRescanClosure);
4817    _collector->_modUnionTable.verifyNoOneBitsInRange(
4818                                 this_span.start(),
4819                                 this_span.end());
4820  }
4821  pst->all_tasks_completed();  // declare that I am done
4822}
4823
4824// . see if we can share work_queues with ParNew? XXX
4825void
4826CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
4827                                int* seed) {
4828  OopTaskQueue* work_q = work_queue(i);
4829  NOT_PRODUCT(int num_steals = 0;)
4830  oop obj_to_scan;
4831  CMSBitMap* bm = &(_collector->_markBitMap);
4832
4833  while (true) {
4834    // Completely finish any left over work from (an) earlier round(s)
4835    cl->trim_queue(0);
4836    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4837                                         (size_t)ParGCDesiredObjsFromOverflowList);
4838    // Now check if there's any work in the overflow list
4839    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4840    // only affects the number of attempts made to get work from the
4841    // overflow list and does not affect the number of workers.  Just
4842    // pass ParallelGCThreads so this behavior is unchanged.
4843    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4844                                                work_q,
4845                                                ParallelGCThreads)) {
4846      // found something in global overflow list;
4847      // not yet ready to go stealing work from others.
4848      // We'd like to assert(work_q->size() != 0, ...)
4849      // because we just took work from the overflow list,
4850      // but of course we can't since all of that could have
4851      // been already stolen from us.
4852      // "He giveth and He taketh away."
4853      continue;
4854    }
4855    // Verify that we have no work before we resort to stealing
4856    assert(work_q->size() == 0, "Have work, shouldn't steal");
4857    // Try to steal from other queues that have work
4858    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4859      NOT_PRODUCT(num_steals++;)
4860      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4861      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4862      // Do scanning work
4863      obj_to_scan->oop_iterate(cl);
4864      // Loop around, finish this work, and try to steal some more
4865    } else if (terminator()->offer_termination()) {
4866        break;  // nirvana from the infinite cycle
4867    }
4868  }
4869  NOT_PRODUCT(
4870    if (PrintCMSStatistics != 0) {
4871      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
4872    }
4873  )
4874  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4875         "Else our work is not yet done");
4876}
4877
4878// Record object boundaries in _eden_chunk_array by sampling the eden
4879// top in the slow-path eden object allocation code path and record
4880// the boundaries, if CMSEdenChunksRecordAlways is true. If
4881// CMSEdenChunksRecordAlways is false, we use the other asynchronous
4882// sampling in sample_eden(), which is active during part of the
4883// preclean phase.
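// For illustration (hypothetical sample addresses): if the samples recorded
// here were t1 < t2 < t3, the parallel rescan later carves eden into the
// tasks [bottom,t1), [t1,t2), [t2,t3), [t3,top); see do_young_space_rescan()
// above.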
4884void CMSCollector::sample_eden_chunk() {
4885  if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4886    if (_eden_chunk_lock->try_lock()) {
4887      // Record a sample. This is the critical section. The contents
4888      // of the _eden_chunk_array have to be non-decreasing in the
4889      // address order.
4890      _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4891      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4892             "Unexpected state of Eden");
4893      if (_eden_chunk_index == 0 ||
4894          ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4895           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4896                          _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4897        _eden_chunk_index++;  // commit sample
4898      }
4899      _eden_chunk_lock->unlock();
4900    }
4901  }
4902}
4903
4904// Return a thread-local PLAB recording array, as appropriate.
4905void* CMSCollector::get_data_recorder(int thr_num) {
4906  if (_survivor_plab_array != NULL &&
4907      (CMSPLABRecordAlways ||
4908       (_collectorState > Marking && _collectorState < FinalMarking))) {
4909    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4910    ChunkArray* ca = &_survivor_plab_array[thr_num];
4911    ca->reset();   // clear it so that fresh data is recorded
4912    return (void*) ca;
4913  } else {
4914    return NULL;
4915  }
4916}
4917
4918// Reset all the thread-local PLAB recording arrays
4919void CMSCollector::reset_survivor_plab_arrays() {
4920  for (uint i = 0; i < ParallelGCThreads; i++) {
4921    _survivor_plab_array[i].reset();
4922  }
4923}
4924
4925// Merge the per-thread plab arrays into the global survivor chunk
4926// array which will provide the partitioning of the survivor space
4927// for CMS initial scan and rescan.
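// An illustrative example (made-up boundary addresses): given three
// per-thread arrays, each sorted by address,
//   t0: [a0, a3]   t1: [a1, a4]   t2: [a2]       with a0 < a1 < ... < a4,
// each round below picks the globally smallest unconsumed entry, yielding
//   _survivor_chunk_array = [a0, a1, a2, a3, a4]
// i.e. a plain k-way merge of the sorted per-thread boundary arrays.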
4928void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4929                                              int no_of_gc_threads) {
4930  assert(_survivor_plab_array  != NULL, "Error");
4931  assert(_survivor_chunk_array != NULL, "Error");
4932  assert(_collectorState == FinalMarking ||
4933         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4934  for (int j = 0; j < no_of_gc_threads; j++) {
4935    _cursor[j] = 0;
4936  }
4937  HeapWord* top = surv->top();
4938  size_t i;
4939  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4940    HeapWord* min_val = top;          // Higher than any PLAB address
4941    uint      min_tid = 0;            // position of min_val this round
4942    for (int j = 0; j < no_of_gc_threads; j++) {
4943      ChunkArray* cur_sca = &_survivor_plab_array[j];
4944      if (_cursor[j] == cur_sca->end()) {
4945        continue;
4946      }
4947      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4948      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4949      assert(surv->used_region().contains(cur_val), "Out of bounds value");
4950      if (cur_val < min_val) {
4951        min_tid = j;
4952        min_val = cur_val;
4953      } else {
4954        assert(cur_val < top, "All recorded addresses should be less");
4955      }
4956    }
4957    // At this point min_val and min_tid are respectively
4958    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4959    // and the thread (j) that witnesses that address.
4960    // We record this address in the _survivor_chunk_array[i]
4961    // and increment _cursor[min_tid] prior to the next round i.
4962    if (min_val == top) {
4963      break;
4964    }
4965    _survivor_chunk_array[i] = min_val;
4966    _cursor[min_tid]++;
4967  }
4968  // We are all done; record the size of the _survivor_chunk_array
4969  _survivor_chunk_index = i; // exclusive: [0, i)
4970  if (PrintCMSStatistics > 0) {
4971    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4972  }
4973  // Verify that we used up all the recorded entries
4974  #ifdef ASSERT
4975    size_t total = 0;
4976    for (int j = 0; j < no_of_gc_threads; j++) {
4977      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4978      total += _cursor[j];
4979    }
4980    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4981    // Check that the merged array is in sorted order
4982    if (total > 0) {
4983      for (size_t i = 0; i < total - 1; i++) {
4984        if (PrintCMSStatistics > 0) {
4985          gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4986                              i, p2i(_survivor_chunk_array[i]));
4987        }
4988        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4989               "Not sorted");
4990      }
4991    }
4992  #endif // ASSERT
4993}
4994
4995// Set up the space's par_seq_tasks structure for work claiming
4996// for parallel initial scan and rescan of young gen.
4997// See ParRescanTask where this is currently used.
4998void
4999CMSCollector::
5000initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5001  assert(n_threads > 0, "Unexpected n_threads argument");
5002
5003  // Eden space
5004  if (!_young_gen->eden()->is_empty()) {
5005    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
5006    assert(!pst->valid(), "Clobbering existing data?");
5007    // Each valid entry in [0, _eden_chunk_index) represents a task.
5008    size_t n_tasks = _eden_chunk_index + 1;
5009    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5010    // Sets the condition for completion of the subtask (how many threads
5011    // need to finish in order to be done).
5012    pst->set_n_threads(n_threads);
5013    pst->set_n_tasks((int)n_tasks);
5014  }
5015
5016  // Merge the survivor plab arrays into _survivor_chunk_array
5017  if (_survivor_plab_array != NULL) {
5018    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
5019  } else {
5020    assert(_survivor_chunk_index == 0, "Error");
5021  }
5022
5023  // To space
5024  {
5025    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
5026    assert(!pst->valid(), "Clobbering existing data?");
5027    // Sets the condition for completion of the subtask (how many threads
5028    // need to finish in order to be done).
5029    pst->set_n_threads(n_threads);
5030    pst->set_n_tasks(1);
5031    assert(pst->valid(), "Error");
5032  }
5033
5034  // From space
5035  {
5036    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5037    assert(!pst->valid(), "Clobbering existing data?");
5038    size_t n_tasks = _survivor_chunk_index + 1;
5039    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5040    // Sets the condition for completion of the subtask (how many threads
5041    // need to finish in order to be done).
5042    pst->set_n_threads(n_threads);
5043    pst->set_n_tasks((int)n_tasks);
5044    assert(pst->valid(), "Error");
5045  }
5046}
5047
5048// Parallel version of remark
5049void CMSCollector::do_remark_parallel() {
5050  GenCollectedHeap* gch = GenCollectedHeap::heap();
5051  WorkGang* workers = gch->workers();
5052  assert(workers != NULL, "Need parallel worker threads.");
5053  // Choose to use the number of GC workers most recently set
5054  // into "active_workers".
5055  uint n_workers = workers->active_workers();
5056
5057  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5058
5059  StrongRootsScope srs(n_workers);
5060
5061  CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5062
5063  // We won't be iterating over the cards in the card table updating
5064  // the younger_gen cards, so we shouldn't call the following else
5065  // the verification code as well as subsequent younger_refs_iterate
5066  // code would get confused. XXX
5067  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5068
5069  // The young gen rescan work will not be done as part of
5070  // process_roots (which currently doesn't know how to
5071  // parallelize such a scan), but rather will be broken up into
5072  // a set of parallel tasks (via the sampling that the [abortable]
5073  // preclean phase did of eden, plus the [two] tasks of
5074  // scanning the [two] survivor spaces). Further fine-grain
5075  // parallelization of the scanning of the survivor spaces
5076  // themselves, and of precleaning of the young gen itself
5077  // is deferred to the future.
5078  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5079
5080  // The dirty card rescan work is broken up into a "sequence"
5081  // of parallel tasks (per constituent space) that are dynamically
5082  // claimed by the parallel threads.
5083  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5084
5085  // It turns out that even when we're using 1 thread, doing the work in a
5086  // separate thread causes wide variance in run times.  We can't help this
5087  // in the multi-threaded case, but we special-case n=1 here to get
5088  // repeatable measurements of the 1-thread overhead of the parallel code.
5089  if (n_workers > 1) {
5090    // Make refs discovery MT-safe, if it isn't already: it may not
5091    // necessarily be so, since it's possible that we are doing
5092    // ST marking.
5093    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5094    workers->run_task(&tsk);
5095  } else {
5096    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5097    tsk.work(0);
5098  }
5099
5100  // restore, single-threaded for now, any preserved marks
5101  // as a result of work_q overflow
5102  restore_preserved_marks_if_any();
5103}
5104
5105// Non-parallel version of remark
5106void CMSCollector::do_remark_non_parallel() {
5107  ResourceMark rm;
5108  HandleMark   hm;
5109  GenCollectedHeap* gch = GenCollectedHeap::heap();
5110  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5111
5112  MarkRefsIntoAndScanClosure
5113    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5114             &_markStack, this,
5115             false /* should_yield */, false /* not precleaning */);
5116  MarkFromDirtyCardsClosure
5117    markFromDirtyCardsClosure(this, _span,
5118                              NULL,  // space is set further below
5119                              &_markBitMap, &_markStack, &mrias_cl);
5120  {
5121    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5122    // Iterate over the dirty cards, setting the corresponding bits in the
5123    // mod union table.
5124    {
5125      ModUnionClosure modUnionClosure(&_modUnionTable);
5126      _ct->ct_bs()->dirty_card_iterate(
5127                      _cmsGen->used_region(),
5128                      &modUnionClosure);
5129    }
5130    // Having transferred these marks into the modUnionTable, we just need
5131    // to rescan the marked objects on the dirty cards in the modUnionTable.
5132    // The initial marking may have been done during an asynchronous
5133    // collection so there may be dirty bits in the mod-union table.
5134    const int alignment =
5135      CardTableModRefBS::card_size * BitsPerWord;
5136    {
5137      // ... First handle dirty cards in CMS gen
5138      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5139      MemRegion ur = _cmsGen->used_region();
5140      HeapWord* lb = ur.start();
5141      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5142      MemRegion cms_span(lb, ub);
5143      _modUnionTable.dirty_range_iterate_clear(cms_span,
5144                                               &markFromDirtyCardsClosure);
5145      verify_work_stacks_empty();
5146      if (PrintCMSStatistics != 0) {
5147        gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5148          markFromDirtyCardsClosure.num_dirty_cards());
5149      }
5150    }
5151  }
5152  if (VerifyDuringGC &&
5153      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5154    HandleMark hm;  // Discard invalid handles created during verification
5155    Universe::verify();
5156  }
5157  {
5158    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5159
5160    verify_work_stacks_empty();
5161
5162    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5163    StrongRootsScope srs(1);
5164
5165    gch->gen_process_roots(&srs,
5166                           GenCollectedHeap::OldGen,
5167                           true,  // young gen as roots
5168                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
5169                           should_unload_classes(),
5170                           &mrias_cl,
5171                           NULL,
5172                           NULL); // The dirty klasses will be handled below
5173
5174    assert(should_unload_classes()
5175           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5176           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5177  }
5178
5179  {
5180    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5181
5182    verify_work_stacks_empty();
5183
5184    // Scan all class loader data objects that might have been introduced
5185    // during concurrent marking.
5186    ResourceMark rm;
5187    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5188    for (int i = 0; i < array->length(); i++) {
5189      mrias_cl.do_cld_nv(array->at(i));
5190    }
5191
5192    // We don't need to keep track of new CLDs anymore.
5193    ClassLoaderDataGraph::remember_new_clds(false);
5194
5195    verify_work_stacks_empty();
5196  }
5197
5198  {
5199    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5200
5201    verify_work_stacks_empty();
5202
5203    RemarkKlassClosure remark_klass_closure(&mrias_cl);
5204    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5205
5206    verify_work_stacks_empty();
5207  }
5208
5209  // We might have added oops to ClassLoaderData::_handles during the
5210  // concurrent marking phase. These oops point to newly allocated objects
5211  // that are guaranteed to be kept alive. Either by the direct allocation
5212  // code, or when the young collector processes the roots. Hence,
5213  // we don't have to revisit the _handles block during the remark phase.
5214
5215  verify_work_stacks_empty();
5216  // Restore evacuated mark words, if any, used for overflow list links
5217  if (!CMSOverflowEarlyRestoration) {
5218    restore_preserved_marks_if_any();
5219  }
5220  verify_overflow_empty();
5221}
5222
5223////////////////////////////////////////////////////////
5224// Parallel Reference Processing Task Proxy Class
5225////////////////////////////////////////////////////////
5226class AbstractGangTaskWOopQueues : public AbstractGangTask {
5227  OopTaskQueueSet*       _queues;
5228  ParallelTaskTerminator _terminator;
5229 public:
5230  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5231    AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5232  ParallelTaskTerminator* terminator() { return &_terminator; }
5233  OopTaskQueueSet* queues() { return _queues; }
5234};
5235
5236class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5237  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5238  CMSCollector*          _collector;
5239  CMSBitMap*             _mark_bit_map;
5240  const MemRegion        _span;
5241  ProcessTask&           _task;
5242
5243public:
5244  CMSRefProcTaskProxy(ProcessTask&     task,
5245                      CMSCollector*    collector,
5246                      const MemRegion& span,
5247                      CMSBitMap*       mark_bit_map,
5248                      AbstractWorkGang* workers,
5249                      OopTaskQueueSet* task_queues):
5250    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5251      task_queues,
5252      workers->active_workers()),
5253    _task(task),
5254    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5255  {
5256    assert(_collector->_span.equals(_span) && !_span.is_empty(),
5257           "Inconsistency in _span");
5258  }
5259
5260  OopTaskQueueSet* task_queues() { return queues(); }
5261
5262  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5263
5264  void do_work_steal(int i,
5265                     CMSParDrainMarkingStackClosure* drain,
5266                     CMSParKeepAliveClosure* keep_alive,
5267                     int* seed);
5268
5269  virtual void work(uint worker_id);
5270};
5271
5272void CMSRefProcTaskProxy::work(uint worker_id) {
5273  ResourceMark rm;
5274  HandleMark hm;
5275  assert(_collector->_span.equals(_span), "Inconsistency in _span");
5276  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5277                                        _mark_bit_map,
5278                                        work_queue(worker_id));
5279  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5280                                                 _mark_bit_map,
5281                                                 work_queue(worker_id));
5282  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5283  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5284  if (_task.marks_oops_alive()) {
5285    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5286                  _collector->hash_seed(worker_id));
5287  }
5288  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5289  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5290}
5291
5292class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5293  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5294  EnqueueTask& _task;
5295
5296public:
5297  CMSRefEnqueueTaskProxy(EnqueueTask& task)
5298    : AbstractGangTask("Enqueue reference objects in parallel"),
5299      _task(task)
5300  { }
5301
5302  virtual void work(uint worker_id)
5303  {
5304    _task.work(worker_id);
5305  }
5306};
5307
5308CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5309  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5310   _span(span),
5311   _bit_map(bit_map),
5312   _work_queue(work_queue),
5313   _mark_and_push(collector, span, bit_map, work_queue),
5314   _low_water_mark(MIN2((work_queue->max_elems()/4),
5315                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5316{ }
5317
5318// . see if we can share work_queues with ParNew? XXX
5319void CMSRefProcTaskProxy::do_work_steal(int i,
5320  CMSParDrainMarkingStackClosure* drain,
5321  CMSParKeepAliveClosure* keep_alive,
5322  int* seed) {
5323  OopTaskQueue* work_q = work_queue(i);
5324  NOT_PRODUCT(int num_steals = 0;)
5325  oop obj_to_scan;
5326
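  // Work loop, summarizing the code below: (1) drain any work already on our
  // local queue, (2) refill from the global overflow list if it has entries,
  // (3) otherwise try to steal from other workers' queues, and (4) offer
  // termination once neither source yields any more work.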
5327  while (true) {
5328    // Completely finish any leftover work from earlier rounds
5329    drain->trim_queue(0);
5330    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5331                                         (size_t)ParGCDesiredObjsFromOverflowList);
5332    // Now check if there's any work in the overflow list
5333    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5334    // only affects the number of attempts made to get work from the
5335    // overflow list and does not affect the number of workers.  Just
5336    // pass ParallelGCThreads so this behavior is unchanged.
5337    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5338                                                work_q,
5339                                                ParallelGCThreads)) {
5340      // Found something in global overflow list;
5341      // not yet ready to go stealing work from others.
5342      // We'd like to assert(work_q->size() != 0, ...)
5343      // because we just took work from the overflow list,
5344      // but of course we can't, since all of that might have
5345      // been already stolen from us.
5346      continue;
5347    }
5348    // Verify that we have no work before we resort to stealing
5349    assert(work_q->size() == 0, "Have work, shouldn't steal");
5350    // Try to steal from other queues that have work
5351    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5352      NOT_PRODUCT(num_steals++;)
5353      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5354      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5355      // Do scanning work
5356      obj_to_scan->oop_iterate(keep_alive);
5357      // Loop around, finish this work, and try to steal some more
5358    } else if (terminator()->offer_termination()) {
5359      break;  // nirvana from the infinite cycle
5360    }
5361  }
5362  NOT_PRODUCT(
5363    if (PrintCMSStatistics != 0) {
5364      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5365    }
5366  )
5367}
5368
5369void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5370{
5371  GenCollectedHeap* gch = GenCollectedHeap::heap();
5372  WorkGang* workers = gch->workers();
5373  assert(workers != NULL, "Need parallel worker threads.");
5374  CMSRefProcTaskProxy rp_task(task, &_collector,
5375                              _collector.ref_processor()->span(),
5376                              _collector.markBitMap(),
5377                              workers, _collector.task_queues());
5378  workers->run_task(&rp_task);
5379}
5380
5381void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5382{
5383
5384  GenCollectedHeap* gch = GenCollectedHeap::heap();
5385  WorkGang* workers = gch->workers();
5386  assert(workers != NULL, "Need parallel worker threads.");
5387  CMSRefEnqueueTaskProxy enq_task(task);
5388  workers->run_task(&enq_task);
5389}
5390
5391void CMSCollector::refProcessingWork() {
5392  ResourceMark rm;
5393  HandleMark   hm;
5394
5395  ReferenceProcessor* rp = ref_processor();
5396  assert(rp->span().equals(_span), "Spans should be equal");
5397  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5398  // Process weak references.
5399  rp->setup_policy(false);
5400  verify_work_stacks_empty();
5401
5402  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5403                                          &_markStack, false /* !preclean */);
5404  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5405                                _span, &_markBitMap, &_markStack,
5406                                &cmsKeepAliveClosure, false /* !preclean */);
5407  {
5408    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
5409
5410    ReferenceProcessorStats stats;
5411    if (rp->processing_is_mt()) {
5412      // Set the degree of MT here.  If the discovery is done MT, there
5413      // may have been a different number of threads doing the discovery
5414      // and a different number of discovered lists may have Ref objects.
5415      // That is OK as long as the Reference lists are balanced (see
5416      // balance_all_queues() and balance_queues()).
5417      GenCollectedHeap* gch = GenCollectedHeap::heap();
5418      uint active_workers = ParallelGCThreads;
5419      WorkGang* workers = gch->workers();
5420      if (workers != NULL) {
5421        active_workers = workers->active_workers();
5422        // The expectation is that active_workers will have already
5423        // been set to a reasonable value.  If it has not been set,
5424        // investigate.
5425        assert(active_workers > 0, "Should have been set during scavenge");
5426      }
5427      rp->set_active_mt_degree(active_workers);
5428      CMSRefProcTaskExecutor task_executor(*this);
5429      stats = rp->process_discovered_references(&_is_alive_closure,
5430                                        &cmsKeepAliveClosure,
5431                                        &cmsDrainMarkingStackClosure,
5432                                        &task_executor,
5433                                        _gc_timer_cm);
5434    } else {
5435      stats = rp->process_discovered_references(&_is_alive_closure,
5436                                        &cmsKeepAliveClosure,
5437                                        &cmsDrainMarkingStackClosure,
5438                                        NULL,
5439                                        _gc_timer_cm);
5440    }
5441    _gc_tracer_cm->report_gc_reference_stats(stats);
5442
5443  }
5444
5445  // This is the point where the entire marking should have completed.
5446  verify_work_stacks_empty();
5447
5448  if (should_unload_classes()) {
5449    {
5450      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
5451
5452      // Unload classes and purge the SystemDictionary.
5453      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5454
5455      // Unload nmethods.
5456      CodeCache::do_unloading(&_is_alive_closure, purged_class);
5457
5458      // Prune dead klasses from subklass/sibling/implementor lists.
5459      Klass::clean_weak_klass_links(&_is_alive_closure);
5460    }
5461
5462    {
5463      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
5464      // Clean up unreferenced symbols in symbol table.
5465      SymbolTable::unlink();
5466    }
5467
5468    {
5469      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
5470      // Delete entries for dead interned strings.
5471      StringTable::unlink(&_is_alive_closure);
5472    }
5473  }
5474
5475
5476  // Restore any preserved marks as a result of mark stack or
5477  // work queue overflow
5478  restore_preserved_marks_if_any();  // done single-threaded for now
5479
5480  rp->set_enqueuing_is_done(true);
5481  if (rp->processing_is_mt()) {
5482    rp->balance_all_queues();
5483    CMSRefProcTaskExecutor task_executor(*this);
5484    rp->enqueue_discovered_references(&task_executor);
5485  } else {
5486    rp->enqueue_discovered_references(NULL);
5487  }
5488  rp->verify_no_references_recorded();
5489  assert(!rp->discovery_enabled(), "should have been disabled");
5490}
5491
5492#ifndef PRODUCT
5493void CMSCollector::check_correct_thread_executing() {
5494  Thread* t = Thread::current();
5495  // Only the VM thread or the CMS thread should be here.
5496  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5497         "Unexpected thread type");
5498  // If this is the vm thread, the foreground process
5499  // should not be waiting.  Note that _foregroundGCIsActive is
5500  // true while the foreground collector is waiting.
5501  if (_foregroundGCShouldWait) {
5502    // We cannot be the VM thread
5503    assert(t->is_ConcurrentGC_thread(),
5504           "Should be CMS thread");
5505  } else {
5506    // We can be the CMS thread only if we are in a stop-world
5507    // phase of CMS collection.
5508    if (t->is_ConcurrentGC_thread()) {
5509      assert(_collectorState == InitialMarking ||
5510             _collectorState == FinalMarking,
5511             "Should be a stop-world phase");
5512      // The CMS thread should be holding the CMS_token.
5513      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5514             "Potential interference with concurrently "
5515             "executing VM thread");
5516    }
5517  }
5518}
5519#endif
5520
5521void CMSCollector::sweep() {
5522  assert(_collectorState == Sweeping, "just checking");
5523  check_correct_thread_executing();
5524  verify_work_stacks_empty();
5525  verify_overflow_empty();
5526  increment_sweep_count();
5527  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5528
5529  _inter_sweep_timer.stop();
5530  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5531
5532  assert(!_intra_sweep_timer.is_active(), "Should not be active");
5533  _intra_sweep_timer.reset();
5534  _intra_sweep_timer.start();
5535  {
5536    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5537    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5538    // First sweep the old gen
5539    {
5540      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5541                               bitMapLock());
5542      sweepWork(_cmsGen);
5543    }
5544
5545    // Update Universe::_heap_*_at_gc figures.
5546    // We need all the free list locks to make the abstract state
5547    // transition from Sweeping to Resetting. See detailed note
5548    // transition from Sweeping to Resizing. See detailed note
5549    {
5550      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5551      // Update heap occupancy information which is used as
5552      // input to soft ref clearing policy at the next gc.
5553      Universe::update_heap_info_at_gc();
5554      _collectorState = Resizing;
5555    }
5556  }
5557  verify_work_stacks_empty();
5558  verify_overflow_empty();
5559
5560  if (should_unload_classes()) {
5561    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5562    // requires that the virtual spaces are stable and not deleted.
5563    ClassLoaderDataGraph::set_should_purge(true);
5564  }
5565
5566  _intra_sweep_timer.stop();
5567  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5568
5569  _inter_sweep_timer.reset();
5570  _inter_sweep_timer.start();
5571
5572  // We need to use a monotonically non-decreasing time in ms,
5573  // because os::javaTimeMillis() does not guarantee monotonicity
5574  // and we would otherwise see time-warp warnings.
5575  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5576  update_time_of_last_gc(now);
5577
5578  // NOTE on abstract state transitions:
5579  // Mutators allocate-live and/or mark the mod-union table dirty
5580  // based on the state of the collection.  The former is done in
5581  // the interval [Marking, Sweeping] and the latter in the interval
5582  // [Marking, Sweeping).  Thus the transitions into the Marking state
5583  // and out of the Sweeping state must be synchronously visible
5584  // globally to the mutators.
5585  // The transition into the Marking state happens with the world
5586  // stopped so the mutators will globally see it.  Sweeping is
5587  // done asynchronously by the background collector so the transition
5588  // from the Sweeping state to the Resizing state must be done
5589  // under the freelistLock (as is the check for whether to
5590  // allocate-live and whether to dirty the mod-union table).
5591  assert(_collectorState == Resizing, "Change of collector state to"
5592    " Resizing must be done under the freelistLocks (plural)");
5593
5594  // Now that sweeping has been completed, we clear
5595  // the incremental_collection_failed flag,
5596  // thus inviting a younger gen collection to promote into
5597  // this generation. If such a promotion may still fail,
5598  // the flag will be set again when a young collection is
5599  // attempted.
5600  GenCollectedHeap* gch = GenCollectedHeap::heap();
5601  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5602  gch->update_full_collections_completed(_collection_count_start);
5603}
5604
5605// FIX ME!!! Looks like this belongs in CFLSpace, with
5606// CMSGen merely delegating to it.
5607void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5608  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5609  HeapWord*  minAddr        = _cmsSpace->bottom();
5610  HeapWord*  largestAddr    =
5611    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5612  if (largestAddr == NULL) {
5613    // The dictionary appears to be empty.  In this case
5614    // try to coalesce at the end of the heap.
5615    largestAddr = _cmsSpace->end();
5616  }
5617  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5618  size_t nearLargestOffset =
5619    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
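  // Illustrative example (arbitrary numbers): with nearLargestPercent assumed
  // at its usual default of 0.99 and the largest free block found 1000000
  // words above bottom(), addresses from roughly word 990000 - MinChunkSize
  // onward are treated as "near the largest chunk" by isNearLargestChunk().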
5620  if (PrintFLSStatistics != 0) {
5621    gclog_or_tty->print_cr(
5622      "CMS: Large Block: " PTR_FORMAT ";"
5623      " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5624      p2i(largestAddr),
5625      p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5626  }
5627  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5628}
5629
5630bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5631  return addr >= _cmsSpace->nearLargestChunk();
5632}
5633
5634FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5635  return _cmsSpace->find_chunk_at_end();
5636}
5637
5638void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5639                                                    bool full) {
5640  // If the young generation has been collected, gather any statistics
5641  // that are of interest at this point.
5642  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5643  if (!full && current_is_young) {
5644    // Gather statistics on the young generation collection.
5645    collector()->stats().record_gc0_end(used());
5646  }
5647}
5648
5649void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5650  // We iterate over the space(s) underlying this generation,
5651  // checking the mark bit map to see if the bits corresponding
5652  // to specific blocks are marked or not. Blocks that are
5653  // marked are live and are not swept up. All remaining blocks
5654  // are swept up, with coalescing on-the-fly as we sweep up
5655  // contiguous free and/or garbage blocks:
5656  // We need to ensure that the sweeper synchronizes with allocators
5657  // and stop-the-world collectors. In particular, the following
5658  // locks are used:
5659  // . CMS token: if this is held, a stop-the-world collection cannot occur
5660  // . freelistLock: if this is held, no allocation can occur from this
5661  //                 generation by another thread
5662  // . bitMapLock: if this is held, no other thread can access or update
5663  //               the mark bit map
5664
5665  // Note that we need to hold the freelistLock if we use
5666  // block iterate below; else the iterator might go awry if
5667  // a mutator (or promotion) causes block contents to change
5668  // (for instance if the allocator divvies up a block).
5669  // If we hold the free list lock, for all practical purposes
5670  // young generation GC's can't occur (they'll usually need to
5671  // promote), so we might as well prevent all young generation
5672  // GC's while we do a sweeping step. For the same reason, we might
5673  // as well take the bit map lock for the entire duration.
5674
5675  // check that we hold the requisite locks
5676  assert(have_cms_token(), "Should hold cms token");
5677  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5678  assert_lock_strong(old_gen->freelistLock());
5679  assert_lock_strong(bitMapLock());
5680
5681  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5682  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5683  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5684                                          _inter_sweep_estimate.padded_average(),
5685                                          _intra_sweep_estimate.padded_average());
5686  old_gen->setNearLargestChunk();
5687
5688  {
5689    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5690    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5691    // We need to free-up/coalesce garbage/blocks from a
5692    // co-terminal free run. This is done in the SweepClosure
5693    // destructor; so, do not remove this scope, else the
5694    // end-of-sweep-census below will be off by a little bit.
5695  }
5696  old_gen->cmsSpace()->sweep_completed();
5697  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5698  if (should_unload_classes()) {                // unloaded classes this cycle,
5699    _concurrent_cycles_since_last_unload = 0;   // ... reset count
5700  } else {                                      // did not unload classes,
5701    _concurrent_cycles_since_last_unload++;     // ... increment count
5702  }
5703}
5704
5705// Reset CMS data structures (for now just the marking bit map)
5706// preparatory for the next cycle.
5707void CMSCollector::reset_concurrent() {
5708  CMSTokenSyncWithLocks ts(true, bitMapLock());
5709
5710  // If the state is not "Resetting", the foreground thread
5711  // has done a collection and the resetting.
5712  if (_collectorState != Resetting) {
5713    assert(_collectorState == Idling, "The state should only change"
5714      " because the foreground collector has finished the collection");
5715    return;
5716  }
5717
5718  // Clear the mark bitmap (no grey objects to start with)
5719  // for the next cycle.
5720  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5721  CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
5722
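  // The bitmap is cleared in CMSBitMapYieldQuantum-sized chunks so that the
  // CMS thread can periodically give up the bitMapLock to a foreground
  // collection between chunks (see the yield block inside the loop below).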
5723  HeapWord* curAddr = _markBitMap.startWord();
5724  while (curAddr < _markBitMap.endWord()) {
5725    size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5726    MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5727    _markBitMap.clear_large_range(chunk);
5728    if (ConcurrentMarkSweepThread::should_yield() &&
5729        !foregroundGCIsActive() &&
5730        CMSYield) {
5731      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5732             "CMS thread should hold CMS token");
5733      assert_lock_strong(bitMapLock());
5734      bitMapLock()->unlock();
5735      ConcurrentMarkSweepThread::desynchronize(true);
5736      stopTimer();
5737      if (PrintCMSStatistics != 0) {
5738        incrementYields();
5739      }
5740
5741      // See the comment in coordinator_yield()
5742      for (unsigned i = 0; i < CMSYieldSleepCount &&
5743                       ConcurrentMarkSweepThread::should_yield() &&
5744                       !CMSCollector::foregroundGCIsActive(); ++i) {
5745        os::sleep(Thread::current(), 1, false);
5746      }
5747
5748      ConcurrentMarkSweepThread::synchronize(true);
5749      bitMapLock()->lock_without_safepoint_check();
5750      startTimer();
5751    }
5752    curAddr = chunk.end();
5753  }
5754  // A successful mostly concurrent collection has been done.
5755  // Because only the full (i.e., concurrent mode failure) collections
5756  // are being measured for gc overhead limits, clean the "near" flag
5757  // and count.
5758  size_policy()->reset_gc_overhead_limit_count();
5759  _collectorState = Idling;
5760
5761  register_gc_end();
5762}
5763
5764// Same as above but for STW paths
5765void CMSCollector::reset_stw() {
5766  // already have the lock
5767  assert(_collectorState == Resetting, "just checking");
5768  assert_lock_strong(bitMapLock());
5769  GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5770  _markBitMap.clear_all();
5771  _collectorState = Idling;
5772  register_gc_end();
5773}
5774
5775void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5776  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5777  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
5778  TraceCollectorStats tcs(counters());
5779
5780  switch (op) {
5781    case CMS_op_checkpointRootsInitial: {
5782      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5783      checkpointRootsInitial();
5784      if (PrintGC) {
5785        _cmsGen->printOccupancy("initial-mark");
5786      }
5787      break;
5788    }
5789    case CMS_op_checkpointRootsFinal: {
5790      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5791      checkpointRootsFinal();
5792      if (PrintGC) {
5793        _cmsGen->printOccupancy("remark");
5794      }
5795      break;
5796    }
5797    default:
5798      fatal("No such CMS_op");
5799  }
5800}
5801
5802#ifndef PRODUCT
5803size_t const CMSCollector::skip_header_HeapWords() {
5804  return FreeChunk::header_size();
5805}
5806
5807// Try and collect here conditions that should hold when
5808// CMS thread is exiting. The idea is that the foreground GC
5809// thread should not be blocked if it wants to terminate
5810// the CMS thread and yet continue to run the VM for a while
5811// after that.
5812void CMSCollector::verify_ok_to_terminate() const {
5813  assert(Thread::current()->is_ConcurrentGC_thread(),
5814         "should be called by CMS thread");
5815  assert(!_foregroundGCShouldWait, "should be false");
5816  // We could check here that all the various low-level locks
5817  // are not held by the CMS thread, but that is overkill; see
5818  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5819  // is checked.
5820}
5821#endif
5822
5823size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5824  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5825         "missing Printezis mark?");
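  // In addition to the normal mark bit at addr, two "Printezis" bits record
  // the extent of a not-yet-parsable block: one at addr + 1 and one at the
  // block's last word (hence the size >= 3 requirement below). Illustrative
  // example (arbitrary numbers): if the next marked word at or after addr + 2
  // is addr + 9, the block spans [addr, addr + 9] and its size is
  // pointer_delta(addr + 10, addr) == 10 words.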
5826  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5827  size_t size = pointer_delta(nextOneAddr + 1, addr);
5828  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5829         "alignment problem");
5830  assert(size >= 3, "Necessary for Printezis marks to work");
5831  return size;
5832}
5833
5834// A variant of the above (block_size_using_printezis_bits()) except
5835// that we return 0 if the P-bits are not yet set.
5836size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5837  if (_markBitMap.isMarked(addr + 1)) {
5838    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5839    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5840    size_t size = pointer_delta(nextOneAddr + 1, addr);
5841    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5842           "alignment problem");
5843    assert(size >= 3, "Necessary for Printezis marks to work");
5844    return size;
5845  }
5846  return 0;
5847}
5848
5849HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5850  size_t sz = 0;
5851  oop p = (oop)addr;
5852  if (p->klass_or_null() != NULL) {
5853    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5854  } else {
5855    sz = block_size_using_printezis_bits(addr);
5856  }
5857  assert(sz > 0, "size must be nonzero");
5858  HeapWord* next_block = addr + sz;
5859  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5860                                             CardTableModRefBS::card_size);
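  // Illustrative example (assuming the usual 512-byte card size): if the
  // block ends in the middle of a card, next_card is rounded up to the start
  // of the following card; the assert below checks that next_card falls on a
  // strictly later card than addr.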
5861  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5862         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5863         "must be different cards");
5864  return next_card;
5865}
5866
5867
5868// CMS Bit Map Wrapper /////////////////////////////////////////
5869
5870// Construct a CMS bit map infrastructure, but don't create the
5871// bit vector itself. That is done by a separate call CMSBitMap::allocate()
5872// further below.
5873CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5874  _bm(),
5875  _shifter(shifter),
5876  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5877                                    Monitor::_safepoint_check_sometimes) : NULL)
5878{
5879  _bmStartWord = 0;
5880  _bmWordSize  = 0;
5881}
5882
5883bool CMSBitMap::allocate(MemRegion mr) {
5884  _bmStartWord = mr.start();
5885  _bmWordSize  = mr.word_size();
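  // One bit covers 2^_shifter heap words, so the backing store needs
  // (_bmWordSize >> _shifter) bits, i.e. roughly
  // _bmWordSize >> (_shifter + LogBitsPerByte) bytes, as computed below.
  // Illustrative example (with _shifter == 0 and 8-byte heap words): a 1 GiB
  // span is 2^27 words and therefore needs 2^27 bits == 16 MB of bit map.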
5886  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5887                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5888  if (!brs.is_reserved()) {
5889    warning("CMS bit map allocation failure");
5890    return false;
5891  }
5892  // For now we'll just commit all of the bit map up front.
5893  // Later on we'll try to be more parsimonious with swap.
5894  if (!_virtual_space.initialize(brs, brs.size())) {
5895    warning("CMS bit map backing store failure");
5896    return false;
5897  }
5898  assert(_virtual_space.committed_size() == brs.size(),
5899         "didn't reserve backing store for all of CMS bit map?");
5900  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5901  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5902         _bmWordSize, "inconsistency in bit map sizing");
5903  _bm.set_size(_bmWordSize >> _shifter);
5904
5905  // bm.clear(); // can we rely on getting zero'd memory? verify below
5906  assert(isAllClear(),
5907         "Expected zero'd memory from ReservedSpace constructor");
5908  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5909         "consistency check");
5910  return true;
5911}
5912
5913void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5914  HeapWord *next_addr, *end_addr, *last_addr;
5915  assert_locked();
5916  assert(covers(mr), "out-of-range error");
5917  // XXX assert that start and end are appropriately aligned
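  // Each iteration below extracts (and clears) the next maximal run of set
  // bits within [next_addr, end_addr) as a MemRegion and hands it to the
  // closure; an empty region means no marked bits remain, and we are done.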
5918  for (next_addr = mr.start(), end_addr = mr.end();
5919       next_addr < end_addr; next_addr = last_addr) {
5920    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5921    last_addr = dirty_region.end();
5922    if (!dirty_region.is_empty()) {
5923      cl->do_MemRegion(dirty_region);
5924    } else {
5925      assert(last_addr == end_addr, "program logic");
5926      return;
5927    }
5928  }
5929}
5930
5931void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5932  _bm.print_on_error(st, prefix);
5933}
5934
5935#ifndef PRODUCT
5936void CMSBitMap::assert_locked() const {
5937  CMSLockVerifier::assert_locked(lock());
5938}
5939
5940bool CMSBitMap::covers(MemRegion mr) const {
5941  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5942  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5943         "size inconsistency");
5944  return (mr.start() >= _bmStartWord) &&
5945         (mr.end()   <= endWord());
5946}
5947
5948bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5949    return (start >= _bmStartWord && (start + size) <= endWord());
5950  return (start >= _bmStartWord && (start + size) <= endWord());
5951
5952void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5953  // verify that there are no 1 bits in the interval [left, right)
5954  FalseBitMapClosure falseBitMapClosure;
5955  iterate(&falseBitMapClosure, left, right);
5956}
5957
5958void CMSBitMap::region_invariant(MemRegion mr)
5959{
5960  assert_locked();
5961  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5962  assert(!mr.is_empty(), "unexpected empty region");
5963  assert(covers(mr), "mr should be covered by bit map");
5964  // convert address range into offset range
5965  size_t start_ofs = heapWordToOffset(mr.start());
5966  // Make sure that end() is appropriately aligned
5967  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5968                        (1 << (_shifter+LogHeapWordSize))),
5969         "Misaligned mr.end()");
5970  size_t end_ofs   = heapWordToOffset(mr.end());
5971  assert(end_ofs > start_ofs, "Should mark at least one bit");
5972}
5973
5974#endif
5975
5976bool CMSMarkStack::allocate(size_t size) {
5977  // allocate a stack of the requisite depth
5978  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5979                   size * sizeof(oop)));
5980  if (!rs.is_reserved()) {
5981    warning("CMSMarkStack allocation failure");
5982    return false;
5983  }
5984  if (!_virtual_space.initialize(rs, rs.size())) {
5985    warning("CMSMarkStack backing store failure");
5986    return false;
5987  }
5988  assert(_virtual_space.committed_size() == rs.size(),
5989         "didn't reserve backing store for all of CMS stack?");
5990  _base = (oop*)(_virtual_space.low());
5991  _index = 0;
5992  _capacity = size;
5993  NOT_PRODUCT(_max_depth = 0);
5994  return true;
5995}
5996
5997// XXX FIX ME !!! In the MT case we come in here holding a
5998// leaf lock. For printing we need to take a further lock
5999// which has lower rank. We need to recalibrate the two
6000// lock-ranks involved in order to be able to print the
6001// messages below. (Or defer the printing to the caller.
6002// For now we take the expedient path of just disabling the
6003// messages for the problematic case.)
6004void CMSMarkStack::expand() {
6005  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6006  if (_capacity == MarkStackSizeMax) {
6007    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6008      // We print a warning message only once per CMS cycle.
6009      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6010    }
6011    return;
6012  }
6013  // Double capacity if possible
6014  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6015  // Do not give up existing stack until we have managed to
6016  // get the double capacity that we desired.
6017  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6018                   new_capacity * sizeof(oop)));
6019  if (rs.is_reserved()) {
6020    // Release the backing store associated with old stack
6021    _virtual_space.release();
6022    // Reinitialize virtual space for new stack
6023    if (!_virtual_space.initialize(rs, rs.size())) {
6024      fatal("Not enough swap for expanded marking stack");
6025    }
6026    _base = (oop*)(_virtual_space.low());
6027    _index = 0;
6028    _capacity = new_capacity;
6029  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6030    // Failed to double capacity, continue;
6031    // we print a detail message only once per CMS cycle.
6032    gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
6033            SIZE_FORMAT "K",
6034            _capacity / K, new_capacity / K);
6035  }
6036}
6037
6038
6039// Closures
6040// XXX: there seems to be a lot of code duplication here;
6041// should refactor and consolidate common code.
6042
6043// This closure is used to mark refs into the CMS generation in
6044// the CMS bit map. Called at the first checkpoint. This closure
6045// assumes that we do not need to re-mark dirty cards; if the CMS
6046// generation on which this is used is not the oldest
6047// generation, then this will lose younger_gen cards!
6048
6049MarkRefsIntoClosure::MarkRefsIntoClosure(
6050  MemRegion span, CMSBitMap* bitMap):
6051    _span(span),
6052    _bitMap(bitMap)
6053{
6054  assert(ref_processor() == NULL, "deliberately left NULL");
6055  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6056}
6057
6058void MarkRefsIntoClosure::do_oop(oop obj) {
6059  // if obj points into _span, then mark the corresponding bit in _bitMap
6060  assert(obj->is_oop(), "expected an oop");
6061  HeapWord* addr = (HeapWord*)obj;
6062  if (_span.contains(addr)) {
6063    // this should be made more efficient
6064    _bitMap->mark(addr);
6065  }
6066}
6067
6068void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6069void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6070
6071Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6072  MemRegion span, CMSBitMap* bitMap):
6073    _span(span),
6074    _bitMap(bitMap)
6075{
6076  assert(ref_processor() == NULL, "deliberately left NULL");
6077  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6078}
6079
6080void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6081  // if obj points into _span, then mark the corresponding bit in _bitMap
6082  assert(obj->is_oop(), "expected an oop");
6083  HeapWord* addr = (HeapWord*)obj;
6084  if (_span.contains(addr)) {
6085    // this should be made more efficient
6086    _bitMap->par_mark(addr);
6087  }
6088}
6089
6090void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6091void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6092
6093// A variant of the above, used for CMS marking verification.
6094MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6095  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6096    _span(span),
6097    _verification_bm(verification_bm),
6098    _cms_bm(cms_bm)
6099{
6100  assert(ref_processor() == NULL, "deliberately left NULL");
6101  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6102}
6103
6104void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6105  // if obj points into _span, then mark the corresponding bit in _verification_bm
6106  assert(obj->is_oop(), "expected an oop");
6107  HeapWord* addr = (HeapWord*)obj;
6108  if (_span.contains(addr)) {
6109    _verification_bm->mark(addr);
6110    if (!_cms_bm->isMarked(addr)) {
6111      oop(addr)->print();
6112      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6113      fatal("... aborting");
6114    }
6115  }
6116}
6117
6118void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6119void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6120
6121//////////////////////////////////////////////////
6122// MarkRefsIntoAndScanClosure
6123//////////////////////////////////////////////////
6124
6125MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6126                                                       ReferenceProcessor* rp,
6127                                                       CMSBitMap* bit_map,
6128                                                       CMSBitMap* mod_union_table,
6129                                                       CMSMarkStack*  mark_stack,
6130                                                       CMSCollector* collector,
6131                                                       bool should_yield,
6132                                                       bool concurrent_precleaning):
6133  _collector(collector),
6134  _span(span),
6135  _bit_map(bit_map),
6136  _mark_stack(mark_stack),
6137  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6138                      mark_stack, concurrent_precleaning),
6139  _yield(should_yield),
6140  _concurrent_precleaning(concurrent_precleaning),
6141  _freelistLock(NULL)
6142{
6143  // FIXME: Should initialize in base class constructor.
6144  assert(rp != NULL, "ref_processor shouldn't be NULL");
6145  set_ref_processor_internal(rp);
6146}
6147
6148// This closure is used to mark refs into the CMS generation at the
6149// second (final) checkpoint, and to scan and transitively follow
6150// the unmarked oops. It is also used during the concurrent precleaning
6151// phase while scanning objects on dirty cards in the CMS generation.
6152// The marks are made in the marking bit map and the marking stack is
6153// used for keeping the (newly) grey objects during the scan.
6154// The parallel version (Par_...) appears further below.
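// In tri-color terms: setting the bit in the mark bit map makes an object
// grey, and popping it from the stack and scanning its fields (greying any
// unmarked referents in _span) makes it black; the loop below drains the
// stack eagerly so that it is empty again on return.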
6155void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6156  if (obj != NULL) {
6157    assert(obj->is_oop(), "expected an oop");
6158    HeapWord* addr = (HeapWord*)obj;
6159    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6160    assert(_collector->overflow_list_is_empty(),
6161           "overflow list should be empty");
6162    if (_span.contains(addr) &&
6163        !_bit_map->isMarked(addr)) {
6164      // mark bit map (object is now grey)
6165      _bit_map->mark(addr);
6166      // push on marking stack (stack should be empty), and drain the
6167      // stack by applying this closure to the oops in the oops popped
6168      // from the stack (i.e. blacken the grey objects)
6169      bool res = _mark_stack->push(obj);
6170      assert(res, "Should have space to push on empty stack");
6171      do {
6172        oop new_oop = _mark_stack->pop();
6173        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6174        assert(_bit_map->isMarked((HeapWord*)new_oop),
6175               "only grey objects on this stack");
6176        // iterate over the oops in this oop, marking and pushing
6177        // the ones in CMS heap (i.e. in _span).
6178        new_oop->oop_iterate(&_pushAndMarkClosure);
6179        // check if it's time to yield
6180        do_yield_check();
6181      } while (!_mark_stack->isEmpty() ||
6182               (!_concurrent_precleaning && take_from_overflow_list()));
6183        // if marking stack is empty, and we are not doing this
6184        // during precleaning, then check the overflow list
6185    }
6186    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6187    assert(_collector->overflow_list_is_empty(),
6188           "overflow list was drained above");
6189    // We could restore evacuated mark words, if any, used for
6190    // overflow list links here because the overflow list is
6191    // provably empty here. That would reduce the maximum
6192    // size requirements for preserved_{oop,mark}_stack.
6193    // But we'll just postpone it until we are all done
6194    // so we can just stream through.
6195    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6196      _collector->restore_preserved_marks_if_any();
6197      assert(_collector->no_preserved_marks(), "No preserved marks");
6198    }
6199    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6200           "All preserved marks should have been restored above");
6201  }
6202}
6203
6204void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6205void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6206
6207void MarkRefsIntoAndScanClosure::do_yield_work() {
6208  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6209         "CMS thread should hold CMS token");
6210  assert_lock_strong(_freelistLock);
6211  assert_lock_strong(_bit_map->lock());
6212  // relinquish the free_list_lock and bitMaplock()
6213  _bit_map->lock()->unlock();
6214  _freelistLock->unlock();
6215  ConcurrentMarkSweepThread::desynchronize(true);
6216  _collector->stopTimer();
6217  if (PrintCMSStatistics != 0) {
6218    _collector->incrementYields();
6219  }
6220
6221  // See the comment in coordinator_yield()
6222  for (unsigned i = 0;
6223       i < CMSYieldSleepCount &&
6224       ConcurrentMarkSweepThread::should_yield() &&
6225       !CMSCollector::foregroundGCIsActive();
6226       ++i) {
6227    os::sleep(Thread::current(), 1, false);
6228  }
6229
6230  ConcurrentMarkSweepThread::synchronize(true);
6231  _freelistLock->lock_without_safepoint_check();
6232  _bit_map->lock()->lock_without_safepoint_check();
6233  _collector->startTimer();
6234}
6235
6236///////////////////////////////////////////////////////////
6237// Par_MarkRefsIntoAndScanClosure: a parallel version of
6238//                                 MarkRefsIntoAndScanClosure
6239///////////////////////////////////////////////////////////
6240Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6241  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6242  CMSBitMap* bit_map, OopTaskQueue* work_queue):
6243  _span(span),
6244  _bit_map(bit_map),
6245  _work_queue(work_queue),
6246  _low_water_mark(MIN2((work_queue->max_elems()/4),
6247                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6248  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6249{
6250  // FIXME: Should initialize in base class constructor.
6251  assert(rp != NULL, "ref_processor shouldn't be NULL");
6252  set_ref_processor_internal(rp);
6253}
6254
6255// This closure is used to mark refs into the CMS generation at the
6256// second (final) checkpoint, and to scan and transitively follow
6257// the unmarked oops. The marks are made in the marking bit map and
6258// the work_queue is used for keeping the (newly) grey objects during
6259// the scan phase whence they are also available for stealing by parallel
6260// threads. Since the marking bit map is shared, updates are
6261// synchronized (via CAS).
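// Only the thread whose par_mark() call actually sets the bit pushes the
// object onto its work queue; other threads racing on the same object simply
// drop it, so each object is pushed (and subsequently scanned) at most once.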
6262void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6263  if (obj != NULL) {
6264    // Ignore mark word because this could be an already marked oop
6265    // that may be chained at the end of the overflow list.
6266    assert(obj->is_oop(true), "expected an oop");
6267    HeapWord* addr = (HeapWord*)obj;
6268    if (_span.contains(addr) &&
6269        !_bit_map->isMarked(addr)) {
6270      // mark bit map (object will become grey):
6271      // It is possible for several threads to be
6272      // trying to "claim" this object concurrently;
6273      // the unique thread that succeeds in marking the
6274      // object first will do the subsequent push on
6275      // to the work queue (or overflow list).
6276      if (_bit_map->par_mark(addr)) {
6277        // push on work_queue (which may not be empty), and trim the
6278        // queue to an appropriate length by applying this closure to
6279        // the oops in the oops popped from the stack (i.e. blacken the
6280        // grey objects)
6281        bool res = _work_queue->push(obj);
6282        assert(res, "Low water mark should be less than capacity?");
6283        trim_queue(_low_water_mark);
6284      } // Else, another thread claimed the object
6285    }
6286  }
6287}
6288
6289void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6290void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6291
6292// This closure is used to rescan the marked objects on the dirty cards
6293// in the mod union table and the card table proper.
6294size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6295  oop p, MemRegion mr) {
6296
6297  size_t size = 0;
6298  HeapWord* addr = (HeapWord*)p;
6299  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6300  assert(_span.contains(addr), "we are scanning the CMS generation");
6301  // check if it's time to yield
6302  if (do_yield_check()) {
6303    // We yielded for some foreground stop-world work,
6304    // and we have been asked to abort this ongoing preclean cycle.
6305    return 0;
6306  }
6307  if (_bitMap->isMarked(addr)) {
6308    // it's marked; is it potentially uninitialized?
6309    if (p->klass_or_null() != NULL) {
6310        // an initialized object; ignore mark word in verification below
6311        // since we are running concurrent with mutators
6312        assert(p->is_oop(true), "should be an oop");
6313        if (p->is_objArray()) {
6314          // objArrays are precisely marked; restrict scanning
6315          // to dirty cards only.
6316          size = CompactibleFreeListSpace::adjustObjectSize(
6317                   p->oop_iterate_size(_scanningClosure, mr));
6318        } else {
6319          // A non-array may have been imprecisely marked; we need
6320          // to scan the object in its entirety.
6321          size = CompactibleFreeListSpace::adjustObjectSize(
6322                   p->oop_iterate_size(_scanningClosure));
6323        }
6324        #ifdef ASSERT
6325          size_t direct_size =
6326            CompactibleFreeListSpace::adjustObjectSize(p->size());
6327          assert(size == direct_size, "Inconsistency in size");
6328          assert(size >= 3, "Necessary for Printezis marks to work");
6329          if (!_bitMap->isMarked(addr+1)) {
6330            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6331          } else {
6332            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6333            assert(_bitMap->isMarked(addr+size-1),
6334                   "inconsistent Printezis mark");
6335          }
6336        #endif // ASSERT
6337    } else {
6338      // An uninitialized object.
6339      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6340      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6341      size = pointer_delta(nextOneAddr + 1, addr);
6342      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6343             "alignment problem");
6344      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6345      // will dirty the card when the klass pointer is installed in the
6346      // object (signaling the completion of initialization).
6347    }
6348  } else {
6349    // Either a not yet marked object or an uninitialized object
6350    if (p->klass_or_null() == NULL) {
6351      // An uninitialized object, skip to the next card, since
6352      // we may not be able to read its P-bits yet.
6353      assert(size == 0, "Initial value");
6354    } else {
6355      // An object not (yet) reached by marking: we merely need to
6356      // compute its size so as to go look at the next block.
6357      assert(p->is_oop(true), "should be an oop");
6358      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6359    }
6360  }
6361  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6362  return size;
6363}
6364
6365void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6366  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6367         "CMS thread should hold CMS token");
6368  assert_lock_strong(_freelistLock);
6369  assert_lock_strong(_bitMap->lock());
6370  // relinquish the free_list_lock and bitMaplock()
6371  _bitMap->lock()->unlock();
6372  _freelistLock->unlock();
6373  ConcurrentMarkSweepThread::desynchronize(true);
6374  _collector->stopTimer();
6375  if (PrintCMSStatistics != 0) {
6376    _collector->incrementYields();
6377  }
6378
6379  // See the comment in coordinator_yield()
6380  for (unsigned i = 0; i < CMSYieldSleepCount &&
6381                   ConcurrentMarkSweepThread::should_yield() &&
6382                   !CMSCollector::foregroundGCIsActive(); ++i) {
6383    os::sleep(Thread::current(), 1, false);
6384  }
6385
6386  ConcurrentMarkSweepThread::synchronize(true);
6387  _freelistLock->lock_without_safepoint_check();
6388  _bitMap->lock()->lock_without_safepoint_check();
6389  _collector->startTimer();
6390}
6391
6392
6393//////////////////////////////////////////////////////////////////
6394// SurvivorSpacePrecleanClosure
6395//////////////////////////////////////////////////////////////////
6396// This (single-threaded) closure is used to preclean the oops in
6397// the survivor spaces.
6398size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6399
6400  HeapWord* addr = (HeapWord*)p;
6401  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6402  assert(!_span.contains(addr), "we are scanning the survivor spaces");
6403  assert(p->klass_or_null() != NULL, "object should be initialized");
6404  // an initialized object; ignore mark word in verification below
6405  // since we are running concurrent with mutators
6406  assert(p->is_oop(true), "should be an oop");
6407  // Note that we do not yield while we iterate over
6408  // the interior oops of p, pushing the relevant ones
6409  // on our marking stack.
6410  size_t size = p->oop_iterate_size(_scanning_closure);
6411  do_yield_check();
6412  // Observe that below, we do not abandon the preclean
6413  // phase as soon as we should; rather we empty the
6414  // marking stack before returning. This is to satisfy
6415  // some existing assertions. In general, it may be a
6416  // good idea to abort immediately and complete the marking
6417  // from the grey objects at a later time.
6418  while (!_mark_stack->isEmpty()) {
6419    oop new_oop = _mark_stack->pop();
6420    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6421    assert(_bit_map->isMarked((HeapWord*)new_oop),
6422           "only grey objects on this stack");
6423    // iterate over the oops in this oop, marking and pushing
6424    // the ones in CMS heap (i.e. in _span).
6425    new_oop->oop_iterate(_scanning_closure);
6426    // check if it's time to yield
6427    do_yield_check();
6428  }
6429  unsigned int after_count =
6430    GenCollectedHeap::heap()->total_collections();
6431  bool abort = (_before_count != after_count) ||
6432               _collector->should_abort_preclean();
6433  return abort ? 0 : size;
6434}
6435
6436void SurvivorSpacePrecleanClosure::do_yield_work() {
6437  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6438         "CMS thread should hold CMS token");
6439  assert_lock_strong(_bit_map->lock());
6440  // Relinquish the bit map lock
6441  _bit_map->lock()->unlock();
6442  ConcurrentMarkSweepThread::desynchronize(true);
6443  _collector->stopTimer();
6444  if (PrintCMSStatistics != 0) {
6445    _collector->incrementYields();
6446  }
6447
6448  // See the comment in coordinator_yield()
6449  for (unsigned i = 0; i < CMSYieldSleepCount &&
6450                       ConcurrentMarkSweepThread::should_yield() &&
6451                       !CMSCollector::foregroundGCIsActive(); ++i) {
6452    os::sleep(Thread::current(), 1, false);
6453  }
6454
6455  ConcurrentMarkSweepThread::synchronize(true);
6456  _bit_map->lock()->lock_without_safepoint_check();
6457  _collector->startTimer();
6458}
6459
6460// This closure is used to rescan the marked objects on the dirty cards
6461// in the mod union table and the card table proper. In the parallel
6462// case, although the bitMap is shared, we do a single read so the
6463// isMarked() query is "safe".
6464bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6465  // Ignore mark word because we are running concurrent with mutators
6466  assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6467  HeapWord* addr = (HeapWord*)p;
6468  assert(_span.contains(addr), "we are scanning the CMS generation");
6469  bool is_obj_array = false;
6470  #ifdef ASSERT
6471    if (!_parallel) {
6472      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6473      assert(_collector->overflow_list_is_empty(),
6474             "overflow list should be empty");
6475
6476    }
6477  #endif // ASSERT
6478  if (_bit_map->isMarked(addr)) {
6479    // Obj arrays are precisely marked, non-arrays are not;
6480    // so we scan objArrays precisely and non-arrays in their
6481    // entirety.
6482    if (p->is_objArray()) {
6483      is_obj_array = true;
6484      if (_parallel) {
6485        p->oop_iterate(_par_scan_closure, mr);
6486      } else {
6487        p->oop_iterate(_scan_closure, mr);
6488      }
6489    } else {
6490      if (_parallel) {
6491        p->oop_iterate(_par_scan_closure);
6492      } else {
6493        p->oop_iterate(_scan_closure);
6494      }
6495    }
6496  }
6497  #ifdef ASSERT
6498    if (!_parallel) {
6499      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6500      assert(_collector->overflow_list_is_empty(),
6501             "overflow list should be empty");
6502
6503    }
6504  #endif // ASSERT
6505  return is_obj_array;
6506}
6507
6508MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6509                        MemRegion span,
6510                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
6511                        bool should_yield, bool verifying):
6512  _collector(collector),
6513  _span(span),
6514  _bitMap(bitMap),
6515  _mut(&collector->_modUnionTable),
6516  _markStack(markStack),
6517  _yield(should_yield),
6518  _skipBits(0)
6519{
6520  assert(_markStack->isEmpty(), "stack should be empty");
6521  _finger = _bitMap->startWord();
6522  _threshold = _finger;
6523  assert(_collector->_restart_addr == NULL, "Sanity check");
6524  assert(_span.contains(_finger), "Out of bounds _finger?");
6525  DEBUG_ONLY(_verifying = verifying;)
6526}
6527
6528void MarkFromRootsClosure::reset(HeapWord* addr) {
6529  assert(_markStack->isEmpty(), "would cause duplicates on stack");
6530  assert(_span.contains(addr), "Out of bounds _finger?");
6531  _finger = addr;
6532  _threshold = (HeapWord*)round_to(
6533                 (intptr_t)_finger, CardTableModRefBS::card_size);
6534}
6535
6536// Should revisit to see if this should be restructured for
6537// greater efficiency.
6538bool MarkFromRootsClosure::do_bit(size_t offset) {
6539  if (_skipBits > 0) {
6540    _skipBits--;
6541    return true;
6542  }
6543  // convert offset into a HeapWord*
6544  HeapWord* addr = _bitMap->startWord() + offset;
6545  assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
6546         "address out of range");
6547  assert(_bitMap->isMarked(addr), "tautology");
6548  if (_bitMap->isMarked(addr+1)) {
6549    // this is an allocated but not yet initialized object
6550    assert(_skipBits == 0, "tautology");
6551    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6552    oop p = oop(addr);
6553    if (p->klass_or_null() == NULL) {
6554      DEBUG_ONLY(if (!_verifying) {)
6555        // We re-dirty the cards on which this object lies and increase
6556        // the _threshold so that we'll come back to scan this object
6557        // during the preclean or remark phase. (CMSCleanOnEnter)
6558        if (CMSCleanOnEnter) {
6559          size_t sz = _collector->block_size_using_printezis_bits(addr);
6560          HeapWord* end_card_addr   = (HeapWord*)round_to(
6561                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6562          MemRegion redirty_range = MemRegion(addr, end_card_addr);
6563          assert(!redirty_range.is_empty(), "Arithmetical tautology");
6564          // Bump _threshold to end_card_addr; note that
6565          // _threshold cannot possibly exceed end_card_addr, anyhow.
6566          // This prevents future clearing of the card as the scan proceeds
6567          // to the right.
6568          assert(_threshold <= end_card_addr,
6569                 "Because we are just scanning into this object");
6570          if (_threshold < end_card_addr) {
6571            _threshold = end_card_addr;
6572          }
6573          if (p->klass_or_null() != NULL) {
6574            // Redirty the range of cards...
6575            _mut->mark_range(redirty_range);
6576          } // ...else the setting of klass will dirty the card anyway.
6577        }
6578      DEBUG_ONLY(})
6579      return true;
6580    }
6581  }
6582  scanOopsInOop(addr);
6583  return true;
6584}
6585
6586// We take a break if we've been at this for a while,
6587// so as to avoid monopolizing the locks involved.
6588void MarkFromRootsClosure::do_yield_work() {
6589  // First give up the locks, then yield, then re-lock
6590  // We should probably use a constructor/destructor idiom to
6591  // do this unlock/lock or modify the MutexUnlocker class to
6592  // serve our purpose. XXX
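  // A hypothetical RAII helper along those lines (sketch only) might
  // wrap the unlock/relock pair done manually below:
  //   class ReverseMutexLockerSketch {
  //     Mutex* _m;
  //    public:
  //     ReverseMutexLockerSketch(Mutex* m) : _m(m) { _m->unlock(); }
  //     ~ReverseMutexLockerSketch() { _m->lock_without_safepoint_check(); }
  //   };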
6593  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6594         "CMS thread should hold CMS token");
6595  assert_lock_strong(_bitMap->lock());
6596  _bitMap->lock()->unlock();
6597  ConcurrentMarkSweepThread::desynchronize(true);
6598  _collector->stopTimer();
6599  if (PrintCMSStatistics != 0) {
6600    _collector->incrementYields();
6601  }
6602
6603  // See the comment in coordinator_yield()
6604  for (unsigned i = 0; i < CMSYieldSleepCount &&
6605                       ConcurrentMarkSweepThread::should_yield() &&
6606                       !CMSCollector::foregroundGCIsActive(); ++i) {
6607    os::sleep(Thread::current(), 1, false);
6608  }
6609
6610  ConcurrentMarkSweepThread::synchronize(true);
6611  _bitMap->lock()->lock_without_safepoint_check();
6612  _collector->startTimer();
6613}
6614
6615void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6616  assert(_bitMap->isMarked(ptr), "expected bit to be set");
6617  assert(_markStack->isEmpty(),
6618         "should drain stack to limit stack usage");
6619  // convert ptr to an oop preparatory to scanning
6620  oop obj = oop(ptr);
6621  // Ignore mark word in verification below, since we
6622  // may be running concurrent with mutators.
6623  assert(obj->is_oop(true), "should be an oop");
6624  assert(_finger <= ptr, "_finger runneth ahead");
6625  // advance the finger to right end of this object
6626  _finger = ptr + obj->size();
6627  assert(_finger > ptr, "we just incremented it above");
6628  // On large heaps, it may take us some time to get through
6629  // the marking phase. During
6630  // this time it's possible that a lot of mutations have
6631  // accumulated in the card table and the mod union table --
6632  // these mutation records are redundant until we have
6633  // actually traced into the corresponding card.
6634  // Here, we check whether advancing the finger would make
6635  // us cross into a new card, and if so clear corresponding
6636  // cards in the MUT (preclean them in the card-table in the
6637  // future).
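  // For illustration (assuming the default 512-byte card size): if
  // _threshold is card-aligned at 0x78000 and the finger has just
  // advanced to 0x78150, then, with CMSCleanOnEnter enabled, the MUT
  // cards covering [0x78000, 0x78200) are cleared below and _threshold
  // is bumped to the card-aligned address 0x78200.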
6638
6639  DEBUG_ONLY(if (!_verifying) {)
6640    // The clean-on-enter optimization is disabled by default,
6641    // until we fix 6178663.
6642    if (CMSCleanOnEnter && (_finger > _threshold)) {
6643      // [_threshold, _finger) represents the interval
6644      // of cards to be cleared in MUT (or precleaned in card table).
6645      // The set of cards to be cleared is all those that overlap
6646      // with the interval [_threshold, _finger); note that
6647      // _threshold is always kept card-aligned but _finger isn't
6648      // always card-aligned.
6649      HeapWord* old_threshold = _threshold;
6650      assert(old_threshold == (HeapWord*)round_to(
6651              (intptr_t)old_threshold, CardTableModRefBS::card_size),
6652             "_threshold should always be card-aligned");
6653      _threshold = (HeapWord*)round_to(
6654                     (intptr_t)_finger, CardTableModRefBS::card_size);
6655      MemRegion mr(old_threshold, _threshold);
6656      assert(!mr.is_empty(), "Control point invariant");
6657      assert(_span.contains(mr), "Should clear within span");
6658      _mut->clear_range(mr);
6659    }
6660  DEBUG_ONLY(})
6661  // Note: the finger doesn't advance while we drain
6662  // the stack below.
6663  PushOrMarkClosure pushOrMarkClosure(_collector,
6664                                      _span, _bitMap, _markStack,
6665                                      _finger, this);
6666  bool res = _markStack->push(obj);
6667  assert(res, "Empty non-zero size stack should have space for single push");
6668  while (!_markStack->isEmpty()) {
6669    oop new_oop = _markStack->pop();
6670    // Skip verifying header mark word below because we are
6671    // running concurrent with mutators.
6672    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6673    // now scan this oop's oops
6674    new_oop->oop_iterate(&pushOrMarkClosure);
6675    do_yield_check();
6676  }
6677  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6678}
6679
6680Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
6681                       CMSCollector* collector, MemRegion span,
6682                       CMSBitMap* bit_map,
6683                       OopTaskQueue* work_queue,
6684                       CMSMarkStack*  overflow_stack):
6685  _collector(collector),
6686  _whole_span(collector->_span),
6687  _span(span),
6688  _bit_map(bit_map),
6689  _mut(&collector->_modUnionTable),
6690  _work_queue(work_queue),
6691  _overflow_stack(overflow_stack),
6692  _skip_bits(0),
6693  _task(task)
6694{
6695  assert(_work_queue->size() == 0, "work_queue should be empty");
6696  _finger = span.start();
6697  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6698  assert(_span.contains(_finger), "Out of bounds _finger?");
6699}
6700
6701// Should revisit to see if this should be restructured for
6702// greater efficiency.
6703bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
6704  if (_skip_bits > 0) {
6705    _skip_bits--;
6706    return true;
6707  }
6708  // convert offset into a HeapWord*
6709  HeapWord* addr = _bit_map->startWord() + offset;
6710  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6711         "address out of range");
6712  assert(_bit_map->isMarked(addr), "tautology");
6713  if (_bit_map->isMarked(addr+1)) {
6714    // this is an allocated object that might not yet be initialized
6715    assert(_skip_bits == 0, "tautology");
6716    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6717    oop p = oop(addr);
6718    if (p->klass_or_null() == NULL) {
6719      // In the case of the clean-on-enter optimization, redirty the card
6720      // and avoid clearing the card by increasing the threshold.
6721      return true;
6722    }
6723  }
6724  scan_oops_in_oop(addr);
6725  return true;
6726}
6727
6728void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6729  assert(_bit_map->isMarked(ptr), "expected bit to be set");
6730  // Should we assert that our work queue is empty or
6731  // below some drain limit?
6732  assert(_work_queue->size() == 0,
6733         "should drain stack to limit stack usage");
6734  // convert ptr to an oop preparatory to scanning
6735  oop obj = oop(ptr);
6736  // Ignore mark word in verification below, since we
6737  // may be running concurrent with mutators.
6738  assert(obj->is_oop(true), "should be an oop");
6739  assert(_finger <= ptr, "_finger runneth ahead");
6740  // advance the finger to right end of this object
6741  _finger = ptr + obj->size();
6742  assert(_finger > ptr, "we just incremented it above");
6743  // On large heaps, it may take us some time to get through
6744  // the marking phase. During
6745  // this time it's possible that a lot of mutations have
6746  // accumulated in the card table and the mod union table --
6747  // these mutation records are redundant until we have
6748  // actually traced into the corresponding card.
6749  // Here, we check whether advancing the finger would make
6750  // us cross into a new card, and if so clear corresponding
6751  // cards in the MUT (preclean them in the card-table in the
6752  // future).
6753
6754  // The clean-on-enter optimization is disabled by default,
6755  // until we fix 6178663.
6756  if (CMSCleanOnEnter && (_finger > _threshold)) {
6757    // [_threshold, _finger) represents the interval
6758    // of cards to be cleared in MUT (or precleaned in card table).
6759    // The set of cards to be cleared is all those that overlap
6760    // with the interval [_threshold, _finger); note that
6761    // _threshold is always kept card-aligned but _finger isn't
6762    // always card-aligned.
6763    HeapWord* old_threshold = _threshold;
6764    assert(old_threshold == (HeapWord*)round_to(
6765            (intptr_t)old_threshold, CardTableModRefBS::card_size),
6766           "_threshold should always be card-aligned");
6767    _threshold = (HeapWord*)round_to(
6768                   (intptr_t)_finger, CardTableModRefBS::card_size);
6769    MemRegion mr(old_threshold, _threshold);
6770    assert(!mr.is_empty(), "Control point invariant");
6771    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6772    _mut->clear_range(mr);
6773  }
6774
6775  // Note: the local finger doesn't advance while we drain
6776  // the stack below, but the global finger sure can and will.
6777  HeapWord** gfa = _task->global_finger_addr();
6778  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
6779                                      _span, _bit_map,
6780                                      _work_queue,
6781                                      _overflow_stack,
6782                                      _finger,
6783                                      gfa, this);
6784  bool res = _work_queue->push(obj);   // overflow could occur here
6785  assert(res, "Will hold once we use workqueues");
6786  while (true) {
6787    oop new_oop;
6788    if (!_work_queue->pop_local(new_oop)) {
6789      // We emptied our work_queue; check if there's stuff that can
6790      // be gotten from the overflow stack.
6791      if (CMSConcMarkingTask::get_work_from_overflow_stack(
6792            _overflow_stack, _work_queue)) {
6793        do_yield_check();
6794        continue;
6795      } else {  // done
6796        break;
6797      }
6798    }
6799    // Skip verifying header mark word below because we are
6800    // running concurrent with mutators.
6801    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6802    // now scan this oop's oops
6803    new_oop->oop_iterate(&pushOrMarkClosure);
6804    do_yield_check();
6805  }
6806  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6807}
6808
6809// Yield in response to a request from VM Thread or
6810// from mutators.
6811void Par_MarkFromRootsClosure::do_yield_work() {
6812  assert(_task != NULL, "sanity");
6813  _task->yield();
6814}
6815
6816// A variant of the above used for verifying CMS marking work.
6817MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6818                        MemRegion span,
6819                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6820                        CMSMarkStack*  mark_stack):
6821  _collector(collector),
6822  _span(span),
6823  _verification_bm(verification_bm),
6824  _cms_bm(cms_bm),
6825  _mark_stack(mark_stack),
6826  _pam_verify_closure(collector, span, verification_bm, cms_bm,
6827                      mark_stack)
6828{
6829  assert(_mark_stack->isEmpty(), "stack should be empty");
6830  _finger = _verification_bm->startWord();
6831  assert(_collector->_restart_addr == NULL, "Sanity check");
6832  assert(_span.contains(_finger), "Out of bounds _finger?");
6833}
6834
6835void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6836  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6837  assert(_span.contains(addr), "Out of bounds _finger?");
6838  _finger = addr;
6839}
6840
6841// Should revisit to see if this should be restructured for
6842// greater efficiency.
6843bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6844  // convert offset into a HeapWord*
6845  HeapWord* addr = _verification_bm->startWord() + offset;
6846  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6847         "address out of range");
6848  assert(_verification_bm->isMarked(addr), "tautology");
6849  assert(_cms_bm->isMarked(addr), "tautology");
6850
6851  assert(_mark_stack->isEmpty(),
6852         "should drain stack to limit stack usage");
6853  // convert addr to an oop preparatory to scanning
6854  oop obj = oop(addr);
6855  assert(obj->is_oop(), "should be an oop");
6856  assert(_finger <= addr, "_finger runneth ahead");
6857  // advance the finger to right end of this object
6858  _finger = addr + obj->size();
6859  assert(_finger > addr, "we just incremented it above");
6860  // Note: the finger doesn't advance while we drain
6861  // the stack below.
6862  bool res = _mark_stack->push(obj);
6863  assert(res, "Empty non-zero size stack should have space for single push");
6864  while (!_mark_stack->isEmpty()) {
6865    oop new_oop = _mark_stack->pop();
6866    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6867    // now scan this oop's oops
6868    new_oop->oop_iterate(&_pam_verify_closure);
6869  }
6870  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6871  return true;
6872}
6873
6874PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6875  CMSCollector* collector, MemRegion span,
6876  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6877  CMSMarkStack*  mark_stack):
6878  MetadataAwareOopClosure(collector->ref_processor()),
6879  _collector(collector),
6880  _span(span),
6881  _verification_bm(verification_bm),
6882  _cms_bm(cms_bm),
6883  _mark_stack(mark_stack)
6884{ }
6885
6886void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6887void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6888
6889// Upon stack overflow, we discard (part of) the stack,
6890// remembering the least address amongst those discarded
6891// in CMSCollector's _restart_addr.
6892void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6893  // Remember the least grey address discarded
6894  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6895  _collector->lower_restart_addr(ra);
6896  _mark_stack->reset();  // discard stack contents
6897  _mark_stack->expand(); // expand the stack if possible
6898}
6899
6900void PushAndMarkVerifyClosure::do_oop(oop obj) {
6901  assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6902  HeapWord* addr = (HeapWord*)obj;
6903  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6904    // Oop lies in _span and isn't yet grey or black
6905    _verification_bm->mark(addr);            // now grey
6906    if (!_cms_bm->isMarked(addr)) {
6907      oop(addr)->print();
6908      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6909                             p2i(addr));
6910      fatal("... aborting");
6911    }
6912
6913    if (!_mark_stack->push(obj)) { // stack overflow
6914      if (PrintCMSStatistics != 0) {
6915        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6916                               SIZE_FORMAT, _mark_stack->capacity());
6917      }
6918      assert(_mark_stack->isFull(), "Else push should have succeeded");
6919      handle_stack_overflow(addr);
6920    }
6921    // anything including and to the right of _finger
6922    // will be scanned as we iterate over the remainder of the
6923    // bit map
6924  }
6925}
6926
6927PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6928                     MemRegion span,
6929                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
6930                     HeapWord* finger, MarkFromRootsClosure* parent) :
6931  MetadataAwareOopClosure(collector->ref_processor()),
6932  _collector(collector),
6933  _span(span),
6934  _bitMap(bitMap),
6935  _markStack(markStack),
6936  _finger(finger),
6937  _parent(parent)
6938{ }
6939
6940Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
6941                     MemRegion span,
6942                     CMSBitMap* bit_map,
6943                     OopTaskQueue* work_queue,
6944                     CMSMarkStack*  overflow_stack,
6945                     HeapWord* finger,
6946                     HeapWord** global_finger_addr,
6947                     Par_MarkFromRootsClosure* parent) :
6948  MetadataAwareOopClosure(collector->ref_processor()),
6949  _collector(collector),
6950  _whole_span(collector->_span),
6951  _span(span),
6952  _bit_map(bit_map),
6953  _work_queue(work_queue),
6954  _overflow_stack(overflow_stack),
6955  _finger(finger),
6956  _global_finger_addr(global_finger_addr),
6957  _parent(parent)
6958{ }
6959
6960// Assumes thread-safe access by callers, who are
6961// responsible for mutual exclusion.
6962void CMSCollector::lower_restart_addr(HeapWord* low) {
6963  assert(_span.contains(low), "Out of bounds addr");
6964  if (_restart_addr == NULL) {
6965    _restart_addr = low;
6966  } else {
6967    _restart_addr = MIN2(_restart_addr, low);
6968  }
6969}
6970
6971// Upon stack overflow, we discard (part of) the stack,
6972// remembering the least address amongst those discarded
6973// in CMSCollector's _restart_addr.
6974void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6975  // Remember the least grey address discarded
6976  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6977  _collector->lower_restart_addr(ra);
6978  _markStack->reset();  // discard stack contents
6979  _markStack->expand(); // expand the stack if possible
6980}
6981
6982// Upon stack overflow, we discard (part of) the stack,
6983// remembering the least address amongst those discarded
6984// in CMSCollector's _restart_addr.
6985void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6986  // We need to do this under a mutex to prevent other
6987  // workers from interfering with the work done below.
6988  MutexLockerEx ml(_overflow_stack->par_lock(),
6989                   Mutex::_no_safepoint_check_flag);
6990  // Remember the least grey address discarded
6991  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6992  _collector->lower_restart_addr(ra);
6993  _overflow_stack->reset();  // discard stack contents
6994  _overflow_stack->expand(); // expand the stack if possible
6995}
6996
6997void PushOrMarkClosure::do_oop(oop obj) {
6998  // Ignore mark word because we are running concurrent with mutators.
6999  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
7000  HeapWord* addr = (HeapWord*)obj;
7001  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7002    // Oop lies in _span and isn't yet grey or black
7003    _bitMap->mark(addr);            // now grey
7004    if (addr < _finger) {
7005      // the bit map iteration has already either passed, or
7006      // sampled, this bit in the bit map; we'll need to
7007      // use the marking stack to scan this oop's oops.
7008      bool simulate_overflow = false;
7009      NOT_PRODUCT(
7010        if (CMSMarkStackOverflowALot &&
7011            _collector->simulate_overflow()) {
7012          // simulate a stack overflow
7013          simulate_overflow = true;
7014        }
7015      )
7016      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7017        if (PrintCMSStatistics != 0) {
7018          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7019                                 SIZE_FORMAT, _markStack->capacity());
7020        }
7021        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7022        handle_stack_overflow(addr);
7023      }
7024    }
7025    // anything including and to the right of _finger
7026    // will be scanned as we iterate over the remainder of the
7027    // bit map
7028    do_yield_check();
7029  }
7030}
7031
7032void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7033void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7034
7035void Par_PushOrMarkClosure::do_oop(oop obj) {
7036  // Ignore mark word because we are running concurrent with mutators.
7037  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
7038  HeapWord* addr = (HeapWord*)obj;
7039  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7040    // Oop lies in _whole_span and isn't yet grey or black
7041    // We read the global_finger (volatile read) strictly after marking oop
7042    bool res = _bit_map->par_mark(addr);    // now grey
7043    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7044    // Should we push this marked oop on our stack?
7045    // -- if someone else marked it, nothing to do
7046    // -- if target oop is above global finger nothing to do
7047    // -- if target oop is in chunk and above local finger
7048    //      then nothing to do
7049    // -- else push on work queue
7050    if (   !res       // someone else marked it, they will deal with it
7051        || (addr >= *gfa)  // will be scanned in a later task
7052        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7053      return;
7054    }
7055    // the bit map iteration has already either passed, or
7056    // sampled, this bit in the bit map; we'll need to
7057    // use the marking stack to scan this oop's oops.
7058    bool simulate_overflow = false;
7059    NOT_PRODUCT(
7060      if (CMSMarkStackOverflowALot &&
7061          _collector->simulate_overflow()) {
7062        // simulate a stack overflow
7063        simulate_overflow = true;
7064      }
7065    )
7066    if (simulate_overflow ||
7067        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7068      // stack overflow
7069      if (PrintCMSStatistics != 0) {
7070        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7071                               SIZE_FORMAT, _overflow_stack->capacity());
7072      }
7073      // We cannot assert that the overflow stack is full because
7074      // it may have been emptied since.
7075      assert(simulate_overflow ||
7076             _work_queue->size() == _work_queue->max_elems(),
7077            "Else push should have succeeded");
7078      handle_stack_overflow(addr);
7079    }
7080    do_yield_check();
7081  }
7082}
7083
7084void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7085void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7086
7087PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7088                                       MemRegion span,
7089                                       ReferenceProcessor* rp,
7090                                       CMSBitMap* bit_map,
7091                                       CMSBitMap* mod_union_table,
7092                                       CMSMarkStack*  mark_stack,
7093                                       bool           concurrent_precleaning):
7094  MetadataAwareOopClosure(rp),
7095  _collector(collector),
7096  _span(span),
7097  _bit_map(bit_map),
7098  _mod_union_table(mod_union_table),
7099  _mark_stack(mark_stack),
7100  _concurrent_precleaning(concurrent_precleaning)
7101{
7102  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
7103}
7104
7105// Grey object rescan during pre-cleaning and second checkpoint phases --
7106// the non-parallel version (the parallel version appears further below.)
7107void PushAndMarkClosure::do_oop(oop obj) {
7108  // Ignore mark word verification. During concurrent precleaning,
7109  // the object's monitor may be locked. During the checkpoint
7110  // phases, the object may already have been reached by a different
7111  // path and may be at the end of the global overflow list (so
7112  // the mark word may be NULL).
7113  assert(obj->is_oop_or_null(true /* ignore mark word */),
7114         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
7115  HeapWord* addr = (HeapWord*)obj;
7116  // Check if oop points into the CMS generation
7117  // and is not marked
7118  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7119    // a white object ...
7120    _bit_map->mark(addr);         // ... now grey
7121    // push on the marking stack (grey set)
7122    bool simulate_overflow = false;
7123    NOT_PRODUCT(
7124      if (CMSMarkStackOverflowALot &&
7125          _collector->simulate_overflow()) {
7126        // simulate a stack overflow
7127        simulate_overflow = true;
7128      }
7129    )
7130    if (simulate_overflow || !_mark_stack->push(obj)) {
7131      if (_concurrent_precleaning) {
7132         // During precleaning we can just dirty the appropriate card(s)
7133         // in the mod union table, thus ensuring that the object remains
7134         // in the grey set, and then continue. In the case of object arrays
7135         // we need to dirty all of the cards that the object spans,
7136         // since the rescan of object arrays will be limited to the
7137         // dirty cards.
7138         // Note that no one can be interfering with us in this action
7139         // of dirtying the mod union table, so no locking or atomics
7140         // are required.
7141         if (obj->is_objArray()) {
7142           size_t sz = obj->size();
7143           HeapWord* end_card_addr = (HeapWord*)round_to(
7144                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7145           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7146           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7147           _mod_union_table->mark_range(redirty_range);
7148         } else {
7149           _mod_union_table->mark(addr);
7150         }
7151         _collector->_ser_pmc_preclean_ovflw++;
7152      } else {
7153         // During the remark phase, we need to remember this oop
7154         // in the overflow list.
7155         _collector->push_on_overflow_list(obj);
7156         _collector->_ser_pmc_remark_ovflw++;
7157      }
7158    }
7159  }
7160}
7161
7162Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7163                                               MemRegion span,
7164                                               ReferenceProcessor* rp,
7165                                               CMSBitMap* bit_map,
7166                                               OopTaskQueue* work_queue):
7167  MetadataAwareOopClosure(rp),
7168  _collector(collector),
7169  _span(span),
7170  _bit_map(bit_map),
7171  _work_queue(work_queue)
7172{
7173  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
7174}
7175
7176void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7177void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7178
7179// Grey object rescan during second checkpoint phase --
7180// the parallel version.
7181void Par_PushAndMarkClosure::do_oop(oop obj) {
7182  // In the assert below, we ignore the mark word because
7183  // this oop may point to an already visited object that is
7184  // on the overflow stack (in which case the mark word has
7185  // been hijacked for chaining into the overflow stack --
7186  // if this is the last object in the overflow stack then
7187  // its mark word will be NULL). Because this object may
7188  // have been subsequently popped off the global overflow
7189  // stack, and the mark word possibly restored to the prototypical
7190  // value, by the time we get to examine this failing assert in
7191  // the debugger, is_oop_or_null(false) may subsequently start
7192  // to hold.
7193  assert(obj->is_oop_or_null(true),
7194         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
7195  HeapWord* addr = (HeapWord*)obj;
7196  // Check if oop points into the CMS generation
7197  // and is not marked
7198  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7199    // a white object ...
7200    // If we manage to "claim" the object, by being the
7201    // first thread to mark it, then we push it on our
7202    // marking stack
7203    if (_bit_map->par_mark(addr)) {     // ... now grey
7204      // push on work queue (grey set)
7205      bool simulate_overflow = false;
7206      NOT_PRODUCT(
7207        if (CMSMarkStackOverflowALot &&
7208            _collector->par_simulate_overflow()) {
7209          // simulate a stack overflow
7210          simulate_overflow = true;
7211        }
7212      )
7213      if (simulate_overflow || !_work_queue->push(obj)) {
7214        _collector->par_push_on_overflow_list(obj);
7215        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7216      }
7217    } // Else, some other thread got there first
7218  }
7219}
7220
7221void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7222void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7223
7224void CMSPrecleanRefsYieldClosure::do_yield_work() {
7225  Mutex* bml = _collector->bitMapLock();
7226  assert_lock_strong(bml);
7227  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7228         "CMS thread should hold CMS token");
7229
7230  bml->unlock();
7231  ConcurrentMarkSweepThread::desynchronize(true);
7232
7233  _collector->stopTimer();
7234  if (PrintCMSStatistics != 0) {
7235    _collector->incrementYields();
7236  }
7237
7238  // See the comment in coordinator_yield()
7239  for (unsigned i = 0; i < CMSYieldSleepCount &&
7240                       ConcurrentMarkSweepThread::should_yield() &&
7241                       !CMSCollector::foregroundGCIsActive(); ++i) {
7242    os::sleep(Thread::current(), 1, false);
7243  }
7244
7245  ConcurrentMarkSweepThread::synchronize(true);
7246  bml->lock();
7247
7248  _collector->startTimer();
7249}
7250
7251bool CMSPrecleanRefsYieldClosure::should_return() {
7252  if (ConcurrentMarkSweepThread::should_yield()) {
7253    do_yield_work();
7254  }
7255  return _collector->foregroundGCIsActive();
7256}
7257
7258void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7259  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7260         "mr should be aligned to start at a card boundary");
7261  // We'd like to assert:
7262  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7263  //        "mr should be a range of cards");
7264  // However, that would be too strong in one case -- the last
7265  // partition ends at _unallocated_block which, in general, can be
7266  // an arbitrary boundary, not necessarily card aligned.
7267  if (PrintCMSStatistics != 0) {
7268    _num_dirty_cards +=
7269         mr.word_size()/CardTableModRefBS::card_size_in_words;
7270  }
7271  _space->object_iterate_mem(mr, &_scan_cl);
7272}
7273
7274SweepClosure::SweepClosure(CMSCollector* collector,
7275                           ConcurrentMarkSweepGeneration* g,
7276                           CMSBitMap* bitMap, bool should_yield) :
7277  _collector(collector),
7278  _g(g),
7279  _sp(g->cmsSpace()),
7280  _limit(_sp->sweep_limit()),
7281  _freelistLock(_sp->freelistLock()),
7282  _bitMap(bitMap),
7283  _yield(should_yield),
7284  _inFreeRange(false),           // No free range at beginning of sweep
7285  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7286  _lastFreeRangeCoalesced(false),
7287  _freeFinger(g->used_region().start())
7288{
7289  NOT_PRODUCT(
7290    _numObjectsFreed = 0;
7291    _numWordsFreed   = 0;
7292    _numObjectsLive = 0;
7293    _numWordsLive = 0;
7294    _numObjectsAlreadyFree = 0;
7295    _numWordsAlreadyFree = 0;
7296    _last_fc = NULL;
7297
7298    _sp->initializeIndexedFreeListArrayReturnedBytes();
7299    _sp->dictionary()->initialize_dict_returned_bytes();
7300  )
7301  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7302         "sweep _limit out of bounds");
7303  if (CMSTraceSweeper) {
7304    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7305                        p2i(_limit));
7306  }
7307}
7308
7309void SweepClosure::print_on(outputStream* st) const {
7310  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7311                p2i(_sp->bottom()), p2i(_sp->end()));
7312  tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7313  tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7314  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7315  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7316                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7317}
7318
7319#ifndef PRODUCT
7320// Assertion checking only:  no useful work in product mode --
7321// however, if any of the flags below become product flags,
7322// you may need to review this code to see if it needs to be
7323// enabled in product mode.
7324SweepClosure::~SweepClosure() {
7325  assert_lock_strong(_freelistLock);
7326  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7327         "sweep _limit out of bounds");
7328  if (inFreeRange()) {
7329    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7330    print();
7331    ShouldNotReachHere();
7332  }
7333  if (Verbose && PrintGC) {
7334    gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7335                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7336    gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects,  "
7337                           SIZE_FORMAT " bytes  "
7338      "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7339      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7340      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7341    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7342                        * sizeof(HeapWord);
7343    gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7344
7345    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7346      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7347      size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7348      size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7349      gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7350      gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
7351        indexListReturnedBytes);
7352      gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
7353        dict_returned_bytes);
7354    }
7355  }
7356  if (CMSTraceSweeper) {
7357    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7358                           p2i(_limit));
7359  }
7360}
7361#endif  // PRODUCT
7362
7363void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7364    bool freeRangeInFreeLists) {
7365  if (CMSTraceSweeper) {
7366    gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7367               p2i(freeFinger), freeRangeInFreeLists);
7368  }
7369  assert(!inFreeRange(), "Trampling existing free range");
7370  set_inFreeRange(true);
7371  set_lastFreeRangeCoalesced(false);
7372
7373  set_freeFinger(freeFinger);
7374  set_freeRangeInFreeLists(freeRangeInFreeLists);
7375  if (CMSTestInFreeList) {
7376    if (freeRangeInFreeLists) {
7377      FreeChunk* fc = (FreeChunk*) freeFinger;
7378      assert(fc->is_free(), "A chunk on the free list should be free.");
7379      assert(fc->size() > 0, "Free range should have a size");
7380      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7381    }
7382  }
7383}
7384
7385// Note that the sweeper runs concurrently with mutators. Thus,
7386// it is possible for direct allocation in this generation to happen
7387// in the middle of the sweep. Note that the sweeper also coalesces
7388// contiguous free blocks. Thus, unless the sweeper and the allocator
7389// synchronize appropriately, freshly allocated blocks may get swept up.
7390// This is accomplished by the sweeper locking the free lists while
7391// it is sweeping. Thus blocks that are determined to be free are
7392// indeed free. There is however one additional complication:
7393// blocks that have been allocated since the final checkpoint and
7394// mark, will not have been marked and so would be treated as
7395// unreachable and swept up. To prevent this, the allocator marks
7396// the bit map when allocating during the sweep phase. This leads,
7397// however, to a further complication -- objects may have been allocated
7398// but not yet initialized -- in the sense that the header isn't yet
7399// installed. The sweeper can not then determine the size of the block
7400// in order to skip over it. To deal with this case, we use a technique
7401// (due to Printezis) to encode such uninitialized block sizes in the
7402// bit map. Since the bit map uses a bit per every HeapWord, but the
7403// CMS generation has a minimum object size of 3 HeapWords, it follows
7404// that "normal marks" won't be adjacent in the bit map (there will
7405// always be at least two 0 bits between successive 1 bits). We make use
7406// of these "unused" bits to represent uninitialized blocks -- the bit
7407// corresponding to the start of the uninitialized object and the next
7408// bit are both set. Finally, a 1 bit marks the end of the object that
7409// started with the two consecutive 1 bits to indicate its potentially
7410// uninitialized state.
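// For illustration, consider a block of 8 HeapWords starting at word w
// (each digit below is one bit-map bit, for words w, w+1, ..., w+7):
//
//   initialized, marked object:  1 0 0 0 0 0 0 0
//   uninitialized block:         1 1 0 0 0 0 0 1   (Printezis pair at w and w+1,
//                                                    end-of-block bit at w+7)
//
// Because ordinary marks are always at least three words apart, the
// adjacent pair of 1 bits is unambiguous, and the sweeper (see
// do_live_chunk() below) can recover the block size as
// (w + 7) + 1 - w = 8 words without reading the object's header.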
7411
7412size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7413  FreeChunk* fc = (FreeChunk*)addr;
7414  size_t res;
7415
7416  // Check if we are done sweeping. Below we check "addr >= _limit" rather
7417  // than "addr == _limit" because although _limit was a block boundary when
7418  // we started the sweep, it may no longer be one because heap expansion
7419  // may have caused us to coalesce the block ending at the address _limit
7420  // with a newly expanded chunk (this happens when _limit was set to the
7421  // previous _end of the space), so we may have stepped past _limit:
7422  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7423  if (addr >= _limit) { // we have swept up to or past the limit: finish up
7424    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7425           "sweep _limit out of bounds");
7426    assert(addr < _sp->end(), "addr out of bounds");
7427    // Flush any free range we might be holding as a single
7428    // coalesced chunk to the appropriate free list.
7429    if (inFreeRange()) {
7430      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7431             "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7432      flush_cur_free_chunk(freeFinger(),
7433                           pointer_delta(addr, freeFinger()));
7434      if (CMSTraceSweeper) {
7435        gclog_or_tty->print("Sweep: last chunk: ");
7436        gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7437                   "[coalesced:%d]\n",
7438                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7439                   lastFreeRangeCoalesced() ? 1 : 0);
7440      }
7441    }
7442
7443    // help the iterator loop finish
7444    return pointer_delta(_sp->end(), addr);
7445  }
7446
7447  assert(addr < _limit, "sweep invariant");
7448  // check if we should yield
7449  do_yield_check(addr);
7450  if (fc->is_free()) {
7451    // Chunk that is already free
7452    res = fc->size();
7453    do_already_free_chunk(fc);
7454    debug_only(_sp->verifyFreeLists());
7455    // If we flush the chunk at hand in lookahead_and_flush()
7456    // and it's coalesced with a preceding chunk, then the
7457    // process of "mangling" the payload of the coalesced block
7458    // will cause erasure of the size information from the
7459    // (erstwhile) header of all the coalesced blocks but the
7460    // first, so the first disjunct in the assert will not hold
7461    // in that specific case (in which case the second disjunct
7462    // will hold).
7463    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7464           "Otherwise the size info doesn't change at this step");
7465    NOT_PRODUCT(
7466      _numObjectsAlreadyFree++;
7467      _numWordsAlreadyFree += res;
7468    )
7469    NOT_PRODUCT(_last_fc = fc;)
7470  } else if (!_bitMap->isMarked(addr)) {
7471    // Chunk is fresh garbage
7472    res = do_garbage_chunk(fc);
7473    debug_only(_sp->verifyFreeLists());
7474    NOT_PRODUCT(
7475      _numObjectsFreed++;
7476      _numWordsFreed += res;
7477    )
7478  } else {
7479    // Chunk that is alive.
7480    res = do_live_chunk(fc);
7481    debug_only(_sp->verifyFreeLists());
7482    NOT_PRODUCT(
7483        _numObjectsLive++;
7484        _numWordsLive += res;
7485    )
7486  }
7487  return res;
7488}
7489
7490// For the smart allocation, record the following:
7491//  split deaths - a free chunk is removed from its free list because
7492//      it is being split into two or more chunks.
7493//  split birth - a free chunk is being added to its free list because
7494//      a larger free chunk has been split and resulted in this free chunk.
7495//  coal death - a free chunk is being removed from its free list because
7496//      it is being coalesced into a larger free chunk.
7497//  coal birth - a free chunk is being added to its free list because
7498//      it was created when two or more free chunks were coalesced into
7499//      this free chunk.
7500//
7501// These statistics are used to determine the desired number of free
7502// chunks of a given size.  The desired number is chosen to be relative
7503// to the end of a CMS sweep.  The desired number at the end of a sweep
7504// is the
7505//      count-at-end-of-previous-sweep (an amount that was enough)
7506//              - count-at-beginning-of-current-sweep  (the excess)
7507//              + split-births  (gains in this size during interval)
7508//              - split-deaths  (demands on this size during interval)
7509// where the interval is from the end of one sweep to the end of the
7510// next.
7511//
7512// When sweeping the sweeper maintains an accumulated chunk which is
7513// the chunk that is made up of chunks that have been coalesced.  That
7514// will be termed the left-hand chunk.  A new chunk of garbage that
7515// is being considered for coalescing will be referred to as the
7516// right-hand chunk.
7517//
7518// When making a decision on whether to coalesce a right-hand chunk with
7519// the current left-hand chunk, the current count vs. the desired count
7520// of the left-hand chunk is considered.  Also if the right-hand chunk
7521// is near the large chunk at the end of the heap (see
7522// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7523// left-hand chunk is coalesced.
7524//
7525// When making a decision about whether to split a chunk, the desired count
7526// vs. the current count of the candidate to be split is also considered.
7527// If the candidate is underpopulated (currently fewer chunks than desired)
7528// a chunk of an overpopulated (currently more chunks than desired) size may
7529// be chosen.  The "hint" associated with a free list, if non-null, points
7530// to a free list which may be overpopulated.
7531//
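// As a purely illustrative example of the desired-count computation: if a
// given chunk size ended the previous sweep with 100 free chunks, 40 of
// them remain at the beginning of the current sweep, and the interval saw
// 25 split births and 10 split deaths, the desired count at the end of
// this sweep is 100 - 40 + 25 - 10 = 75 chunks.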
7532
7533void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7534  const size_t size = fc->size();
7535  // Chunks that cannot be coalesced are not in the
7536  // free lists.
7537  if (CMSTestInFreeList && !fc->cantCoalesce()) {
7538    assert(_sp->verify_chunk_in_free_list(fc),
7539      "free chunk should be in free lists");
7540  }
7541  // a chunk that is already free, should not have been
7542  // marked in the bit map
7543  HeapWord* const addr = (HeapWord*) fc;
7544  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7545  // Verify that the bit map has no bits marked between
7546  // addr and purported end of this block.
7547  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7548
7549  // Some chunks cannot be coalesced under any circumstances.
7550  // See the definition of cantCoalesce().
7551  if (!fc->cantCoalesce()) {
7552    // This chunk can potentially be coalesced.
7553    if (_sp->adaptive_freelists()) {
7554      // All the work is done in
7555      do_post_free_or_garbage_chunk(fc, size);
7556    } else {  // Not adaptive free lists
7557      // this is a free chunk that can potentially be coalesced by the sweeper;
7558      if (!inFreeRange()) {
7559        // if the next chunk is a free block that can't be coalesced
7560        // it doesn't make sense to remove this chunk from the free lists
7561        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7562        assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
7563        if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
7564            nextChunk->is_free()               &&     // ... which is free...
7565            nextChunk->cantCoalesce()) {             // ... but can't be coalesced
7566          // nothing to do
7567        } else {
7568          // Potentially the start of a new free range:
7569          // Don't eagerly remove it from the free lists.
7570          // No need to remove it if it will just be put
7571          // back again.  (Also from a pragmatic point of view
7572          // if it is a free block in a region that is beyond
7573          // any allocated blocks, an assertion will fail)
7574          // Remember the start of a free run.
7575          initialize_free_range(addr, true);
7576          // end - can coalesce with next chunk
7577        }
7578      } else {
7579        // the midst of a free range, we are coalescing
7580        print_free_block_coalesced(fc);
7581        if (CMSTraceSweeper) {
7582          gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7583        }
7584        // remove it from the free lists
7585        _sp->removeFreeChunkFromFreeLists(fc);
7586        set_lastFreeRangeCoalesced(true);
7587        // If the chunk is being coalesced and the current free range is
7588        // in the free lists, remove the current free range so that it
7589        // will be returned to the free lists in its entirety - all
7590        // the coalesced pieces included.
7591        if (freeRangeInFreeLists()) {
7592          FreeChunk* ffc = (FreeChunk*) freeFinger();
7593          assert(ffc->size() == pointer_delta(addr, freeFinger()),
7594            "Size of free range is inconsistent with chunk size.");
7595          if (CMSTestInFreeList) {
7596            assert(_sp->verify_chunk_in_free_list(ffc),
7597              "free range is not in free lists");
7598          }
7599          _sp->removeFreeChunkFromFreeLists(ffc);
7600          set_freeRangeInFreeLists(false);
7601        }
7602      }
7603    }
7604    // Note that if the chunk is not coalescable (the else arm
7605    // below), we unconditionally flush, without needing to do
7606    // a "lookahead," as we do below.
7607    if (inFreeRange()) lookahead_and_flush(fc, size);
7608  } else {
7609    // Code path common to both original and adaptive free lists.
7610
7611    // Can't coalesce with the previous block; this should be treated
7612    // as the end of a free run, if any.
7613    if (inFreeRange()) {
7614      // we kicked some butt; time to pick up the garbage
7615      assert(freeFinger() < addr, "freeFinger points too high");
7616      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7617    }
7618    // else, nothing to do, just continue
7619  }
7620}
7621
7622size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7623  // This is a chunk of garbage.  It is not in any free list.
7624  // Add it to a free list or let it possibly be coalesced into
7625  // a larger chunk.
7626  HeapWord* const addr = (HeapWord*) fc;
7627  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7628
7629  if (_sp->adaptive_freelists()) {
7630    // Verify that the bit map has no bits marked between
7631    // addr and purported end of just dead object.
7632    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7633
7634    do_post_free_or_garbage_chunk(fc, size);
7635  } else {
7636    if (!inFreeRange()) {
7637      // start of a new free range
7638      assert(size > 0, "A free range should have a size");
7639      initialize_free_range(addr, false);
7640    } else {
7641      // this will be swept up when we hit the end of the
7642      // free range
7643      if (CMSTraceSweeper) {
7644        gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7645      }
7646      // If the chunk is being coalesced and the current free range is
7647      // in the free lists, remove the current free range so that it
7648      // will be returned to the free lists in its entirety - all
7649      // the coalesced pieces included.
7650      if (freeRangeInFreeLists()) {
7651        FreeChunk* ffc = (FreeChunk*)freeFinger();
7652        assert(ffc->size() == pointer_delta(addr, freeFinger()),
7653          "Size of free range is inconsistent with chunk size.");
7654        if (CMSTestInFreeList) {
7655          assert(_sp->verify_chunk_in_free_list(ffc),
7656            "free range is not in free lists");
7657        }
7658        _sp->removeFreeChunkFromFreeLists(ffc);
7659        set_freeRangeInFreeLists(false);
7660      }
7661      set_lastFreeRangeCoalesced(true);
7662    }
7663    // this will be swept up when we hit the end of the free range
7664
7665    // Verify that the bit map has no bits marked between
7666    // addr and purported end of just dead object.
7667    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7668  }
7669  assert(_limit >= addr + size,
7670         "A freshly garbage chunk can't possibly straddle over _limit");
7671  if (inFreeRange()) lookahead_and_flush(fc, size);
7672  return size;
7673}
7674
7675size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7676  HeapWord* addr = (HeapWord*) fc;
7677  // The sweeper has just found a live object. Return any accumulated
7678  // left hand chunk to the free lists.
7679  if (inFreeRange()) {
7680    assert(freeFinger() < addr, "freeFinger points too high");
7681    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7682  }
7683
7684  // This object is live: we'd normally expect this to be
7685  // an oop, and like to assert the following:
7686  // assert(oop(addr)->is_oop(), "live block should be an oop");
7687  // However, as we commented above, this may be an object whose
7688  // header hasn't yet been initialized.
7689  size_t size;
7690  assert(_bitMap->isMarked(addr), "Tautology for this control point");
7691  if (_bitMap->isMarked(addr + 1)) {
7692    // Determine the size from the bit map, rather than trying to
7693    // compute it from the object header.
7694    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7695    size = pointer_delta(nextOneAddr + 1, addr);
7696    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7697           "alignment problem");
7698
7699#ifdef ASSERT
7700      if (oop(addr)->klass_or_null() != NULL) {
7701        // Ignore mark word because we are running concurrent with mutators
7702        assert(oop(addr)->is_oop(true), "live block should be an oop");
7703        assert(size ==
7704               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7705               "P-mark and computed size do not agree");
7706      }
7707#endif
7708
7709  } else {
7710    // This should be an initialized object that's alive.
7711    assert(oop(addr)->klass_or_null() != NULL,
7712           "Should be an initialized object");
7713    // Ignore mark word because we are running concurrent with mutators
7714    assert(oop(addr)->is_oop(true), "live block should be an oop");
7715    // Verify that the bit map has no bits marked between
7716    // addr and purported end of this block.
7717    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7718    assert(size >= 3, "Necessary for Printezis marks to work");
7719    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7720    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7721  }
7722  return size;
7723}
7724
7725void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7726                                                 size_t chunkSize) {
7727  // do_post_free_or_garbage_chunk() should only be called in the case
7728  // of the adaptive free list allocator.
7729  const bool fcInFreeLists = fc->is_free();
7730  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7731  assert((HeapWord*)fc <= _limit, "sweep invariant");
7732  if (CMSTestInFreeList && fcInFreeLists) {
7733    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7734  }
7735
7736  if (CMSTraceSweeper) {
7737    gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7738  }
7739
7740  HeapWord* const fc_addr = (HeapWord*) fc;
7741
7742  bool coalesce;
7743  const size_t left  = pointer_delta(fc_addr, freeFinger());
7744  const size_t right = chunkSize;
7745  switch (FLSCoalescePolicy) {
7746    // numeric value forms a coalescing aggressiveness metric
7747    case 0:  { // never coalesce
7748      coalesce = false;
7749      break;
7750    }
7751    case 1: { // coalesce if left & right chunks on overpopulated lists
7752      coalesce = _sp->coalOverPopulated(left) &&
7753                 _sp->coalOverPopulated(right);
7754      break;
7755    }
7756    case 2: { // coalesce if left chunk on overpopulated list (default)
7757      coalesce = _sp->coalOverPopulated(left);
7758      break;
7759    }
7760    case 3: { // coalesce if left OR right chunk on overpopulated list
7761      coalesce = _sp->coalOverPopulated(left) ||
7762                 _sp->coalOverPopulated(right);
7763      break;
7764    }
7765    case 4: { // always coalesce
7766      coalesce = true;
7767      break;
7768    }
7769    default:
7770     ShouldNotReachHere();
7771  }
7772
7773  // Should the current free range be coalesced?
7774  // If the chunk is in a free range and either we decided to coalesce above
7775  // or the chunk is near the large block at the end of the heap
7776  // (isNearLargestChunk() returns true), then coalesce this chunk.
7777  const bool doCoalesce = inFreeRange()
7778                          && (coalesce || _g->isNearLargestChunk(fc_addr));
7779  if (doCoalesce) {
7780    // Coalesce the current free range on the left with the new
7781    // chunk on the right.  If either is on a free list,
7782    // it must be removed from the list and stashed in the closure.
7783    if (freeRangeInFreeLists()) {
7784      FreeChunk* const ffc = (FreeChunk*)freeFinger();
7785      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7786        "Size of free range is inconsistent with chunk size.");
7787      if (CMSTestInFreeList) {
7788        assert(_sp->verify_chunk_in_free_list(ffc),
7789          "Chunk is not in free lists");
7790      }
7791      _sp->coalDeath(ffc->size());
7792      _sp->removeFreeChunkFromFreeLists(ffc);
7793      set_freeRangeInFreeLists(false);
7794    }
7795    if (fcInFreeLists) {
7796      _sp->coalDeath(chunkSize);
7797      assert(fc->size() == chunkSize,
7798        "The chunk has the wrong size or is not in the free lists");
7799      _sp->removeFreeChunkFromFreeLists(fc);
7800    }
7801    set_lastFreeRangeCoalesced(true);
7802    print_free_block_coalesced(fc);
7803  } else {  // not in a free range and/or should not coalesce
7804    // Return the current free range and start a new one.
7805    if (inFreeRange()) {
7806      // In a free range but cannot coalesce with the right hand chunk.
7807      // Put the current free range into the free lists.
7808      flush_cur_free_chunk(freeFinger(),
7809                           pointer_delta(fc_addr, freeFinger()));
7810    }
7811    // Set up for new free range.  Pass along whether the right hand
7812    // chunk is in the free lists.
7813    initialize_free_range((HeapWord*)fc, fcInFreeLists);
7814  }
7815}
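
// Illustrative sketch (editor's addition, not used by the collector): the
// FLSCoalescePolicy decision above, restated as a standalone helper. The two
// booleans stand in for _sp->coalOverPopulated(left) / (right) and are
// assumptions made purely for illustration.
static bool fls_coalesce_decision_sketch(int policy,
                                         bool left_over_populated,
                                         bool right_over_populated) {
  switch (policy) {
    case 0: return false;                                        // never coalesce
    case 1: return left_over_populated && right_over_populated;  // both lists overpopulated
    case 2: return left_over_populated;                          // left overpopulated (default)
    case 3: return left_over_populated || right_over_populated;  // either overpopulated
    case 4: return true;                                         // always coalesce
    default: return false;                                       // invalid policy value
  }
}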
7816
7817// Lookahead flush:
7818// If we are tracking a free range, and this is the last chunk we will
7819// examine because its end crosses past _limit, we preemptively flush it
7820// along with any free range we may be holding on to. Note that this can
7821// be the case only for an already-free or freshly-garbage chunk; if this
7822// block is an object, it can never straddle over _limit. The "straddling"
7823// occurs when _limit was set at the previous end of the space when this
7824// cycle started, and a subsequent heap expansion caused the previously
7825// co-terminal free block to be coalesced with the newly expanded portion,
7826// rendering _limit a non-block-boundary that is dangerous for the sweeper
7827// to step over and examine. (An illustrative sketch of the end-of-block
7828// test follows the function below.)
7829void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7830  assert(inFreeRange(), "Should only be called if currently in a free range.");
7831  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7832  assert(_sp->used_region().contains(eob - 1),
7833         "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7834         " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7835         " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7836         p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7837  if (eob >= _limit) {
7838    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7839    if (CMSTraceSweeper) {
7840      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7841                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7842                             "[" PTR_FORMAT "," PTR_FORMAT ")",
7843                             p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7844    }
7845    // Return the storage we are tracking back into the free lists.
7846    if (CMSTraceSweeper) {
7847      gclog_or_tty->print_cr("Flushing ... ");
7848    }
7849    assert(freeFinger() < eob, "Error");
7850    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7851  }
7852}
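
// Illustrative sketch (editor's addition): the end-of-block test performed by
// lookahead_and_flush() above, on plain word indices. It returns the number
// of words that would be flushed, or 0 if the block stays inside _limit. The
// parameter names are placeholders, not collector fields.
static size_t lookahead_flush_words_sketch(size_t free_finger_word,
                                           size_t chunk_start_word,
                                           size_t chunk_size_in_words,
                                           size_t limit_word) {
  const size_t end_of_block = chunk_start_word + chunk_size_in_words;
  if (end_of_block < limit_word) {
    return 0;  // the block ends before _limit: keep tracking the free range
  }
  // The block reaches or crosses _limit: flush [freeFinger(), end_of_block).
  return end_of_block - free_finger_word;
}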
7853
7854void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7855  assert(inFreeRange(), "Should only be called if currently in a free range.");
7856  assert(size > 0,
7857    "A zero sized chunk cannot be added to the free lists.");
7858  if (!freeRangeInFreeLists()) {
7859    if (CMSTestInFreeList) {
7860      FreeChunk* fc = (FreeChunk*) chunk;
7861      fc->set_size(size);
7862      assert(!_sp->verify_chunk_in_free_list(fc),
7863        "chunk should not be in free lists yet");
7864    }
7865    if (CMSTraceSweeper) {
7866      gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7867                    p2i(chunk), size);
7868    }
7869    // A new free range is about to be started.  The current
7870    // free range either has not yet been added to the free lists
7871    // or was removed from them, so add it (back) now.
7872    // If the current free range was coalesced, then the death
7873    // of the free range was recorded.  Record a birth now.
7874    if (lastFreeRangeCoalesced()) {
7875      _sp->coalBirth(size);
7876    }
7877    _sp->addChunkAndRepairOffsetTable(chunk, size,
7878            lastFreeRangeCoalesced());
7879  } else if (CMSTraceSweeper) {
7880    gclog_or_tty->print_cr("Already in free list: nothing to flush");
7881  }
7882  set_inFreeRange(false);
7883  set_freeRangeInFreeLists(false);
7884}
7885
7886// We take a break if we've been at this for a while,
7887// so as to avoid monopolizing the locks involved.
7888void SweepClosure::do_yield_work(HeapWord* addr) {
7889  // Return current free chunk being used for coalescing (if any)
7890  // to the appropriate freelist.  After yielding, the next
7891  // free block encountered will start a coalescing range of
7892  // free blocks.  If the next free block is adjacent to the
7893  // chunk just flushed, they will need to wait for the next
7894  // sweep to be coalesced.
7895  if (inFreeRange()) {
7896    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7897  }
7898
7899  // First give up the locks, then yield, then re-lock.
7900  // We should probably use a constructor/destructor (RAII) idiom to
7901  // do this unlock/lock, or modify the MutexUnlocker class to serve
7902  // our purpose; an illustrative sketch follows this function. XXX
7903  assert_lock_strong(_bitMap->lock());
7904  assert_lock_strong(_freelistLock);
7905  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7906         "CMS thread should hold CMS token");
7907  _bitMap->lock()->unlock();
7908  _freelistLock->unlock();
7909  ConcurrentMarkSweepThread::desynchronize(true);
7910  _collector->stopTimer();
7911  if (PrintCMSStatistics != 0) {
7912    _collector->incrementYields();
7913  }
7914
7915  // See the comment in coordinator_yield()
7916  for (unsigned i = 0; i < CMSYieldSleepCount &&
7917                       ConcurrentMarkSweepThread::should_yield() &&
7918                       !CMSCollector::foregroundGCIsActive(); ++i) {
7919    os::sleep(Thread::current(), 1, false);
7920  }
7921
7922  ConcurrentMarkSweepThread::synchronize(true);
7923  _freelistLock->lock();
7924  _bitMap->lock()->lock_without_safepoint_check();
7925  _collector->startTimer();
7926}
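
// Illustrative sketch (editor's addition): the constructor/destructor idiom
// alluded to in the comment above. It is written generically; the real code
// would also need lock_without_safepoint_check() for the bitmap lock and the
// CMS-token handling that do_yield_work() performs.
template <typename LockType>
class ScopedUnlockerSketch {
  LockType* const _lock;
 public:
  explicit ScopedUnlockerSketch(LockType* lock) : _lock(lock) {
    _lock->unlock();   // give up the lock for the duration of the scope
  }
  ~ScopedUnlockerSketch() {
    _lock->lock();     // re-acquire it on scope exit
  }
};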
7927
7928#ifndef PRODUCT
7929// This is actually very useful in a product build if it can
7930// be called from the debugger.  Compile it into the product
7931// as needed.
7932bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7933  return debug_cms_space->verify_chunk_in_free_list(fc);
7934}
7935#endif
7936
7937void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7938  if (CMSTraceSweeper) {
7939    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7940                           p2i(fc), fc->size());
7941  }
7942}
7943
7944// CMSIsAliveClosure
7945bool CMSIsAliveClosure::do_object_b(oop obj) {
7946  HeapWord* addr = (HeapWord*)obj;
7947  return addr != NULL &&
7948         (!_span.contains(addr) || _bit_map->isMarked(addr));
7949}
7950
7951
7952CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
7953                      MemRegion span,
7954                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7955                      bool cpc):
7956  _collector(collector),
7957  _span(span),
7958  _bit_map(bit_map),
7959  _mark_stack(mark_stack),
7960  _concurrent_precleaning(cpc) {
7961  assert(!_span.is_empty(), "Empty span could spell trouble");
7962}
7963
7964
7965// CMSKeepAliveClosure: the serial version
7966void CMSKeepAliveClosure::do_oop(oop obj) {
7967  HeapWord* addr = (HeapWord*)obj;
7968  if (_span.contains(addr) &&
7969      !_bit_map->isMarked(addr)) {
7970    _bit_map->mark(addr);
7971    bool simulate_overflow = false;
7972    NOT_PRODUCT(
7973      if (CMSMarkStackOverflowALot &&
7974          _collector->simulate_overflow()) {
7975        // simulate a stack overflow
7976        simulate_overflow = true;
7977      }
7978    )
7979    if (simulate_overflow || !_mark_stack->push(obj)) {
7980      if (_concurrent_precleaning) {
7981        // We dirty the overflown object and let the remark
7982        // phase deal with it.
7983        assert(_collector->overflow_list_is_empty(), "Error");
7984        // In the case of object arrays, we need to dirty all of
7985        // the cards that the object spans. No locking or atomics
7986        // are needed since no one else can be mutating the mod union
7987        // table.
7988        // table. (A sketch of the card-boundary rounding follows this method.)
7989          size_t sz = obj->size();
7990          HeapWord* end_card_addr =
7991            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7992          MemRegion redirty_range = MemRegion(addr, end_card_addr);
7993          assert(!redirty_range.is_empty(), "Arithmetical tautology");
7994          _collector->_modUnionTable.mark_range(redirty_range);
7995        } else {
7996          _collector->_modUnionTable.mark(addr);
7997        }
7998        _collector->_ser_kac_preclean_ovflw++;
7999      } else {
8000        _collector->push_on_overflow_list(obj);
8001        _collector->_ser_kac_ovflw++;
8002      }
8003    }
8004  }
8005}
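
// Illustrative sketch (editor's addition): the card-boundary rounding used
// above when dirtying the cards spanned by an overflown object array. The
// real code uses CardTableModRefBS::card_size; the power-of-two card size is
// an assumption of this sketch (512 bytes is the usual HotSpot value).
static uintptr_t round_up_to_card_sketch(uintptr_t end_of_object_in_bytes,
                                         uintptr_t card_size_in_bytes) {
  // For a power-of-two card size, rounding up is an add-and-mask operation.
  return (end_of_object_in_bytes + card_size_in_bytes - 1)
         & ~(card_size_in_bytes - 1);
}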
8006
8007void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8008void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8009
8010// CMSParKeepAliveClosure: a parallel version of the above.
8011// Each work queue is private to its closure (thread), but its
8012// contents may be stolen by other threads.
8013void CMSParKeepAliveClosure::do_oop(oop obj) {
8014  HeapWord* addr = (HeapWord*)obj;
8015  if (_span.contains(addr) &&
8016      !_bit_map->isMarked(addr)) {
8017    // In general, during recursive tracing, several threads
8018    // may be concurrently getting here; the first one to
8019    // "tag" it claims it.
8020    if (_bit_map->par_mark(addr)) {
8021      bool res = _work_queue->push(obj);
8022      assert(res, "Low water mark should be much less than capacity");
8023      // Do a recursive trim to keep stack usage lower, while leaving
8024      // some oops for potential stealers (a sketch follows trim_queue() below).
8025      trim_queue(_low_water_mark);
8026    } // Else, another thread got there first
8027  }
8028}
8029
8030void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8031void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8032
8033void CMSParKeepAliveClosure::trim_queue(uint max) {
8034  while (_work_queue->size() > max) {
8035    oop new_oop;
8036    if (_work_queue->pop_local(new_oop)) {
8037      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8038      assert(_bit_map->isMarked((HeapWord*)new_oop),
8039             "no white objects on this stack!");
8040      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8041      // iterate over the oops in this oop, marking and pushing
8042      // the ones in CMS heap (i.e. in _span).
8043      new_oop->oop_iterate(&_mark_and_push);
8044    }
8045  }
8046}
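
// Illustrative sketch (editor's addition): the shape of the trim loop above,
// with placeholder Queue, Task and Visitor types (only size(), pop_local()
// and operator() are assumed). Popping stops once the queue is back at or
// below the watermark, leaving some entries behind for potential stealers.
template <typename Queue, typename Task, typename Visitor>
static void trim_queue_sketch(Queue* queue, size_t watermark, Visitor& visit) {
  Task task;
  while (queue->size() > watermark) {
    if (queue->pop_local(task)) {  // pop from our own end of the queue
      visit(task);                 // process the task; it may push more work
    }
  }
}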
8047
8048CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8049                                CMSCollector* collector,
8050                                MemRegion span, CMSBitMap* bit_map,
8051                                OopTaskQueue* work_queue):
8052  _collector(collector),
8053  _span(span),
8054  _bit_map(bit_map),
8055  _work_queue(work_queue) { }
8056
8057void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8058  HeapWord* addr = (HeapWord*)obj;
8059  if (_span.contains(addr) &&
8060      !_bit_map->isMarked(addr)) {
8061    if (_bit_map->par_mark(addr)) {
8062      bool simulate_overflow = false;
8063      NOT_PRODUCT(
8064        if (CMSMarkStackOverflowALot &&
8065            _collector->par_simulate_overflow()) {
8066          // simulate a stack overflow
8067          simulate_overflow = true;
8068        }
8069      )
8070      if (simulate_overflow || !_work_queue->push(obj)) {
8071        _collector->par_push_on_overflow_list(obj);
8072        _collector->_par_kac_ovflw++;
8073      }
8074    } // Else another thread got there already
8075  }
8076}
8077
8078void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8079void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8080
8081//////////////////////////////////////////////////////////////////
8082//  CMSExpansionCause                /////////////////////////////
8083//////////////////////////////////////////////////////////////////
8084const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8085  switch (cause) {
8086    case _no_expansion:
8087      return "No expansion";
8088    case _satisfy_free_ratio:
8089      return "Free ratio";
8090    case _satisfy_promotion:
8091      return "Satisfy promotion";
8092    case _satisfy_allocation:
8093      return "allocation";
8094    case _allocate_par_lab:
8095      return "Par LAB";
8096    case _allocate_par_spooling_space:
8097      return "Par Spooling Space";
8098    case _adaptive_size_policy:
8099      return "Ergonomics";
8100    default:
8101      return "unknown";
8102  }
8103}
8104
8105void CMSDrainMarkingStackClosure::do_void() {
8106  // the max number to take from overflow list at a time
8107  const size_t num = _mark_stack->capacity()/4;
8108  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8109         "Overflow list should be NULL during concurrent phases");
8110  while (!_mark_stack->isEmpty() ||
8111         // if stack is empty, check the overflow list
8112         _collector->take_from_overflow_list(num, _mark_stack)) {
8113    oop obj = _mark_stack->pop();
8114    HeapWord* addr = (HeapWord*)obj;
8115    assert(_span.contains(addr), "Should be within span");
8116    assert(_bit_map->isMarked(addr), "Should be marked");
8117    assert(obj->is_oop(), "Should be an oop");
8118    obj->oop_iterate(_keep_alive);
8119  }
8120}
8121
8122void CMSParDrainMarkingStackClosure::do_void() {
8123  // drain queue
8124  trim_queue(0);
8125}
8126
8127// Trim our work_queue so its length is below max at return
8128void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8129  while (_work_queue->size() > max) {
8130    oop new_oop;
8131    if (_work_queue->pop_local(new_oop)) {
8132      assert(new_oop->is_oop(), "Expected an oop");
8133      assert(_bit_map->isMarked((HeapWord*)new_oop),
8134             "no white objects on this stack!");
8135      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8136      // iterate over the oops in this oop, marking and pushing
8137      // the ones in CMS heap (i.e. in _span).
8138      new_oop->oop_iterate(&_mark_and_push);
8139    }
8140  }
8141}
8142
8143////////////////////////////////////////////////////////////////////
8144// Support for Marking Stack Overflow list handling and related code
8145////////////////////////////////////////////////////////////////////
8146// Much of the following code is similar in shape and spirit to the
8147// code used in ParNewGC. We should try to share that code
8148// as much as possible in the future.
8149
8150#ifndef PRODUCT
8151// Debugging support for CMSStackOverflowALot
8152
8153// It's OK to call this multi-threaded; the worst thing
8154// that can happen is that we'll get a bunch of closely
8155// spaced simulated overflows. That's OK; in fact it is
8156// probably good, as it exercises the overflow code under
8157// contention. (A sketch of the countdown follows the #endif below.)
8158bool CMSCollector::simulate_overflow() {
8159  if (_overflow_counter-- <= 0) { // just being defensive
8160    _overflow_counter = CMSMarkStackOverflowInterval;
8161    return true;
8162  } else {
8163    return false;
8164  }
8165}
8166
8167bool CMSCollector::par_simulate_overflow() {
8168  return simulate_overflow();
8169}
8170#endif
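
// Illustrative sketch (editor's addition): the countdown used by
// simulate_overflow() above. Roughly one call in every 'interval' calls
// reports a simulated overflow, and the counter is re-armed each time it
// fires; CMSMarkStackOverflowInterval plays the role of 'interval' above.
static bool countdown_trigger_sketch(ssize_t* counter, ssize_t interval) {
  if ((*counter)-- <= 0) {   // "<= 0" rather than "== 0": just being defensive
    *counter = interval;     // re-arm for the next simulated overflow
    return true;
  }
  return false;
}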
8171
8172// Single-threaded (an illustrative sketch follows this function)
8173bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8174  assert(stack->isEmpty(), "Expected precondition");
8175  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8176  size_t i = num;
8177  oop  cur = _overflow_list;
8178  const markOop proto = markOopDesc::prototype();
8179  NOT_PRODUCT(ssize_t n = 0;)
8180  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8181    next = oop(cur->mark());
8182    cur->set_mark(proto);   // until proven otherwise
8183    assert(cur->is_oop(), "Should be an oop");
8184    bool res = stack->push(cur);
8185    assert(res, "Bit off more than can chew?");
8186    NOT_PRODUCT(n++;)
8187  }
8188  _overflow_list = cur;
8189#ifndef PRODUCT
8190  assert(_num_par_pushes >= n, "Too many pops?");
8191  _num_par_pushes -= n;
8192#endif
8193  return !stack->isEmpty();
8194}
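
// Illustrative sketch (editor's addition): taking at most 'num' nodes off a
// singly-linked overflow list and pushing them onto a stack, as
// take_from_overflow_list() does above. A plain 'next' field stands in for
// the mark word that threads the real list, and Stack is a placeholder type
// (only push() is assumed). The node type is reused by the sketches further
// below.
struct OverflowNodeSketch {
  OverflowNodeSketch* next;
};

template <typename Stack>
static size_t take_prefix_sketch(OverflowNodeSketch** list_head,
                                 size_t num,
                                 Stack* stack) {
  size_t taken = 0;
  OverflowNodeSketch* cur = *list_head;
  while (taken < num && cur != NULL) {
    OverflowNodeSketch* next = cur->next;
    cur->next = NULL;   // restore a clean link (cf. set_mark(proto) above)
    stack->push(cur);
    cur = next;
    taken++;
  }
  *list_head = cur;     // whatever is left stays on the overflow list
  return taken;
}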
8195
8196#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
8197// (MT-safe) Get a prefix of at most "num" from the list.
8198// The overflow list is chained through the mark word of
8199// each object in the list. We fetch the entire list,
8200// break off a prefix of the right size and return the
8201// remainder. If other threads try to take objects from
8202// the overflow list at that time, they will wait for
8203// some time to see if data becomes available. If (and
8204// only if) another thread places one or more object(s)
8205// on the global list before we have returned the suffix
8206// to the global list, we will walk down our local list
8207// to find its end and append the global list to
8208// our suffix before returning it. This suffix walk can
8209// prove to be expensive (quadratic in the amount of traffic)
8210// when there are many objects in the overflow list and
8211// there is much producer-consumer contention on the list.
8212// *NOTE*: The overflow list manipulation code here and
8213// in ParNewGeneration:: are very similar in shape,
8214// except that in the ParNew case we use the old (from/eden)
8215// copy of the object to thread the list via its klass word.
8216// Because of the common code, if you make any changes in
8217// the code below, please check the ParNew version to see if
8218// similar changes might be needed.
8219// CR 6797058 has been filed to consolidate the common code. (A sketch of the claim-and-return protocol follows this function.)
8220bool CMSCollector::par_take_from_overflow_list(size_t num,
8221                                               OopTaskQueue* work_q,
8222                                               int no_of_gc_threads) {
8223  assert(work_q->size() == 0, "First empty local work queue");
8224  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8225  if (_overflow_list == NULL) {
8226    return false;
8227  }
8228  // Grab the entire list; we'll put back a suffix
8229  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8230  Thread* tid = Thread::current();
8231  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
8232  // set to ParallelGCThreads.
8233  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8234  size_t sleep_time_millis = MAX2((size_t)1, num/100);
8235  // If the list is busy, we spin for a short while,
8236  // sleeping between attempts to get the list.
8237  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8238    os::sleep(tid, sleep_time_millis, false);
8239    if (_overflow_list == NULL) {
8240      // Nothing left to take
8241      return false;
8242    } else if (_overflow_list != BUSY) {
8243      // Try and grab the prefix
8244      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8245    }
8246  }
8247  // If the list was found to be empty, or we spun long
8248  // enough, we give up and return empty-handed. If we leave
8249  // the list in the BUSY state below, it must be the case that
8250  // some other thread holds the overflow list and will set it
8251  // to a non-BUSY state in the future.
8252  if (prefix == NULL || prefix == BUSY) {
8253     // Nothing to take or waited long enough
8254     if (prefix == NULL) {
8255       // Write back the NULL in case we overwrote it with BUSY above
8256       // and it is still the same value.
8257       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8258     }
8259     return false;
8260  }
8261  assert(prefix != NULL && prefix != BUSY, "Error");
8262  size_t i = num;
8263  oop cur = prefix;
8264  // Walk down the first "num" objects, unless we reach the end.
8265  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8266  if (cur->mark() == NULL) {
8267    // We have "num" or fewer elements in the list, so there
8268    // is nothing to return to the global list.
8269    // Write back the NULL in lieu of the BUSY we wrote
8270    // above, if it is still the same value.
8271    if (_overflow_list == BUSY) {
8272      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8273    }
8274  } else {
8275    // Chop off the suffix and return it to the global list.
8276    assert(cur->mark() != BUSY, "Error");
8277    oop suffix_head = cur->mark(); // suffix will be put back on global list
8278    cur->set_mark(NULL);           // break off suffix
8279    // It's possible that the list is still in the empty(busy) state
8280    // we left it in a short while ago; in that case we may be
8281    // able to place back the suffix without incurring the cost
8282    // of a walk down the list.
8283    oop observed_overflow_list = _overflow_list;
8284    oop cur_overflow_list = observed_overflow_list;
8285    bool attached = false;
8286    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8287      observed_overflow_list =
8288        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8289      if (cur_overflow_list == observed_overflow_list) {
8290        attached = true;
8291        break;
8292      } else cur_overflow_list = observed_overflow_list;
8293    }
8294    if (!attached) {
8295      // Too bad, someone else sneaked in (at least) an element; we'll need
8296      // to do a splice. Find tail of suffix so we can prepend suffix to global
8297      // list.
8298      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8299      oop suffix_tail = cur;
8300      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8301             "Tautology");
8302      observed_overflow_list = _overflow_list;
8303      do {
8304        cur_overflow_list = observed_overflow_list;
8305        if (cur_overflow_list != BUSY) {
8306          // Do the splice ...
8307          suffix_tail->set_mark(markOop(cur_overflow_list));
8308        } else { // cur_overflow_list == BUSY
8309          suffix_tail->set_mark(NULL);
8310        }
8311        // ... and try to place spliced list back on overflow_list ...
8312        observed_overflow_list =
8313          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8314      } while (cur_overflow_list != observed_overflow_list);
8315      // ... until we have succeeded in doing so.
8316    }
8317  }
8318
8319  // Push the prefix elements on work_q
8320  assert(prefix != NULL, "control point invariant");
8321  const markOop proto = markOopDesc::prototype();
8322  oop next;
8323  NOT_PRODUCT(ssize_t n = 0;)
8324  for (cur = prefix; cur != NULL; cur = next) {
8325    next = oop(cur->mark());
8326    cur->set_mark(proto);   // until proven otherwise
8327    assert(cur->is_oop(), "Should be an oop");
8328    bool res = work_q->push(cur);
8329    assert(res, "Bit off more than we can chew?");
8330    NOT_PRODUCT(n++;)
8331  }
8332#ifndef PRODUCT
8333  assert(_num_par_pushes >= n, "Too many pops?");
8334  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8335#endif
8336  return true;
8337}
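
// Illustrative sketch (editor's addition): the claim-and-return protocol of
// par_take_from_overflow_list() above, reduced to its core. It reuses
// OverflowNodeSketch from the earlier sketch, with a 'next' field in place of
// the mark word; the spin/sleep loop, overflow statistics and work-queue
// pushes are omitted. BUSY_SKETCH plays the role of BUSY.
#define BUSY_SKETCH (reinterpret_cast<OverflowNodeSketch*>(0x1))
static size_t par_take_prefix_sketch(OverflowNodeSketch** list_head,
                                     size_t num,
                                     OverflowNodeSketch** prefix_out) {
  // Claim the entire list by swapping in the BUSY sentinel.
  OverflowNodeSketch* prefix =
    (OverflowNodeSketch*) Atomic::xchg_ptr(BUSY_SKETCH, list_head);
  if (prefix == NULL || prefix == BUSY_SKETCH) {
    if (prefix == NULL) {
      // We overwrote an empty list with BUSY; put the NULL back.
      (void) Atomic::cmpxchg_ptr(NULL, list_head, BUSY_SKETCH);
    }
    *prefix_out = NULL;   // nothing to take (or another thread holds the list)
    return 0;
  }
  // Walk down at most 'num' nodes and cut the list there.
  size_t taken = 1;
  OverflowNodeSketch* cur = prefix;
  while (taken < num && cur->next != NULL) {
    cur = cur->next;
    taken++;
  }
  OverflowNodeSketch* suffix = cur->next;
  cur->next = NULL;       // break off the prefix we are keeping
  if (suffix == NULL) {
    // We took the whole list; restore the empty (NULL) head if still BUSY.
    (void) Atomic::cmpxchg_ptr(NULL, list_head, BUSY_SKETCH);
  } else {
    // Common case: the head is still BUSY and one CAS re-installs the suffix
    // (its tail already ends in NULL).
    OverflowNodeSketch* observed =
      (OverflowNodeSketch*) Atomic::cmpxchg_ptr(suffix, list_head, BUSY_SKETCH);
    if (observed != BUSY_SKETCH) {
      // Someone pushed new nodes meanwhile: find the suffix tail and splice
      // the suffix ahead of whatever is on the list now, retrying the CAS.
      OverflowNodeSketch* tail = suffix;
      while (tail->next != NULL) {
        tail = tail->next;
      }
      OverflowNodeSketch* expected;
      do {
        expected   = observed;
        tail->next = (expected == BUSY_SKETCH) ? NULL : expected;
        observed   = (OverflowNodeSketch*)
          Atomic::cmpxchg_ptr(suffix, list_head, expected);
      } while (observed != expected);
    }
  }
  *prefix_out = prefix;
  return taken;
}
#undef BUSY_SKETCH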
8338
8339// Single-threaded
8340void CMSCollector::push_on_overflow_list(oop p) {
8341  NOT_PRODUCT(_num_par_pushes++;)
8342  assert(p->is_oop(), "Not an oop");
8343  preserve_mark_if_necessary(p);
8344  p->set_mark((markOop)_overflow_list);
8345  _overflow_list = p;
8346}
8347
8348// Multi-threaded; use CAS to prepend to the overflow list (a sketch of the retry loop follows the #undef below)
8349void CMSCollector::par_push_on_overflow_list(oop p) {
8350  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8351  assert(p->is_oop(), "Not an oop");
8352  par_preserve_mark_if_necessary(p);
8353  oop observed_overflow_list = _overflow_list;
8354  oop cur_overflow_list;
8355  do {
8356    cur_overflow_list = observed_overflow_list;
8357    if (cur_overflow_list != BUSY) {
8358      p->set_mark(markOop(cur_overflow_list));
8359    } else {
8360      p->set_mark(NULL);
8361    }
8362    observed_overflow_list =
8363      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8364  } while (cur_overflow_list != observed_overflow_list);
8365}
8366#undef BUSY
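
// Illustrative sketch (editor's addition): the CAS retry loop of
// par_push_on_overflow_list() above, again using OverflowNodeSketch with a
// 'next' field in place of the mark word. The BUSY special case (store NULL
// into the new head's link rather than the sentinel) is shown in the real
// code above and elided here.
static void par_push_sketch(OverflowNodeSketch** list_head,
                            OverflowNodeSketch* p) {
  OverflowNodeSketch* observed = *list_head;
  OverflowNodeSketch* expected;
  do {
    expected = observed;
    p->next  = expected;   // link the new node ahead of the current head
    observed = (OverflowNodeSketch*)
      Atomic::cmpxchg_ptr(p, list_head, expected);
  } while (observed != expected);
}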
8367
8368// Single threaded
8369// General Note on GrowableArray: pushes may silently fail
8370// because we are (temporarily) out of C-heap for expanding
8371// the stack. The problem is quite ubiquitous and affects
8372// a lot of code in the JVM. The prudent thing for GrowableArray
8373// to do (for now) is to exit with an error. However, that may
8374// be too draconian in some cases because the caller may be
8375// able to recover without much harm. For such cases, we
8376// should probably introduce a "soft_push" method which returns
8377// an indication of success or failure with the assumption that
8378// the caller may be able to recover from a failure; code in
8379// the VM can then be changed, incrementally, to deal with such
8380// failures where possible, thus, incrementally hardening the VM
8381// in such low resource situations.
8382void CMSCollector::preserve_mark_work(oop p, markOop m) {
8383  _preserved_oop_stack.push(p);
8384  _preserved_mark_stack.push(m);
8385  assert(m == p->mark(), "Mark word changed");
8386  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8387         "bijection");
8388}
8389
8390// Single threaded
8391void CMSCollector::preserve_mark_if_necessary(oop p) {
8392  markOop m = p->mark();
8393  if (m->must_be_preserved(p)) {
8394    preserve_mark_work(p, m);
8395  }
8396}
8397
8398void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8399  markOop m = p->mark();
8400  if (m->must_be_preserved(p)) {
8401    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8402    // Even though we read the mark word without holding
8403    // the lock, we are assured that it will not change
8404    // because we "own" this oop, so no other thread can
8405    // be trying to push it on the overflow list; see
8406    // the assertion in preserve_mark_work() that checks
8407    // that m == p->mark().
8408    preserve_mark_work(p, m);
8409  }
8410}
8411
8412// We should be able to do this multi-threaded, with a chunk
8413// of the stack being a task (this is correct because each
8414// oop only ever appears once in the overflow list). However,
8415// it's not very easy to completely overlap this with other
8416// operations, so it will generally not be done until all
8417// work has been completed. Because we expect the preserved
8418// oop stack (set) to be small, it's probably fine to do
8419// this single-threaded. We can explore cleverer concurrent,
8420// overlapped or parallel processing of preserved marks if
8421// we feel the need for this in the future. Stack overflow
8422// should be so rare in practice, and its effect on
8423// performance when it does happen so great, that this will
8424// likely just be in the noise anyway. (An illustrative
8425// sketch of the restore loop follows
8426// restore_preserved_marks_if_any() below.)
8427void CMSCollector::restore_preserved_marks_if_any() {
8428  assert(SafepointSynchronize::is_at_safepoint(),
8429         "world should be stopped");
8430  assert(Thread::current()->is_ConcurrentGC_thread() ||
8431         Thread::current()->is_VM_thread(),
8432         "should be single-threaded");
8433  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8434         "bijection");
8435
8436  while (!_preserved_oop_stack.is_empty()) {
8437    oop p = _preserved_oop_stack.pop();
8438    assert(p->is_oop(), "Should be an oop");
8439    assert(_span.contains(p), "oop should be in _span");
8440    assert(p->mark() == markOopDesc::prototype(),
8441           "Set when taken from overflow list");
8442    markOop m = _preserved_mark_stack.pop();
8443    p->set_mark(m);
8444  }
8445  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8446         "stacks were cleared above");
8447}
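
// Illustrative sketch (editor's addition): the pairing discipline behind the
// preserved-marks code above. OopStack and MarkStack are placeholders for the
// collector's _preserved_oop_stack and _preserved_mark_stack types; only
// is_empty() and pop() are assumed of them.
template <typename OopStack, typename MarkStack>
static void restore_preserved_marks_sketch(OopStack* oops, MarkStack* marks) {
  // preserve_mark_work() pushes onto both stacks in lock step, so popping one
  // element from each per iteration restores the original (oop, mark) pairs.
  while (!oops->is_empty()) {
    oop     obj = oops->pop();
    markOop m   = marks->pop();
    obj->set_mark(m);   // reinstall the displaced mark word
  }
}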
8448
8449#ifndef PRODUCT
8450bool CMSCollector::no_preserved_marks() const {
8451  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8452}
8453#endif
8454
8455// Transfer some number of overflown objects to usual marking
8456// stack. Return true if some objects were transferred.
8457bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8458  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8459                    (size_t)ParGCDesiredObjsFromOverflowList);
8460
8461  bool res = _collector->take_from_overflow_list(num, _mark_stack);
8462  assert(_collector->overflow_list_is_empty() || res,
8463         "If list is not empty, we should have taken something");
8464  assert(!res || !_mark_stack->isEmpty(),
8465         "If we took something, it should now be on our stack");
8466  return res;
8467}
8468
8469size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8470  size_t res = _sp->block_size_no_stall(addr, _collector);
8471  if (_sp->block_is_obj(addr)) {
8472    if (_live_bit_map->isMarked(addr)) {
8473      // It can't have been dead in a previous cycle
8474      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8475    } else {
8476      _dead_bit_map->mark(addr);      // mark the dead object
8477    }
8478  }
8479  // Could be 0, if the block size could not be computed without stalling.
8480  return res;
8481}
8482
8483TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8484
8485  switch (phase) {
8486    case CMSCollector::InitialMarking:
8487      initialize(true  /* fullGC */ ,
8488                 cause /* cause of the GC */,
8489                 true  /* recordGCBeginTime */,
8490                 true  /* recordPreGCUsage */,
8491                 false /* recordPeakUsage */,
8492                 false /* recordPostGCusage */,
8493                 true  /* recordAccumulatedGCTime */,
8494                 false /* recordGCEndTime */,
8495                 false /* countCollection */  );
8496      break;
8497
8498    case CMSCollector::FinalMarking:
8499      initialize(true  /* fullGC */ ,
8500                 cause /* cause of the GC */,
8501                 false /* recordGCBeginTime */,
8502                 false /* recordPreGCUsage */,
8503                 false /* recordPeakUsage */,
8504                 false /* recordPostGCusage */,
8505                 true  /* recordAccumulatedGCTime */,
8506                 false /* recordGCEndTime */,
8507                 false /* countCollection */  );
8508      break;
8509
8510    case CMSCollector::Sweeping:
8511      initialize(true  /* fullGC */ ,
8512                 cause /* cause of the GC */,
8513                 false /* recordGCBeginTime */,
8514                 false /* recordPreGCUsage */,
8515                 true  /* recordPeakUsage */,
8516                 true  /* recordPostGCusage */,
8517                 false /* recordAccumulatedGCTime */,
8518                 true  /* recordGCEndTime */,
8519                 true  /* countCollection */  );
8520      break;
8521
8522    default:
8523      ShouldNotReachHere();
8524  }
8525}
8526