concurrentMarkSweepGeneration.cpp revision 8848:90861a3150d0
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
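
// Illustrative usage (a sketch, not copied from an actual call site):
// a phase that must exclude the other thread and also needs a lock would do
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, some_lock);
//     ... work done while holding the CMS token and some_lock ...
//   }   // some_lock is released first, then the CMS token is relinquished
//
// where "some_lock" stands for whichever mutex the phase requires.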


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
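  // For instance (illustrative numbers only): if MinChunkSize is 4 HeapWords
  // and the promoting generation's minimum object size is 3 HeapWords, an
  // object could "dilate" by a factor of 4/3 when promoted into this
  // generation.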
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio percent of the (purported) free
// space be allocated before initiating a new collection cycle.
//
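// For example (illustrative values): with MinHeapFreeRatio = 40 and
// CMSTriggerRatio = 80, and no explicit CMSInitiatingOccupancyFraction,
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100 = 0.92,
// i.e. a cycle is initiated once the generation is about 92% occupied.
//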
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                   // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
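//
// As a rough illustration (numbers are made up): with cms_free = 100M,
// expected_promotion = 20M and CMSIncrementalSafetyFactor = 10, the
// adjusted free figure is (100M - 20M) * 0.9 = 72M; at a consumption
// rate of 8M per second the estimate below is 72M / (8M + 1) ~= 9 seconds.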
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrences of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) could be used instead.
// This was tried, but some applications then experienced
// promotion failures early in execution, possibly because
// the averages were not accurate enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
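  // The value computed below is the estimated slack, in seconds, before a
  // concurrent cycle ought to start; 0.0 means "start one now".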
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return deadline - work;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      _young_gen->max_survivor_size() / (ThreadLocalAllocBuffer::min_size() * HeapWordSize);

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// this is an optimized version of update_counters(). it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    // I didn't want to change the logging when removing the level concept,
    // but I guess this logging could say "old" or something instead of "1".
    assert(gch->is_old_gen(this),
           "The CMS generation should be the old generation");
    uint level = 1;
    if (Verbose) {
      gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]",
        level, short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]",
        level, short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
      "max_promo(" SIZE_FORMAT ")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
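  // Illustrative numbers: with used() = 600M, capacity() = 800M and
  // MinHeapFreeRatio = 40, free_percentage = 200M/800M = 0.25 < 0.40, so
  // desired_capacity = 600M / (1 - 0.40) = 1000M and we try to expand by
  // max(1000M - 800M, MinHeapDeltaBytes) = 200M.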
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
              desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
              maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT,
              desired_capacity/1000);
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
      size_t young_size = gch->young_gen()->capacity();
      gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT,
              unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT,
              contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)",
              expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
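//
// A reader-side sketch of the above (illustrative only, not the exact
// predicates used elsewhere in this file):
//
//   if (klass word is 0)          -> TRANSIENT: retry later or stall
//   else if (free bit is set)     -> FREE: the block size is in the mark word
//   else                          -> OBJECT: obj->size() is reliable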

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}
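
// Note on the ordering above (a descriptive summary of par_promote): the
// mark word is installed and the body copied while the klass word is still
// NULL, with store-store barriers in between; only then is the klass
// pointer published. Concurrent readers therefore either see a TRANSIENT
// block (klass NULL) or a fully formed object, never a partially copied one.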

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request (or gc_locker)");
    }
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (PrintCMSInitiationStatistics && stats().valid()) {
    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
    gclog_or_tty->stamp();
    gclog_or_tty->cr();
    stats().print_on(gclog_or_tty);
    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
      stats().time_until_cms_gen_full());
    gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free());
    gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT,
                           _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    gclog_or_tty->print_cr("metadata initialized %d",
      MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr(
            " CMSCollector: collect for bootstrapping statistics:"
            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
            _bootstrap_occupancy);
        }
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if
  // the old gen wants a collection cycle started. It may use
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMS old gen initiated");
    }
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
    }
    return true;
  }

  if (MetaspaceGC::should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
    }
    return true;
  }

  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
  if (CMSTriggerInterval >= 0) {
    if (CMSTriggerInterval == 0) {
      // Trigger always
      return true;
    }

    // Check the CMS time since begin (we do not check the stats validity
    // as we want to be able to trigger the first CMS cycle as well)
    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
      if (Verbose && PrintGCDetails) {
        if (stats().valid()) {
          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
                                 stats().cms_time_since_begin());
        } else {
          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
        }
      }
      return true;
    }
  }

  return false;
}

void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }

// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
  _cmsGen->clear_expansion_cause();
}

// We should be conservative in starting a collection cycle.  To
// start too eagerly runs the risk of collecting too often in the
// extreme.  To collect too rarely falls back on full collections,
// which works, even if not optimum in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
1287// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1288// giving the user an easily understandable way of controlling the
1289// collections.
1290// We want to start a new collection cycle if any of the following
1291// conditions hold:
1292// . our current occupancy exceeds the configured initiating occupancy
1293//   for this generation, or
1294// . we recently needed to expand this space and have not, since that
1295//   expansion, done a collection of this generation, or
1296// . the underlying space believes that it may be a good idea to initiate
1297//   a concurrent collection (this may be based on criteria such as the
1298//   following: the space uses linear allocation and linear allocation is
1299//   going to fail, or there is believed to be excessive fragmentation in
1300//   the generation, etc... or ...
1301// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1302//   the case of the old generation; see CR 6543076):
1303//   we may be approaching a point at which allocation requests may fail because
1304//   we will be out of sufficient free space given allocation rate estimates.]
1305bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1306
1307  assert_lock_strong(freelistLock());
1308  if (occupancy() > initiating_occupancy()) {
1309    if (PrintGCDetails && Verbose) {
1310      gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1311        short_name(), occupancy(), initiating_occupancy());
1312    }
1313    return true;
1314  }
1315  if (UseCMSInitiatingOccupancyOnly) {
1316    return false;
1317  }
1318  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1319    if (PrintGCDetails && Verbose) {
1320      gclog_or_tty->print(" %s: collect because expanded for allocation ",
1321        short_name());
1322    }
1323    return true;
1324  }
1325  if (_cmsSpace->should_concurrent_collect()) {
1326    if (PrintGCDetails && Verbose) {
1327      gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1328        short_name());
1329    }
1330    return true;
1331  }
1332  return false;
1333}
1334
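// A minimal, standalone sketch (not part of HotSpot) of the occupancy trigger
// checked first in should_concurrent_collect() above: the generation's used
// fraction is compared against the configured initiating occupancy, a value
// in [0, 1] (e.g. derived from CMSInitiatingOccupancyFraction / 100).  Names
// below are hypothetical.
static bool occupancy_trigger_sketch(size_t used_bytes,
                                     size_t capacity_bytes,
                                     double initiating_occupancy) {
  const double occupancy = (double) used_bytes / (double) capacity_bytes;
  return occupancy > initiating_occupancy;
}
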
1335void ConcurrentMarkSweepGeneration::collect(bool   full,
1336                                            bool   clear_all_soft_refs,
1337                                            size_t size,
1338                                            bool   tlab)
1339{
1340  collector()->collect(full, clear_all_soft_refs, size, tlab);
1341}
1342
1343void CMSCollector::collect(bool   full,
1344                           bool   clear_all_soft_refs,
1345                           size_t size,
1346                           bool   tlab)
1347{
1348  // The following "if" branch is present for defensive reasons.
1349  // In the current uses of this interface, it can be replaced with:
1350  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1351  // But I am not placing that assert here to allow future
1352  // generality in invoking this interface.
1353  if (GC_locker::is_active()) {
1354    // A consistency test for GC_locker
1355    assert(GC_locker::needs_gc(), "Should have been set already");
1356    // Skip this foreground collection, instead
1357    // expanding the heap if necessary.
1358    // Need the free list locks for the call to free() in compute_new_size()
1359    compute_new_size();
1360    return;
1361  }
1362  acquire_control_and_collect(full, clear_all_soft_refs);
1363}
1364
1365void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1366  GenCollectedHeap* gch = GenCollectedHeap::heap();
1367  unsigned int gc_count = gch->total_full_collections();
1368  if (gc_count == full_gc_count) {
1369    MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1370    _full_gc_requested = true;
1371    _full_gc_cause = cause;
1372    CGC_lock->notify();   // nudge CMS thread
1373  } else {
1374    assert(gc_count > full_gc_count, "Error: causal loop");
1375  }
1376}
1377
1378bool CMSCollector::is_external_interruption() {
1379  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1380  return GCCause::is_user_requested_gc(cause) ||
1381         GCCause::is_serviceability_requested_gc(cause);
1382}
1383
1384void CMSCollector::report_concurrent_mode_interruption() {
1385  if (is_external_interruption()) {
1386    if (PrintGCDetails) {
1387      gclog_or_tty->print(" (concurrent mode interrupted)");
1388    }
1389  } else {
1390    if (PrintGCDetails) {
1391      gclog_or_tty->print(" (concurrent mode failure)");
1392    }
1393    _gc_tracer_cm->report_concurrent_mode_failure();
1394  }
1395}
1396
1397
1398// The foreground and background collectors need to coordinate in order
1399// to make sure that they do not mutually interfere with CMS collections.
1400// When a background collection is active,
1401// the foreground collector may need to take over (preempt) and
1402// synchronously complete an ongoing collection. Depending on the
1403// frequency of the background collections and the heap usage
1404// of the application, this preemption can be seldom or frequent.
1405// There are only certain
1406// points in the background collection at which the "collection-baton"
1407// can be passed to the foreground collector.
1408//
1409// The foreground collector will wait for the baton before
1410// starting any part of the collection.  The foreground collector
1411// will only wait at one location.
1412//
1413// The background collector will yield the baton before starting a new
1414// phase of the collection (e.g., before initial marking, marking from roots,
1415// precleaning, final re-mark, sweep etc.)  This is normally done at the head
1416// of the loop which switches the phases. The background collector does some
1417// of the phases (initial mark, final re-mark) with the world stopped.
1418// Because of locking involved in stopping the world,
1419// the foreground collector should not block waiting for the background
1420// collector when it is doing a stop-the-world phase.  The background
1421// collector will yield the baton at an additional point just before
1422// it enters a stop-the-world phase.  Once the world is stopped, the
1423// background collector checks the phase of the collection.  If the
1424// phase has not changed, it proceeds with the collection.  If the
1425// phase has changed, it skips that phase of the collection.  See
1426// the comments on the use of the Heap_lock in collect_in_background().
1427//
1428// Variables used in baton passing.
1429//   _foregroundGCIsActive - Set to true by the foreground collector when
1430//      it wants the baton.  The foreground clears it when it has finished
1431//      the collection.
1432//   _foregroundGCShouldWait - Set to true by the background collector
1433//      when it is running.  The foreground collector waits while
1434//      _foregroundGCShouldWait is true.
1435//  CGC_lock - monitor used to protect access to the above variables
1436//      and to notify the foreground and background collectors.
1437//  _collectorState - current state of the CMS collection.
1438//
1439// The foreground collector
1440//   acquires the CGC_lock
1441//   sets _foregroundGCIsActive
1442//   waits on the CGC_lock for _foregroundGCShouldWait to be false
1443//     various locks acquired in preparation for the collection
1444//     are released so as not to block the background collector
1445//     that is in the midst of a collection
1446//   proceeds with the collection
1447//   clears _foregroundGCIsActive
1448//   returns
1449//
1450// The background collector in a loop iterating on the phases of the
1451//      collection
1452//   acquires the CGC_lock
1453//   sets _foregroundGCShouldWait
1454//   if _foregroundGCIsActive is set
1455//     clears _foregroundGCShouldWait, notifies CGC_lock
1456//     waits on CGC_lock for _foregroundGCIsActive to become false
1457//     and exits the loop.
1458//   otherwise
1459//     proceed with that phase of the collection
1460//     if the phase is a stop-the-world phase,
1461//       yield the baton once more just before enqueueing
1462//       the stop-world CMS operation (executed by the VM thread).
1463//   returns after all phases of the collection are done
1464//
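
// A minimal, standalone sketch (not part of HotSpot) of the baton-passing
// handshake described above.  Standard C++ primitives stand in for the
// CGC_lock monitor, and the two booleans mirror _foregroundGCIsActive and
// _foregroundGCShouldWait; all names are illustrative only.
#include <condition_variable>
#include <mutex>

struct BatonSketch {
  std::mutex              monitor;                  // plays the role of CGC_lock
  std::condition_variable cv;
  bool foreground_is_active   = false;              // ~ _foregroundGCIsActive
  bool foreground_should_wait = false;              // ~ _foregroundGCShouldWait

  // Foreground (VM thread): announce that we want the baton, wait until the
  // background collector yields, do the collection, then release the baton.
  void foreground_collect() {
    std::unique_lock<std::mutex> l(monitor);
    foreground_is_active = true;
    cv.notify_all();                                // wake a waiting background thread
    cv.wait(l, [&] { return !foreground_should_wait; });
    // ... synchronous (foreground) collection would happen here ...
    foreground_is_active = false;
    cv.notify_all();
  }

  // Background (CMS thread): called at the head of each phase.  Returns true
  // if the baton was handed over, in which case the rest of this concurrent
  // cycle is abandoned.
  bool background_yield_point() {
    std::unique_lock<std::mutex> l(monitor);
    foreground_should_wait = true;                  // assume we keep running
    if (foreground_is_active) {
      foreground_should_wait = false;               // yield: let the foreground proceed
      cv.notify_all();
      cv.wait(l, [&] { return !foreground_is_active; });
      return true;
    }
    return false;                                   // proceed with the next phase
  }

  // Background (CMS thread): called once all phases are done, so that a
  // foreground collector which arrived mid-cycle is released.
  void background_cycle_end() {
    std::lock_guard<std::mutex> l(monitor);
    foreground_should_wait = false;
    cv.notify_all();
  }
};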
1465
1466void CMSCollector::acquire_control_and_collect(bool full,
1467        bool clear_all_soft_refs) {
1468  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1469  assert(!Thread::current()->is_ConcurrentGC_thread(),
1470         "shouldn't try to acquire control from self!");
1471
1472  // Start the protocol for acquiring control of the
1473  // collection from the background collector (aka CMS thread).
1474  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1475         "VM thread should have CMS token");
1476  // Remember the possibly interrupted state of an ongoing
1477  // concurrent collection
1478  CollectorState first_state = _collectorState;
1479
1480  // Signal to a possibly ongoing concurrent collection that
1481  // we want to do a foreground collection.
1482  _foregroundGCIsActive = true;
1483
1484  // Release locks and wait for a notify from the background collector.
1485  // Releasing the locks is only necessary for phases which
1486  // yield to improve the granularity of the collection.
1487  assert_lock_strong(bitMapLock());
1488  // We need to lock the Free list lock for the space that we are
1489  // currently collecting.
1490  assert(haveFreelistLocks(), "Must be holding free list locks");
1491  bitMapLock()->unlock();
1492  releaseFreelistLocks();
1493  {
1494    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1495    if (_foregroundGCShouldWait) {
1496      // We are going to be waiting for action from the CMS thread;
1497      // it had better not be gone (for instance at shutdown)!
1498      assert(ConcurrentMarkSweepThread::cmst() != NULL,
1499             "CMS thread must be running");
1500      // Wait here until the background collector gives us the go-ahead
1501      ConcurrentMarkSweepThread::clear_CMS_flag(
1502        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1503      // Get a possibly blocked CMS thread going:
1504      //   Note that we set _foregroundGCIsActive true above,
1505      //   without protection of the CGC_lock.
1506      CGC_lock->notify();
1507      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1508             "Possible deadlock");
1509      while (_foregroundGCShouldWait) {
1510        // wait for notification
1511        CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1512        // Possibility of delay/starvation here, since CMS token does
1513        // not know to give priority to the VM thread? Actually, I think
1514        // there wouldn't be any delay/starvation, but the proof of
1515        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1516      }
1517      ConcurrentMarkSweepThread::set_CMS_flag(
1518        ConcurrentMarkSweepThread::CMS_vm_has_token);
1519    }
1520  }
1521  // The CMS_token is already held.  Get back the other locks.
1522  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1523         "VM thread should have CMS token");
1524  getFreelistLocks();
1525  bitMapLock()->lock_without_safepoint_check();
1526  if (TraceCMSState) {
1527    gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1528      INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
1529    gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1530  }
1531
1532  // Inform cms gen if this was due to partial collection failing.
1533  // The CMS gen may use this fact to determine its expansion policy.
1534  GenCollectedHeap* gch = GenCollectedHeap::heap();
1535  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1536    assert(!_cmsGen->incremental_collection_failed(),
1537           "Should have been noticed, reacted to and cleared");
1538    _cmsGen->set_incremental_collection_failed();
1539  }
1540
1541  if (first_state > Idling) {
1542    report_concurrent_mode_interruption();
1543  }
1544
1545  set_did_compact(true);
1546
1547  // If the collection is being acquired from the background
1548  // collector, there may be references on the discovered
1549  // references lists.  Abandon those references, since some
1550  // of them may have become unreachable after concurrent
1551  // discovery; the STW compacting collector will redo discovery
1552  // more precisely, without being subject to floating garbage.
1553  // Leaving otherwise unreachable references in the discovered
1554  // lists would require special handling.
1555  ref_processor()->disable_discovery();
1556  ref_processor()->abandon_partial_discovery();
1557  ref_processor()->verify_no_references_recorded();
1558
1559  if (first_state > Idling) {
1560    save_heap_summary();
1561  }
1562
1563  do_compaction_work(clear_all_soft_refs);
1564
1565  // Has the GC time limit been exceeded?
1566  size_t max_eden_size = _young_gen->max_capacity() -
1567                         _young_gen->to()->capacity() -
1568                         _young_gen->from()->capacity();
1569  GCCause::Cause gc_cause = gch->gc_cause();
1570  size_policy()->check_gc_overhead_limit(_young_gen->used(),
1571                                         _young_gen->eden()->used(),
1572                                         _cmsGen->max_capacity(),
1573                                         max_eden_size,
1574                                         full,
1575                                         gc_cause,
1576                                         gch->collector_policy());
1577
1578  // Reset the expansion cause, now that we just completed
1579  // a collection cycle.
1580  clear_expansion_cause();
1581  _foregroundGCIsActive = false;
1582  return;
1583}
1584
1585// Resize the tenured generation
1586// after obtaining the free list locks for the
1587// two generations.
1588void CMSCollector::compute_new_size() {
1589  assert_locked_or_safepoint(Heap_lock);
1590  FreelistLocker z(this);
1591  MetaspaceGC::compute_new_size();
1592  _cmsGen->compute_new_size_free_list();
1593}
1594
1595// A work method used by the foreground collector to do
1596// a mark-sweep-compact.
1597void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1598  GenCollectedHeap* gch = GenCollectedHeap::heap();
1599
1600  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1601  gc_timer->register_gc_start();
1602
1603  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1604  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1605
1606  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
1607
1608  // Temporarily widen the span of the weak reference processing to
1609  // the entire heap.
1610  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1611  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1612  // Temporarily, clear the "is_alive_non_header" field of the
1613  // reference processor.
1614  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1615  // Temporarily make reference _processing_ single threaded (non-MT).
1616  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1617  // Temporarily make refs discovery atomic
1618  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1619  // Temporarily make reference _discovery_ single threaded (non-MT)
1620  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1621
1622  ref_processor()->set_enqueuing_is_done(false);
1623  ref_processor()->enable_discovery();
1624  ref_processor()->setup_policy(clear_all_soft_refs);
1625  // If an asynchronous collection finishes, the _modUnionTable is
1626  // all clear.  If we are taking over the collection from an in-progress
1627  // asynchronous collection, clear the _modUnionTable.
1628  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1629    "_modUnionTable should be clear if the baton was not passed");
1630  _modUnionTable.clear_all();
1631  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1632    "mod union for klasses should be clear if the baton was passed");
1633  _ct->klass_rem_set()->clear_mod_union();
1634
1635  // We must adjust the allocation statistics being maintained
1636  // in the free list space. We do so by reading and clearing
1637  // the sweep timer and updating the block flux rate estimates below.
1638  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1639  if (_inter_sweep_timer.is_active()) {
1640    _inter_sweep_timer.stop();
1641    // Note that we do not use this sample to update the _inter_sweep_estimate.
1642    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1643                                            _inter_sweep_estimate.padded_average(),
1644                                            _intra_sweep_estimate.padded_average());
1645  }
1646
1647  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1648  #ifdef ASSERT
1649    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1650    size_t free_size = cms_space->free();
1651    assert(free_size ==
1652           pointer_delta(cms_space->end(), cms_space->compaction_top())
1653           * HeapWordSize,
1654      "All the free space should be compacted into one chunk at top");
1655    assert(cms_space->dictionary()->total_chunk_size(
1656                                      debug_only(cms_space->freelistLock())) == 0 ||
1657           cms_space->totalSizeInIndexedFreeLists() == 0,
1658      "All the free space should be in a single chunk");
1659    size_t num = cms_space->totalCount();
1660    assert((free_size == 0 && num == 0) ||
1661           (free_size > 0  && (num == 1 || num == 2)),
1662         "There should be at most 2 free chunks after compaction");
1663  #endif // ASSERT
1664  _collectorState = Resetting;
1665  assert(_restart_addr == NULL,
1666         "Should have been NULL'd before baton was passed");
1667  reset(false /* == !concurrent */);
1668  _cmsGen->reset_after_compaction();
1669  _concurrent_cycles_since_last_unload = 0;
1670
1671  // Clear any data recorded in the PLAB chunk arrays.
1672  if (_survivor_plab_array != NULL) {
1673    reset_survivor_plab_arrays();
1674  }
1675
1676  // Adjust the per-size allocation stats for the next epoch.
1677  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1678  // Restart the "inter sweep timer" for the next epoch.
1679  _inter_sweep_timer.reset();
1680  _inter_sweep_timer.start();
1681
1682  gc_timer->register_gc_end();
1683
1684  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1685
1686  // For a mark-sweep-compact, compute_new_size() will be called
1687  // in the heap's do_collection() method.
1688}
1689
1690void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1691  ContiguousSpace* eden_space = _young_gen->eden();
1692  ContiguousSpace* from_space = _young_gen->from();
1693  ContiguousSpace* to_space   = _young_gen->to();
1694  // Eden
1695  if (_eden_chunk_array != NULL) {
1696    gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1697                           p2i(eden_space->bottom()), p2i(eden_space->top()),
1698                           p2i(eden_space->end()), eden_space->capacity());
1699    gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1700                           "_eden_chunk_capacity=" SIZE_FORMAT,
1701                           _eden_chunk_index, _eden_chunk_capacity);
1702    for (size_t i = 0; i < _eden_chunk_index; i++) {
1703      gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1704                             i, p2i(_eden_chunk_array[i]));
1705    }
1706  }
1707  // Survivor
1708  if (_survivor_chunk_array != NULL) {
1709    gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1710                           p2i(from_space->bottom()), p2i(from_space->top()),
1711                           p2i(from_space->end()), from_space->capacity());
1712    gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1713                           "_survivor_chunk_capacity=" SIZE_FORMAT,
1714                           _survivor_chunk_index, _survivor_chunk_capacity);
1715    for (size_t i = 0; i < _survivor_chunk_index; i++) {
1716      gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1717                             i, p2i(_survivor_chunk_array[i]));
1718    }
1719  }
1720}
1721
1722void CMSCollector::getFreelistLocks() const {
1723  // Get locks for all free lists in all generations that this
1724  // collector is responsible for
1725  _cmsGen->freelistLock()->lock_without_safepoint_check();
1726}
1727
1728void CMSCollector::releaseFreelistLocks() const {
1729  // Release locks for all free lists in all generations that this
1730  // collector is responsible for
1731  _cmsGen->freelistLock()->unlock();
1732}
1733
1734bool CMSCollector::haveFreelistLocks() const {
1735  // Check locks for all free lists in all generations that this
1736  // collector is responsible for
1737  assert_lock_strong(_cmsGen->freelistLock());
1738  PRODUCT_ONLY(ShouldNotReachHere());
1739  return true;
1740}
1741
1742// A utility class that is used by the CMS collector to
1743// temporarily "release" the foreground collector from its
1744// usual obligation to wait for the background collector to
1745// complete an ongoing phase before proceeding.
1746class ReleaseForegroundGC: public StackObj {
1747 private:
1748  CMSCollector* _c;
1749 public:
1750  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1751    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1752    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1753    // allow a potentially blocked foreground collector to proceed
1754    _c->_foregroundGCShouldWait = false;
1755    if (_c->_foregroundGCIsActive) {
1756      CGC_lock->notify();
1757    }
1758    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1759           "Possible deadlock");
1760  }
1761
1762  ~ReleaseForegroundGC() {
1763    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1764    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1765    _c->_foregroundGCShouldWait = true;
1766  }
1767};
1768
1769void CMSCollector::collect_in_background(GCCause::Cause cause) {
1770  assert(Thread::current()->is_ConcurrentGC_thread(),
1771    "A CMS asynchronous collection is only allowed on a CMS thread.");
1772
1773  GenCollectedHeap* gch = GenCollectedHeap::heap();
1774  {
1775    bool safepoint_check = Mutex::_no_safepoint_check_flag;
1776    MutexLockerEx hl(Heap_lock, safepoint_check);
1777    FreelistLocker fll(this);
1778    MutexLockerEx x(CGC_lock, safepoint_check);
1779    if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
1780      // The foreground collector is active or we're
1781      // not using asynchronous collections.  Skip this
1782      // background collection.
1783      assert(!_foregroundGCShouldWait, "Should be clear");
1784      return;
1785    } else {
1786      assert(_collectorState == Idling, "Should be idling before start.");
1787      _collectorState = InitialMarking;
1788      register_gc_start(cause);
1789      // Reset the expansion cause, now that we are about to begin
1790      // a new cycle.
1791      clear_expansion_cause();
1792
1793      // Clear the MetaspaceGC flag since a concurrent collection
1794      // is starting but also clear it after the collection.
1795      MetaspaceGC::set_should_concurrent_collect(false);
1796    }
1797    // Decide if we want to enable class unloading as part of the
1798    // ensuing concurrent GC cycle.
1799    update_should_unload_classes();
1800    _full_gc_requested = false;           // acks all outstanding full gc requests
1801    _full_gc_cause = GCCause::_no_gc;
1802    // Signal that we are about to start a collection
1803    gch->increment_total_full_collections();  // ... starting a collection cycle
1804    _collection_count_start = gch->total_full_collections();
1805  }
1806
1807  // Used for PrintGC
1808  size_t prev_used;
1809  if (PrintGC && Verbose) {
1810    prev_used = _cmsGen->used();
1811  }
1812
1813  // The change of the collection state is normally done at this level;
1814  // the exceptions are phases that are executed while the world is
1815  // stopped.  For those phases the change of state is done while the
1816  // world is stopped.  For baton passing purposes this allows the
1817  // background collector to finish the phase and change state atomically.
1818  // The foreground collector cannot wait on a phase that is done
1819  // while the world is stopped because the foreground collector already
1820  // has the world stopped and would deadlock.
1821  while (_collectorState != Idling) {
1822    if (TraceCMSState) {
1823      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1824        p2i(Thread::current()), _collectorState);
1825    }
1826    // The foreground collector
1827    //   holds the Heap_lock throughout its collection.
1828    //   holds the CMS token (but not the lock)
1829    //     except while it is waiting for the background collector to yield.
1830    //
1831    // The foreground collector should be blocked (not for long)
1832    //   if the background collector is about to start a phase
1833    //   executed with world stopped.  If the background
1834    //   collector has already started such a phase, the
1835    //   foreground collector is blocked waiting for the
1836    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1837    //   are executed in the VM thread.
1838    //
1839    // The locking order is
1840    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1841    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1842    //   CMS token  (claimed in
1843    //                stop_world_and_do() -->
1844    //                  safepoint_synchronize() -->
1845    //                    CMSThread::synchronize())
1846
1847    {
1848      // Check if the FG collector wants us to yield.
1849      CMSTokenSync x(true); // is cms thread
1850      if (waitForForegroundGC()) {
1851        // We yielded to a foreground GC, nothing more to be
1852        // done this round.
1853        assert(_foregroundGCShouldWait == false, "We set it to false in "
1854               "waitForForegroundGC()");
1855        if (TraceCMSState) {
1856          gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1857            " exiting collection CMS state %d",
1858            p2i(Thread::current()), _collectorState);
1859        }
1860        return;
1861      } else {
1862        // The background collector can run but check to see if the
1863        // foreground collector has done a collection while the
1864        // background collector was waiting to get the CGC_lock
1865        // above.  If yes, break so that _foregroundGCShouldWait
1866        // is cleared before returning.
1867        if (_collectorState == Idling) {
1868          break;
1869        }
1870      }
1871    }
1872
1873    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1874      "should be waiting");
1875
1876    switch (_collectorState) {
1877      case InitialMarking:
1878        {
1879          ReleaseForegroundGC x(this);
1880          stats().record_cms_begin();
1881          VM_CMS_Initial_Mark initial_mark_op(this);
1882          VMThread::execute(&initial_mark_op);
1883        }
1884        // The collector state may be any legal state at this point
1885        // since the background collector may have yielded to the
1886        // foreground collector.
1887        break;
1888      case Marking:
1889        // initial marking in checkpointRootsInitialWork has been completed
1890        if (markFromRoots()) { // we were successful
1891          assert(_collectorState == Precleaning, "Collector state should "
1892            "have changed");
1893        } else {
1894          assert(_foregroundGCIsActive, "Internal state inconsistency");
1895        }
1896        break;
1897      case Precleaning:
1898        // marking from roots in markFromRoots has been completed
1899        preclean();
1900        assert(_collectorState == AbortablePreclean ||
1901               _collectorState == FinalMarking,
1902               "Collector state should have changed");
1903        break;
1904      case AbortablePreclean:
1905        abortable_preclean();
1906        assert(_collectorState == FinalMarking, "Collector state should "
1907          "have changed");
1908        break;
1909      case FinalMarking:
1910        {
1911          ReleaseForegroundGC x(this);
1912
1913          VM_CMS_Final_Remark final_remark_op(this);
1914          VMThread::execute(&final_remark_op);
1915        }
1916        assert(_foregroundGCShouldWait, "block post-condition");
1917        break;
1918      case Sweeping:
1919        // final marking in checkpointRootsFinal has been completed
1920        sweep();
1921        assert(_collectorState == Resizing, "Collector state change "
1922          "to Resizing must be done under the free_list_lock");
1923
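      // Note: there is deliberately no "break" here.  After a successful
      // sweep(), _collectorState is already Resizing (see the assert above),
      // so control falls through into the Resizing case.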
1924      case Resizing: {
1925        // Sweeping has been completed...
1926        // At this point the background collection has completed.
1927        // Don't move the call to compute_new_size() down
1928        // into code that might be executed if the background
1929        // collection was preempted.
1930        {
1931          ReleaseForegroundGC x(this);   // unblock FG collection
1932          MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1933          CMSTokenSync        z(true);   // not strictly needed.
1934          if (_collectorState == Resizing) {
1935            compute_new_size();
1936            save_heap_summary();
1937            _collectorState = Resetting;
1938          } else {
1939            assert(_collectorState == Idling, "The state should only change"
1940                   " because the foreground collector has finished the collection");
1941          }
1942        }
1943        break;
1944      }
1945      case Resetting:
1946        // CMS heap resizing has been completed
1947        reset(true);
1948        assert(_collectorState == Idling, "Collector state should "
1949          "have changed");
1950
1951        MetaspaceGC::set_should_concurrent_collect(false);
1952
1953        stats().record_cms_end();
1954        // Don't move the concurrent_phases_end() and compute_new_size()
1955        // calls to here because a preempted background collection
1956        // has its state set to "Resetting".
1957        break;
1958      case Idling:
1959      default:
1960        ShouldNotReachHere();
1961        break;
1962    }
1963    if (TraceCMSState) {
1964      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1965        p2i(Thread::current()), _collectorState);
1966    }
1967    assert(_foregroundGCShouldWait, "block post-condition");
1968  }
1969
1970  // Should this be in gc_epilogue?
1971  collector_policy()->counters()->update_counters();
1972
1973  {
1974    // Clear _foregroundGCShouldWait and, in the event that the
1975    // foreground collector is waiting, notify it, before
1976    // returning.
1977    MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1978    _foregroundGCShouldWait = false;
1979    if (_foregroundGCIsActive) {
1980      CGC_lock->notify();
1981    }
1982    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1983           "Possible deadlock");
1984  }
1985  if (TraceCMSState) {
1986    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1987      " exiting collection CMS state %d",
1988      p2i(Thread::current()), _collectorState);
1989  }
1990  if (PrintGC && Verbose) {
1991    _cmsGen->print_heap_change(prev_used);
1992  }
1993}
1994
1995void CMSCollector::register_gc_start(GCCause::Cause cause) {
1996  _cms_start_registered = true;
1997  _gc_timer_cm->register_gc_start();
1998  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1999}
2000
2001void CMSCollector::register_gc_end() {
2002  if (_cms_start_registered) {
2003    report_heap_summary(GCWhen::AfterGC);
2004
2005    _gc_timer_cm->register_gc_end();
2006    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2007    _cms_start_registered = false;
2008  }
2009}
2010
2011void CMSCollector::save_heap_summary() {
2012  GenCollectedHeap* gch = GenCollectedHeap::heap();
2013  _last_heap_summary = gch->create_heap_summary();
2014  _last_metaspace_summary = gch->create_metaspace_summary();
2015}
2016
2017void CMSCollector::report_heap_summary(GCWhen::Type when) {
2018  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2019  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2020}
2021
2022bool CMSCollector::waitForForegroundGC() {
2023  bool res = false;
2024  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2025         "CMS thread should have CMS token");
2026  // Block the foreground collector until the
2027  // background collector decides whether to
2028  // yield.
2029  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2030  _foregroundGCShouldWait = true;
2031  if (_foregroundGCIsActive) {
2032    // The background collector yields to the
2033    // foreground collector and returns a value
2034    // indicating that it has yielded.  The foreground
2035    // collector can proceed.
2036    res = true;
2037    _foregroundGCShouldWait = false;
2038    ConcurrentMarkSweepThread::clear_CMS_flag(
2039      ConcurrentMarkSweepThread::CMS_cms_has_token);
2040    ConcurrentMarkSweepThread::set_CMS_flag(
2041      ConcurrentMarkSweepThread::CMS_cms_wants_token);
2042    // Get a possibly blocked foreground thread going
2043    CGC_lock->notify();
2044    if (TraceCMSState) {
2045      gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2046        p2i(Thread::current()), _collectorState);
2047    }
2048    while (_foregroundGCIsActive) {
2049      CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2050    }
2051    ConcurrentMarkSweepThread::set_CMS_flag(
2052      ConcurrentMarkSweepThread::CMS_cms_has_token);
2053    ConcurrentMarkSweepThread::clear_CMS_flag(
2054      ConcurrentMarkSweepThread::CMS_cms_wants_token);
2055  }
2056  if (TraceCMSState) {
2057    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2058      p2i(Thread::current()), _collectorState);
2059  }
2060  return res;
2061}
2062
2063// Because of the need to lock the free lists and other structures in
2064// the collector, common to all the generations that the collector is
2065// collecting, we need the gc_prologues of individual CMS generations
2066// to delegate to their collector. It may have been simpler had the
2067// current infrastructure allowed one to call a prologue on a
2068// collector. In the absence of that we have the generation's
2069// prologue delegate to the collector, which delegates back
2070// some "local" work to a worker method in the individual generations
2071// that it's responsible for collecting, while itself doing any
2072// work common to all generations it's responsible for. A similar
2073// comment applies to the gc_epilogue()s.
2074// The role of the variable _between_prologue_and_epilogue is to
2075// enforce the invocation protocol.
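
// A minimal, standalone sketch (not part of HotSpot) of the delegation and
// re-entry guard described above: each generation's prologue/epilogue simply
// delegates to its collector, and the collector uses one flag so that only
// the first delegation per GC pause performs the shared work.  All names
// below are hypothetical.
struct CollectorGuardSketch {
  bool between_prologue_and_epilogue = false;

  void prologue() {
    if (between_prologue_and_epilogue) {
      return;                                // already invoked via a sibling generation
    }
    between_prologue_and_epilogue = true;
    // ... claim shared locks and do per-generation prologue work ...
  }

  void epilogue() {
    if (!between_prologue_and_epilogue) {
      return;                                // already invoked via a sibling generation
    }
    // ... per-generation epilogue work, then release shared locks ...
    between_prologue_and_epilogue = false;   // ready for the next cycle
  }
};
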
2076void CMSCollector::gc_prologue(bool full) {
2077  // Call gc_prologue_work() for the CMSGen
2078  // we are responsible for.
2079
2080  // The following locking discipline assumes that we are only called
2081  // when the world is stopped.
2082  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2083
2084  // The CMSCollector prologue must call the gc_prologues for the
2085  // "generations" that it's responsible
2086  // for.
2087
2088  assert(   Thread::current()->is_VM_thread()
2089         || (   CMSScavengeBeforeRemark
2090             && Thread::current()->is_ConcurrentGC_thread()),
2091         "Incorrect thread type for prologue execution");
2092
2093  if (_between_prologue_and_epilogue) {
2094    // We have already been invoked; this is a gc_prologue delegation
2095    // from yet another CMS generation that we are responsible for, just
2096    // ignore it since all relevant work has already been done.
2097    return;
2098  }
2099
2100  // set a bit saying prologue has been called; cleared in epilogue
2101  _between_prologue_and_epilogue = true;
2102  // Claim locks for common data structures, then call gc_prologue_work()
2103  // for each CMSGen.
2104
2105  getFreelistLocks();   // gets free list locks on constituent spaces
2106  bitMapLock()->lock_without_safepoint_check();
2107
2108  // Should call gc_prologue_work() for all cms gens we are responsible for
2109  bool duringMarking =    _collectorState >= Marking
2110                         && _collectorState < Sweeping;
2111
2112  // The young collections clear the modified oops state, which tells if
2113  // there are any modified oops in the class. The remark phase also needs
2114  // that information. Tell the young collection to save the union of all
2115  // modified klasses.
2116  if (duringMarking) {
2117    _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2118  }
2119
2120  bool registerClosure = duringMarking;
2121
2122  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2123
2124  if (!full) {
2125    stats().record_gc0_begin();
2126  }
2127}
2128
2129void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2130
2131  _capacity_at_prologue = capacity();
2132  _used_at_prologue = used();
2133
2134  // Delegate to CMScollector which knows how to coordinate between
2135  // this and any other CMS generations that it is responsible for
2136  // collecting.
2137  collector()->gc_prologue(full);
2138}
2139
2140// This is a "private" interface for use by this generation's CMSCollector.
2141// Not to be called directly by any other entity (for instance,
2142// GenCollectedHeap, which calls the "public" gc_prologue method above).
2143void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2144  bool registerClosure, ModUnionClosure* modUnionClosure) {
2145  assert(!incremental_collection_failed(), "Shouldn't be set yet");
2146  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2147    "Should be NULL");
2148  if (registerClosure) {
2149    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2150  }
2151  cmsSpace()->gc_prologue();
2152  // Clear stat counters
2153  NOT_PRODUCT(
2154    assert(_numObjectsPromoted == 0, "check");
2155    assert(_numWordsPromoted   == 0, "check");
2156    if (Verbose && PrintGC) {
2157      gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
2158                          SIZE_FORMAT " bytes concurrently",
2159      _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2160    }
2161    _numObjectsAllocated = 0;
2162    _numWordsAllocated   = 0;
2163  )
2164}
2165
2166void CMSCollector::gc_epilogue(bool full) {
2167  // The following locking discipline assumes that we are only called
2168  // when the world is stopped.
2169  assert(SafepointSynchronize::is_at_safepoint(),
2170         "world is stopped assumption");
2171
2172  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2173  // if linear allocation blocks need to be appropriately marked to allow
2174  // the blocks to be parsable. We also check here whether we need to nudge the
2175  // CMS collector thread to start a new cycle (if it's not already active).
2176  assert(   Thread::current()->is_VM_thread()
2177         || (   CMSScavengeBeforeRemark
2178             && Thread::current()->is_ConcurrentGC_thread()),
2179         "Incorrect thread type for epilogue execution");
2180
2181  if (!_between_prologue_and_epilogue) {
2182    // We have already been invoked; this is a gc_epilogue delegation
2183    // from yet another CMS generation that we are responsible for, just
2184    // ignore it since all relevant work has already been done.
2185    return;
2186  }
2187  assert(haveFreelistLocks(), "must have freelist locks");
2188  assert_lock_strong(bitMapLock());
2189
2190  _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2191
2192  _cmsGen->gc_epilogue_work(full);
2193
2194  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2195    // in case sampling was not already enabled, enable it
2196    _start_sampling = true;
2197  }
2198  // reset _eden_chunk_array so sampling starts afresh
2199  _eden_chunk_index = 0;
2200
2201  size_t cms_used   = _cmsGen->cmsSpace()->used();
2202
2203  // update performance counters - this uses a special version of
2204  // update_counters() that allows the utilization to be passed as a
2205  // parameter, avoiding multiple calls to used().
2206  //
2207  _cmsGen->update_counters(cms_used);
2208
2209  bitMapLock()->unlock();
2210  releaseFreelistLocks();
2211
2212  if (!CleanChunkPoolAsync) {
2213    Chunk::clean_chunk_pool();
2214  }
2215
2216  set_did_compact(false);
2217  _between_prologue_and_epilogue = false;  // ready for next cycle
2218}
2219
2220void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2221  collector()->gc_epilogue(full);
2222
2223  // Also reset promotion tracking in par gc thread states.
2224  for (uint i = 0; i < ParallelGCThreads; i++) {
2225    _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2226  }
2227}
2228
2229void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2230  assert(!incremental_collection_failed(), "Should have been cleared");
2231  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2232  cmsSpace()->gc_epilogue();
2233  // Print stat counters
2234  NOT_PRODUCT(
2235    assert(_numObjectsAllocated == 0, "check");
2236    assert(_numWordsAllocated == 0, "check");
2237    if (Verbose && PrintGC) {
2238      gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
2239                          SIZE_FORMAT " bytes",
2240                 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2241    }
2242    _numObjectsPromoted = 0;
2243    _numWordsPromoted   = 0;
2244  )
2245
2246  if (PrintGC && Verbose) {
2247    // The call down the chain in contiguous_available() needs the freelistLock,
2248    // so print this out before releasing the freelistLock.
2249    gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2250                        contiguous_available());
2251  }
2252}
2253
2254#ifndef PRODUCT
2255bool CMSCollector::have_cms_token() {
2256  Thread* thr = Thread::current();
2257  if (thr->is_VM_thread()) {
2258    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2259  } else if (thr->is_ConcurrentGC_thread()) {
2260    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2261  } else if (thr->is_GC_task_thread()) {
2262    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2263           ParGCRareEvent_lock->owned_by_self();
2264  }
2265  return false;
2266}
2267#endif
2268
2269// Check reachability of the given heap address in CMS generation,
2270// treating all other generations as roots.
2271bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2272  // We could "guarantee" below, rather than assert, but I'll
2273  // leave these as "asserts" so that an adventurous debugger
2274  // could try this in the product build provided some subset of
2275  // the conditions were met, provided they were interested in the
2276  // results and knew that the computation below wouldn't interfere
2277  // with other concurrent computations mutating the structures
2278  // being read or written.
2279  assert(SafepointSynchronize::is_at_safepoint(),
2280         "Else mutations in object graph will make answer suspect");
2281  assert(have_cms_token(), "Should hold cms token");
2282  assert(haveFreelistLocks(), "must hold free list locks");
2283  assert_lock_strong(bitMapLock());
2284
2285  // Clear the marking bit map array before starting, but, just
2286  // for kicks, first report if the given address is already marked
2287  gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2288                _markBitMap.isMarked(addr) ? "" : " not");
2289
2290  if (verify_after_remark()) {
2291    MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2292    bool result = verification_mark_bm()->isMarked(addr);
2293    gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2294                           result ? "IS" : "is NOT");
2295    return result;
2296  } else {
2297    gclog_or_tty->print_cr("Could not compute result");
2298    return false;
2299  }
2300}
2301
2302
2303void
2304CMSCollector::print_on_error(outputStream* st) {
2305  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2306  if (collector != NULL) {
2307    CMSBitMap* bitmap = &collector->_markBitMap;
2308    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2309    bitmap->print_on_error(st, " Bits: ");
2310
2311    st->cr();
2312
2313    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2314    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2315    mut_bitmap->print_on_error(st, " Bits: ");
2316  }
2317}
2318
2319////////////////////////////////////////////////////////
2320// CMS Verification Support
2321////////////////////////////////////////////////////////
2322// Following the remark phase, the following invariant
2323// should hold -- each object in the CMS heap which is
2324// marked in the verification_mark_bm() should also be marked in markBitMap().
2325
2326class VerifyMarkedClosure: public BitMapClosure {
2327  CMSBitMap* _marks;
2328  bool       _failed;
2329
2330 public:
2331  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2332
2333  bool do_bit(size_t offset) {
2334    HeapWord* addr = _marks->offsetToHeapWord(offset);
2335    if (!_marks->isMarked(addr)) {
2336      oop(addr)->print_on(gclog_or_tty);
2337      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2338      _failed = true;
2339    }
2340    return true;
2341  }
2342
2343  bool failed() { return _failed; }
2344};
2345
2346bool CMSCollector::verify_after_remark(bool silent) {
2347  if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2348  MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2349  static bool init = false;
2350
2351  assert(SafepointSynchronize::is_at_safepoint(),
2352         "Else mutations in object graph will make answer suspect");
2353  assert(have_cms_token(),
2354         "Else there may be mutual interference in use of "
2355         "verification data structures");
2356  assert(_collectorState > Marking && _collectorState <= Sweeping,
2357         "Else marking info checked here may be obsolete");
2358  assert(haveFreelistLocks(), "must hold free list locks");
2359  assert_lock_strong(bitMapLock());
2360
2361
2362  // Allocate marking bit map if not already allocated
2363  if (!init) { // first time
2364    if (!verification_mark_bm()->allocate(_span)) {
2365      return false;
2366    }
2367    init = true;
2368  }
2369
2370  assert(verification_mark_stack()->isEmpty(), "Should be empty");
2371
2372  // Turn off refs discovery -- so we will be tracing through refs.
2373  // This is as intended, because by this time
2374  // GC must already have cleared any refs that need to be cleared,
2375  // and traced those that need to be marked; moreover,
2376  // the marking done here is not going to interfere in any
2377  // way with the marking information used by GC.
2378  NoRefDiscovery no_discovery(ref_processor());
2379
2380  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2381
2382  // Clear any marks from a previous round
2383  verification_mark_bm()->clear_all();
2384  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2385  verify_work_stacks_empty();
2386
2387  GenCollectedHeap* gch = GenCollectedHeap::heap();
2388  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2389  // Update the saved marks which may affect the root scans.
2390  gch->save_marks();
2391
2392  if (CMSRemarkVerifyVariant == 1) {
2393    // In this first variant of verification, we complete
2394    // all marking, then check if the new marks-vector is
2395    // a subset of the CMS marks-vector.
2396    verify_after_remark_work_1();
2397  } else if (CMSRemarkVerifyVariant == 2) {
2398    // In this second variant of verification, we flag an error
2399    // (i.e. an object reachable in the new marks-vector not reachable
2400    // in the CMS marks-vector) immediately, also indicating the
2401// identity of an object (A) that references the unmarked object (B) --
2402    // presumably, a mutation to A failed to be picked up by preclean/remark?
2403    verify_after_remark_work_2();
2404  } else {
2405    warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2406            CMSRemarkVerifyVariant);
2407  }
2408  if (!silent) gclog_or_tty->print(" done] ");
2409  return true;
2410}
2411
2412void CMSCollector::verify_after_remark_work_1() {
2413  ResourceMark rm;
2414  HandleMark  hm;
2415  GenCollectedHeap* gch = GenCollectedHeap::heap();
2416
2417  // Get a clear set of claim bits for the roots processing to work with.
2418  ClassLoaderDataGraph::clear_claimed_marks();
2419
2420  // Mark from roots one level into CMS
2421  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2422  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2423
2424  {
2425    StrongRootsScope srs(1);
2426
2427    gch->gen_process_roots(&srs,
2428                           GenCollectedHeap::OldGen,
2429                           true,   // younger gens are roots
2430                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2431                           should_unload_classes(),
2432                           &notOlder,
2433                           NULL,
2434                           NULL);
2435  }
2436
2437  // Now mark from the roots
2438  MarkFromRootsClosure markFromRootsClosure(this, _span,
2439    verification_mark_bm(), verification_mark_stack(),
2440    false /* don't yield */, true /* verifying */);
2441  assert(_restart_addr == NULL, "Expected pre-condition");
2442  verification_mark_bm()->iterate(&markFromRootsClosure);
2443  while (_restart_addr != NULL) {
2444    // Deal with stack overflow by restarting at the indicated
2445    // address.
2446    HeapWord* ra = _restart_addr;
2447    markFromRootsClosure.reset(ra);
2448    _restart_addr = NULL;
2449    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2450  }
2451  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2452  verify_work_stacks_empty();
2453
2454  // Marking completed -- now verify that each bit marked in
2455  // verification_mark_bm() is also marked in markBitMap(); flag all
2456  // errors by printing corresponding objects.
2457  VerifyMarkedClosure vcl(markBitMap());
2458  verification_mark_bm()->iterate(&vcl);
2459  if (vcl.failed()) {
2460    gclog_or_tty->print("Verification failed");
2461    gch->print_on(gclog_or_tty);
2462    fatal("CMS: failed marking verification after remark");
2463  }
2464}
2465
2466class VerifyKlassOopsKlassClosure : public KlassClosure {
2467  class VerifyKlassOopsClosure : public OopClosure {
2468    CMSBitMap* _bitmap;
2469   public:
2470    VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2471    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2472    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2473  } _oop_closure;
2474 public:
2475  VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2476  void do_klass(Klass* k) {
2477    k->oops_do(&_oop_closure);
2478  }
2479};
2480
2481void CMSCollector::verify_after_remark_work_2() {
2482  ResourceMark rm;
2483  HandleMark  hm;
2484  GenCollectedHeap* gch = GenCollectedHeap::heap();
2485
2486  // Get a clear set of claim bits for the roots processing to work with.
2487  ClassLoaderDataGraph::clear_claimed_marks();
2488
2489  // Mark from roots one level into CMS
2490  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2491                                     markBitMap());
2492  CLDToOopClosure cld_closure(&notOlder, true);
2493
2494  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2495
2496  {
2497    StrongRootsScope srs(1);
2498
2499    gch->gen_process_roots(&srs,
2500                           GenCollectedHeap::OldGen,
2501                           true,   // younger gens are roots
2502                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
2503                           should_unload_classes(),
2504                           &notOlder,
2505                           NULL,
2506                           &cld_closure);
2507  }
2508
2509  // Now mark from the roots
2510  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2511    verification_mark_bm(), markBitMap(), verification_mark_stack());
2512  assert(_restart_addr == NULL, "Expected pre-condition");
2513  verification_mark_bm()->iterate(&markFromRootsClosure);
2514  while (_restart_addr != NULL) {
2515    // Deal with stack overflow by restarting at the indicated
2516    // address.
2517    HeapWord* ra = _restart_addr;
2518    markFromRootsClosure.reset(ra);
2519    _restart_addr = NULL;
2520    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2521  }
2522  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2523  verify_work_stacks_empty();
2524
2525  VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2526  ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2527
2528  // Marking completed -- now verify that each bit marked in
2529  // verification_mark_bm() is also marked in markBitMap(); flag all
2530  // errors by printing corresponding objects.
2531  VerifyMarkedClosure vcl(markBitMap());
2532  verification_mark_bm()->iterate(&vcl);
2533  assert(!vcl.failed(), "Else verification above should not have succeeded");
2534}
2535
2536void ConcurrentMarkSweepGeneration::save_marks() {
2537  // delegate to CMS space
2538  cmsSpace()->save_marks();
2539  for (uint i = 0; i < ParallelGCThreads; i++) {
2540    _par_gc_thread_states[i]->promo.startTrackingPromotions();
2541  }
2542}
2543
2544bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2545  return cmsSpace()->no_allocs_since_save_marks();
2546}
2547
2548#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2549                                                                \
2550void ConcurrentMarkSweepGeneration::                            \
2551oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2552  cl->set_generation(this);                                     \
2553  cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2554  cl->reset_generation();                                       \
2555  save_marks();                                                 \
2556}
2557
2558ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
2559
2560void
2561ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2562  if (freelistLock()->owned_by_self()) {
2563    Generation::oop_iterate(cl);
2564  } else {
2565    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2566    Generation::oop_iterate(cl);
2567  }
2568}
2569
2570void
2571ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2572  if (freelistLock()->owned_by_self()) {
2573    Generation::object_iterate(cl);
2574  } else {
2575    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2576    Generation::object_iterate(cl);
2577  }
2578}
2579
2580void
2581ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2582  if (freelistLock()->owned_by_self()) {
2583    Generation::safe_object_iterate(cl);
2584  } else {
2585    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2586    Generation::safe_object_iterate(cl);
2587  }
2588}
2589
2590void
2591ConcurrentMarkSweepGeneration::post_compact() {
2592}
2593
2594void
2595ConcurrentMarkSweepGeneration::prepare_for_verify() {
2596  // Fix the linear allocation blocks to look like free blocks.
2597
2598  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2599  // are not called when the heap is verified during universe initialization and
2600  // at vm shutdown.
2601  if (freelistLock()->owned_by_self()) {
2602    cmsSpace()->prepare_for_verify();
2603  } else {
2604    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2605    cmsSpace()->prepare_for_verify();
2606  }
2607}
2608
2609void
2610ConcurrentMarkSweepGeneration::verify() {
2611  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2612  // are not called when the heap is verified during universe initialization and
2613  // at vm shutdown.
2614  if (freelistLock()->owned_by_self()) {
2615    cmsSpace()->verify();
2616  } else {
2617    MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2618    cmsSpace()->verify();
2619  }
2620}
2621
2622void CMSCollector::verify() {
2623  _cmsGen->verify();
2624}
2625
2626#ifndef PRODUCT
2627bool CMSCollector::overflow_list_is_empty() const {
2628  assert(_num_par_pushes >= 0, "Inconsistency");
2629  if (_overflow_list == NULL) {
2630    assert(_num_par_pushes == 0, "Inconsistency");
2631  }
2632  return _overflow_list == NULL;
2633}
2634
2635// The methods verify_work_stacks_empty() and verify_overflow_empty()
2636// merely consolidate assertion checks that appear to occur together frequently.
2637void CMSCollector::verify_work_stacks_empty() const {
2638  assert(_markStack.isEmpty(), "Marking stack should be empty");
2639  assert(overflow_list_is_empty(), "Overflow list should be empty");
2640}
2641
2642void CMSCollector::verify_overflow_empty() const {
2643  assert(overflow_list_is_empty(), "Overflow list should be empty");
2644  assert(no_preserved_marks(), "No preserved marks");
2645}
2646#endif // PRODUCT
2647
2648// Decide if we want to enable class unloading as part of the
2649// ensuing concurrent GC cycle. We will collect and
2650// unload classes if it's the case that:
2651// (1) an explicit gc request has been made and the flag
2652//     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2653// (2) (a) class unloading is enabled at the command line, and
2654//     (b) the number of concurrent cycles since the last class unload
2655//         exceeds CMSClassUnloadingMaxInterval, or old gen is getting
2656//         really full.
2657// NOTE: Provided there is no change in the state of the heap between
2658// calls to this method, it should have idempotent results. Moreover,
2659// its results should be monotonically increasing (i.e. going from 0 to 1,
2660// but not 1 to 0) between successive calls between which the heap was
2661// not collected. The implementation below therefore relies on the
2662// property that concurrent_cycles_since_last_unload() will not decrease
2663// unless a collection cycle happened and that _cmsGen->is_too_full() is
2664// itself also monotonic in that sense.
2665void CMSCollector::update_should_unload_classes() {
2666  _should_unload_classes = false;
2667  // Condition 1 above
2668  if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2669    _should_unload_classes = true;
2670  } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2671    // Disjuncts under 2.(b) above
2672    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2673                              CMSClassUnloadingMaxInterval)
2674                           || _cmsGen->is_too_full();
2675  }
2676}
2677
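// "Too full" here means that a concurrent collection is already warranted
// (see should_concurrent_collect()) and occupancy exceeds
// CMSIsTooFullPercentage.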
2678bool ConcurrentMarkSweepGeneration::is_too_full() const {
2679  bool res = should_concurrent_collect();
2680  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2681  return res;
2682}
2683
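// Establish the root-scanning options and verification state for the
// upcoming cycle: when classes are being unloaded the code cache is removed
// from the root set, otherwise it is added back; verification is enabled
// only if one of the Verify* flags requests it.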
2684void CMSCollector::setup_cms_unloading_and_verification_state() {
2685  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2686                             || VerifyBeforeExit;
2687  const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2688
2689  // We set the proper root for this CMS cycle here.
2690  if (should_unload_classes()) {   // Should unload classes this cycle
2691    remove_root_scanning_option(rso);  // Shrink the root set appropriately
2692    set_verifying(should_verify);    // Set verification state for this cycle
2693    return;                            // Nothing else needs to be done at this time
2694  }
2695
2696  // Not unloading classes this cycle
2697  assert(!should_unload_classes(), "Inconsistency!");
2698
2699  // If we are not unloading classes then add SO_AllCodeCache to root
2700  // scanning options.
2701  add_root_scanning_option(rso);
2702
2703  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2704    set_verifying(true);
2705  } else if (verifying() && !should_verify) {
2706    // We were verifying, but some verification flags got disabled.
2707    set_verifying(false);
2708    // Exclude symbols, strings and code cache elements from root scanning to
2709    // reduce initial-mark (IM) and remark (RM) pauses.
2710    remove_root_scanning_option(rso);
2711  }
2712}
2713
2714
2715#ifndef PRODUCT
2716HeapWord* CMSCollector::block_start(const void* p) const {
2717  const HeapWord* addr = (HeapWord*)p;
2718  if (_span.contains(p)) {
2719    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2720      return _cmsGen->cmsSpace()->block_start(p);
2721    }
2722  }
2723  return NULL;
2724}
2725#endif
2726
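// Expand the generation (while holding the free list lock) in response to an
// allocation request that could not otherwise be satisfied, optionally sleep
// for GCExpandToAllocateDelayMillis, and then retry the allocation.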
2727HeapWord*
2728ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2729                                                   bool   tlab,
2730                                                   bool   parallel) {
2731  CMSSynchronousYieldRequest yr;
2732  assert(!tlab, "Can't deal with TLAB allocation");
2733  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2734  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2735  if (GCExpandToAllocateDelayMillis > 0) {
2736    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2737  }
2738  return have_lock_and_allocate(word_size, tlab);
2739}
2740
2741void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2742    size_t bytes,
2743    size_t expand_bytes,
2744    CMSExpansionCause::Cause cause)
2745{
2746
2747  bool success = expand(bytes, expand_bytes);
2748
2749  // remember why we expanded; this information is used
2750  // by should_concurrent_collect() when making decisions on whether to start
2751  // a new CMS cycle.
2752  if (success) {
2753    set_expansion_cause(cause);
2754    if (PrintGCDetails && Verbose) {
2755      gclog_or_tty->print_cr("Expanded CMS gen for %s",
2756        CMSExpansionCause::to_string(cause));
2757    }
2758  }
2759}
2760
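// Refill a parallel GC thread's promotion LAB, expanding the generation if
// necessary. The ParGCRareEvent_lock serializes competing expanders; we loop
// because another thread's expansion may satisfy (or consume) the space.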
2761HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2762  HeapWord* res = NULL;
2763  MutexLocker x(ParGCRareEvent_lock);
2764  while (true) {
2765    // Expansion by some other thread might make alloc OK now:
2766    res = ps->lab.alloc(word_sz);
2767    if (res != NULL) return res;
2768    // If there's not enough expansion space available, give up.
2769    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2770      return NULL;
2771    }
2772    // Otherwise, we try expansion.
2773    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2774    // Now go around the loop and try alloc again;
2775    // A competing par_promote might beat us to the expansion space,
2776    // so we may go around the loop again if promotion fails again.
2777    if (GCExpandToAllocateDelayMillis > 0) {
2778      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2779    }
2780  }
2781}
2782
2783
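// Analogous to expand_and_par_lab_allocate() above, but for ensuring
// promotion-info spooling space rather than a promotion LAB.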
2784bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2785  PromotionInfo* promo) {
2786  MutexLocker x(ParGCRareEvent_lock);
2787  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2788  while (true) {
2789    // Expansion by some other thread might make alloc OK now:
2790    if (promo->ensure_spooling_space()) {
2791      assert(promo->has_spooling_space(),
2792             "Post-condition of successful ensure_spooling_space()");
2793      return true;
2794    }
2795    // If there's not enough expansion space available, give up.
2796    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2797      return false;
2798    }
2799    // Otherwise, we try expansion.
2800    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2801    // Now go around the loop and try alloc again;
2802    // A competing allocation might beat us to the expansion space,
2803    // so we may go around the loop again if allocation fails again.
2804    if (GCExpandToAllocateDelayMillis > 0) {
2805      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2806    }
2807  }
2808}
2809
2810void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2811  // Only shrink if a compaction was done so that all the free space
2812  // in the generation is in a contiguous block at the end.
2813  if (did_compact()) {
2814    CardGeneration::shrink(bytes);
2815  }
2816}
2817
2818void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2819  assert_locked_or_safepoint(Heap_lock);
2820}
2821
2822void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2823  assert_locked_or_safepoint(Heap_lock);
2824  assert_lock_strong(freelistLock());
2825  if (PrintGCDetails && Verbose) {
2826    warning("Shrinking of CMS not yet implemented");
2827  }
2828  return;
2829}
2830
2831
2832// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2833// phases.
2834class CMSPhaseAccounting: public StackObj {
2835 public:
2836  CMSPhaseAccounting(CMSCollector *collector,
2837                     const char *phase,
2838                     const GCId gc_id,
2839                     bool print_cr = true);
2840  ~CMSPhaseAccounting();
2841
2842 private:
2843  CMSCollector *_collector;
2844  const char *_phase;
2845  elapsedTimer _wallclock;
2846  bool _print_cr;
2847  const GCId _gc_id;
2848
2849 public:
2850  // Not MT-safe; so do not pass around these StackObj's
2851  // where they may be accessed by other threads.
2852  jlong wallclock_millis() {
2853    assert(_wallclock.is_active(), "Wall clock should not stop");
2854    _wallclock.stop();  // to record time
2855    jlong ret = _wallclock.milliseconds();
2856    _wallclock.start(); // restart
2857    return ret;
2858  }
2859};
2860
2861CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2862                                       const char *phase,
2863                                       const GCId gc_id,
2864                                       bool print_cr) :
2865  _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
2866
2867  if (PrintCMSStatistics != 0) {
2868    _collector->resetYields();
2869  }
2870  if (PrintGCDetails) {
2871    gclog_or_tty->gclog_stamp(_gc_id);
2872    gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2873      _collector->cmsGen()->short_name(), _phase);
2874  }
2875  _collector->resetTimer();
2876  _wallclock.start();
2877  _collector->startTimer();
2878}
2879
2880CMSPhaseAccounting::~CMSPhaseAccounting() {
2881  assert(_wallclock.is_active(), "Wall clock should not have stopped");
2882  _collector->stopTimer();
2883  _wallclock.stop();
2884  if (PrintGCDetails) {
2885    gclog_or_tty->gclog_stamp(_gc_id);
2886    gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2887                 _collector->cmsGen()->short_name(),
2888                 _phase, _collector->timerValue(), _wallclock.seconds());
2889    if (_print_cr) {
2890      gclog_or_tty->cr();
2891    }
2892    if (PrintCMSStatistics != 0) {
2893      gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2894                    _collector->yields());
2895    }
2896  }
2897}
2898
2899// CMS work
2900
2901// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2902class CMSParMarkTask : public AbstractGangTask {
2903 protected:
2904  CMSCollector*     _collector;
2905  uint              _n_workers;
2906  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2907      AbstractGangTask(name),
2908      _collector(collector),
2909      _n_workers(n_workers) {}
2910  // Work method in support of parallel rescan ... of young gen spaces
2911  void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2912                             ContiguousSpace* space,
2913                             HeapWord** chunk_array, size_t chunk_top);
2914  void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2915};
2916
2917// Parallel initial mark task
2918class CMSParInitialMarkTask: public CMSParMarkTask {
2919  StrongRootsScope* _strong_roots_scope;
2920 public:
2921  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2922      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2923      _strong_roots_scope(strong_roots_scope) {}
2924  void work(uint worker_id);
2925};
2926
2927// Checkpoint the roots into this generation from outside
2928// this generation. [Note this initial checkpoint need only
2929// be approximate -- we'll do a catch up phase subsequently.]
2930void CMSCollector::checkpointRootsInitial() {
2931  assert(_collectorState == InitialMarking, "Wrong collector state");
2932  check_correct_thread_executing();
2933  TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2934
2935  save_heap_summary();
2936  report_heap_summary(GCWhen::BeforeGC);
2937
2938  ReferenceProcessor* rp = ref_processor();
2939  assert(_restart_addr == NULL, "Control point invariant");
2940  {
2941    // acquire locks for subsequent manipulations
2942    MutexLockerEx x(bitMapLock(),
2943                    Mutex::_no_safepoint_check_flag);
2944    checkpointRootsInitialWork();
2945    // enable ("weak") refs discovery
2946    rp->enable_discovery();
2947    _collectorState = Marking;
2948  }
2949}
2950
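// The initial-mark work proper, performed at a safepoint with the bit map
// lock held: set up unloading/verification state, fill TLABs, clear claimed
// CLD marks, and then mark everything directly reachable from the roots
// (and from the young generation) into the mark bit map.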
2951void CMSCollector::checkpointRootsInitialWork() {
2952  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2953  assert(_collectorState == InitialMarking, "just checking");
2954
2955  // If there has not been a younger-generation GC since the last CMS
2956  // cycle completed, precede our marking with a collection of all
2957  // younger generations to keep floating garbage to a minimum.
2958  // XXX: we won't do this for now -- it's an optimization to be done later.
2959
2960  // already have locks
2961  assert_lock_strong(bitMapLock());
2962  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2963
2964  // Setup the verification and class unloading state for this
2965  // CMS collection cycle.
2966  setup_cms_unloading_and_verification_state();
2967
2968  NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2969    PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
2970
2971  // Reset all the PLAB chunk arrays if necessary.
2972  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2973    reset_survivor_plab_arrays();
2974  }
2975
2976  ResourceMark rm;
2977  HandleMark  hm;
2978
2979  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2980  GenCollectedHeap* gch = GenCollectedHeap::heap();
2981
2982  verify_work_stacks_empty();
2983  verify_overflow_empty();
2984
2985  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2986  // Update the saved marks which may affect the root scans.
2987  gch->save_marks();
2988
2989  // weak reference processing has not started yet.
2990  ref_processor()->set_enqueuing_is_done(false);
2991
2992  // Need to remember all newly created CLDs,
2993  // so that we can guarantee that the remark finds them.
2994  ClassLoaderDataGraph::remember_new_clds(true);
2995
2996  // Whenever a CLD is found, it will be claimed before proceeding to mark
2997  // the klasses. The claimed marks need to be cleared before marking starts.
2998  ClassLoaderDataGraph::clear_claimed_marks();
2999
3000  if (CMSPrintEdenSurvivorChunks) {
3001    print_eden_and_survivor_chunk_arrays();
3002  }
3003
3004  {
3005    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3006    if (CMSParallelInitialMarkEnabled) {
3007      // The parallel version.
3008      WorkGang* workers = gch->workers();
3009      assert(workers != NULL, "Need parallel worker threads.");
3010      uint n_workers = workers->active_workers();
3011
3012      StrongRootsScope srs(n_workers);
3013
3014      CMSParInitialMarkTask tsk(this, &srs, n_workers);
3015      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3016      if (n_workers > 1) {
3017        workers->run_task(&tsk);
3018      } else {
3019        tsk.work(0);
3020      }
3021    } else {
3022      // The serial version.
3023      CLDToOopClosure cld_closure(&notOlder, true);
3024      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3025
3026      StrongRootsScope srs(1);
3027
3028      gch->gen_process_roots(&srs,
3029                             GenCollectedHeap::OldGen,
3030                             true,   // younger gens are roots
3031                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
3032                             should_unload_classes(),
3033                             &notOlder,
3034                             NULL,
3035                             &cld_closure);
3036    }
3037  }
3038
3039  // The mod-union table should be clear at this point; it is dirtied in the
3040  // gc_prologue of the CMS generation for each younger generation collection.
3041
3042  assert(_modUnionTable.isAllClear(),
3043       "Was cleared in most recent final checkpoint phase"
3044       " or no bits are set in the gc_prologue before the start of the"
3045       " next marking phase.");
3046
3047  assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3048
3049  // Save the end of the used_region of the constituent generations
3050  // to be used to limit the extent of sweep in each generation.
3051  save_sweep_limits();
3052  verify_overflow_empty();
3053}
3054
3055bool CMSCollector::markFromRoots() {
3056  // we might be tempted to assert that:
3057  // assert(!SafepointSynchronize::is_at_safepoint(),
3058  //        "inconsistent argument?");
3059  // However that wouldn't be right, because it's possible that
3060  // a safepoint is indeed in progress as a younger generation
3061  // stop-the-world GC happens even as we mark in this generation.
3062  assert(_collectorState == Marking, "inconsistent state?");
3063  check_correct_thread_executing();
3064  verify_overflow_empty();
3065
3066  // Weak ref discovery note: We may be discovering weak
3067  // refs in this generation concurrent (but interleaved) with
3068  // weak ref discovery by a younger generation collector.
3069
3070  CMSTokenSyncWithLocks ts(true, bitMapLock());
3071  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3072  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3073  bool res = markFromRootsWork();
3074  if (res) {
3075    _collectorState = Precleaning;
3076  } else { // We failed and a foreground collection wants to take over
3077    assert(_foregroundGCIsActive, "internal state inconsistency");
3078    assert(_restart_addr == NULL,  "foreground will restart from scratch");
3079    if (PrintGCDetails) {
3080      gclog_or_tty->print_cr("bailing out to foreground collection");
3081    }
3082  }
3083  verify_overflow_empty();
3084  return res;
3085}
3086
3087bool CMSCollector::markFromRootsWork() {
3088  // iterate over marked bits in bit map, doing a full scan and mark
3089  // from these roots using the following algorithm:
3090  // . if oop is to the right of the current scan pointer,
3091  //   mark corresponding bit (we'll process it later)
3092  // . else (oop is to left of current scan pointer)
3093  //   push oop on marking stack
3094  // . drain the marking stack
3095
3096  // Note that when we do a marking step we need to hold the
3097  // bit map lock -- recall that direct allocation (by mutators)
3098  // and promotion (by younger generation collectors) is also
3099  // marking the bit map. [the so-called allocate live policy.]
3100  // Because the implementation of bit map marking is not
3101  // robust wrt simultaneous marking of bits in the same word,
3102  // we need to make sure that there is no such interference
3103  // between concurrent such updates.
3104
3105  // already have locks
3106  assert_lock_strong(bitMapLock());
3107
3108  verify_work_stacks_empty();
3109  verify_overflow_empty();
3110  bool result = false;
3111  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3112    result = do_marking_mt();
3113  } else {
3114    result = do_marking_st();
3115  }
3116  return result;
3117}
3118
3119// Forward decl
3120class CMSConcMarkingTask;
3121
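// A ParallelTaskTerminator whose yield() defers to the marking task's own
// yield protocol when the task as a whole should yield, and otherwise
// yields in the usual way.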
3122class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3123  CMSCollector*       _collector;
3124  CMSConcMarkingTask* _task;
3125 public:
3126  virtual void yield();
3127
3128  // "n_threads" is the number of threads participating in termination.
3129  // "queue_set" is the set of work queues of the participating threads.
3130  // "collector" is the CMS collector associated with this task terminator.
3132  CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3133    ParallelTaskTerminator(n_threads, queue_set),
3134    _collector(collector) { }
3135
3136  void set_task(CMSConcMarkingTask* task) {
3137    _task = task;
3138  }
3139};
3140
3141class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3142  CMSConcMarkingTask* _task;
3143 public:
3144  bool should_exit_termination();
3145  void set_task(CMSConcMarkingTask* task) {
3146    _task = task;
3147  }
3148};
3149
3150// MT Concurrent Marking Task
3151class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3152  CMSCollector* _collector;
3153  uint          _n_workers;       // requested/desired # workers
3154  bool          _result;
3155  CompactibleFreeListSpace*  _cms_space;
3156  char          _pad_front[64];   // padding to ...
3157  HeapWord*     _global_finger;   // ... avoid sharing cache line
3158  char          _pad_back[64];
3159  HeapWord*     _restart_addr;
3160
3161  //  Exposed here for yielding support
3162  Mutex* const _bit_map_lock;
3163
3164  // The per thread work queues, available here for stealing
3165  OopTaskQueueSet*  _task_queues;
3166
3167  // Termination (and yielding) support
3168  CMSConcMarkingTerminator _term;
3169  CMSConcMarkingTerminatorTerminator _term_term;
3170
3171 public:
3172  CMSConcMarkingTask(CMSCollector* collector,
3173                 CompactibleFreeListSpace* cms_space,
3174                 YieldingFlexibleWorkGang* workers,
3175                 OopTaskQueueSet* task_queues):
3176    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3177    _collector(collector),
3178    _cms_space(cms_space),
3179    _n_workers(0), _result(true),
3180    _task_queues(task_queues),
3181    _term(_n_workers, task_queues, _collector),
3182    _bit_map_lock(collector->bitMapLock())
3183  {
3184    _requested_size = _n_workers;
3185    _term.set_task(this);
3186    _term_term.set_task(this);
3187    _restart_addr = _global_finger = _cms_space->bottom();
3188  }
3189
3190
3191  OopTaskQueueSet* task_queues()  { return _task_queues; }
3192
3193  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3194
3195  HeapWord** global_finger_addr() { return &_global_finger; }
3196
3197  CMSConcMarkingTerminator* terminator() { return &_term; }
3198
3199  virtual void set_for_termination(uint active_workers) {
3200    terminator()->reset_for_reuse(active_workers);
3201  }
3202
3203  void work(uint worker_id);
3204  bool should_yield() {
3205    return    ConcurrentMarkSweepThread::should_yield()
3206           && !_collector->foregroundGCIsActive();
3207  }
3208
3209  virtual void coordinator_yield();  // stuff done by coordinator
3210  bool result() { return _result; }
3211
3212  void reset(HeapWord* ra) {
3213    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3214    _restart_addr = _global_finger = ra;
3215    _term.reset_for_reuse();
3216  }
3217
3218  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3219                                           OopTaskQueue* work_q);
3220
3221 private:
3222  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3223  void do_work_steal(int i);
3224  void bump_global_finger(HeapWord* f);
3225};
3226
3227bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3228  assert(_task != NULL, "Error");
3229  return _task->yielding();
3230  // Note that we do not need the disjunct || _task->should_yield() above
3231  // because we want terminating threads to yield only if the task
3232  // is already in the midst of yielding, which happens only after at least one
3233  // thread has yielded.
3234}
3235
3236void CMSConcMarkingTerminator::yield() {
3237  if (_task->should_yield()) {
3238    _task->yield();
3239  } else {
3240    ParallelTaskTerminator::yield();
3241  }
3242}
3243
3244////////////////////////////////////////////////////////////////
3245// Concurrent Marking Algorithm Sketch
3246////////////////////////////////////////////////////////////////
3247// Until all tasks exhausted (both spaces):
3248// -- claim next available chunk
3249// -- bump global finger via CAS
3250// -- find first object that starts in this chunk
3251//    and start scanning bitmap from that position
3252// -- scan marked objects for oops
3253// -- CAS-mark target, and if successful:
3254//    . if target oop is above global finger (volatile read)
3255//      nothing to do
3256//    . if target oop is in chunk and above local finger
3257//        then nothing to do
3258//    . else push on work-queue
3259// -- Deal with possible overflow issues:
3260//    . local work-queue overflow causes stuff to be pushed on
3261//      global (common) overflow queue
3262//    . always first empty local work queue
3263//    . then get a batch of oops from global work queue if any
3264//    . then do work stealing
3265// -- When all tasks claimed (both spaces)
3266//    and local work queue empty,
3267//    then in a loop do:
3268//    . check global overflow stack; steal a batch of oops and trace
3269//    . try to steal from other threads if the global overflow stack is empty
3270//    . if neither is available, offer termination
3271// -- Terminate and return result
3272//
3273void CMSConcMarkingTask::work(uint worker_id) {
3274  elapsedTimer _timer;
3275  ResourceMark rm;
3276  HandleMark hm;
3277
3278  DEBUG_ONLY(_collector->verify_overflow_empty();)
3279
3280  // Before we begin work, our work queue should be empty
3281  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3282  // Scan the bitmap covering _cms_space, tracing through grey objects.
3283  _timer.start();
3284  do_scan_and_mark(worker_id, _cms_space);
3285  _timer.stop();
3286  if (PrintCMSStatistics != 0) {
3287    gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3288      worker_id, _timer.seconds());
3289      // XXX: need xxx/xxx type of notation, two timers
3290  }
3291
3292  // ... do work stealing
3293  _timer.reset();
3294  _timer.start();
3295  do_work_steal(worker_id);
3296  _timer.stop();
3297  if (PrintCMSStatistics != 0) {
3298    gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3299      worker_id, _timer.seconds());
3300      // XXX: need xxx/xxx type of notation, two timers
3301  }
3302  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3303  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3304  // Note that under the current task protocol, the
3305  // following assertion is true even if the spaces
3306  // expanded since the completion of the concurrent
3307  // marking. XXX This will likely change under a strict
3308  // ABORT semantics.
3309  // After perm removal the comparison was changed to
3310  // greater than or equal to from strictly greater than.
3311  // Before perm removal the highest address sweep would
3312  // have been at the end of perm gen but now is at the
3313  // end of the tenured gen.
3314  assert(_global_finger >=  _cms_space->end(),
3315         "All tasks have been completed");
3316  DEBUG_ONLY(_collector->verify_overflow_empty();)
3317}
3318
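// Atomically advance the shared global finger to at least f using a CAS
// loop; if a competing thread has already advanced it past f, leave it alone.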
3319void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3320  HeapWord* read = _global_finger;
3321  HeapWord* cur  = read;
3322  while (f > read) {
3323    cur = read;
3324    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3325    if (cur == read) {
3326      // our cas succeeded
3327      assert(_global_finger >= f, "protocol consistency");
3328      break;
3329    }
3330  }
3331}
3332
3333// This is really inefficient, and should be redone by
3334// using (not yet available) block-read and -write interfaces to the
3335// stack and the work_queue. XXX FIX ME !!!
3336bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3337                                                      OopTaskQueue* work_q) {
3338  // Fast lock-free check
3339  if (ovflw_stk->length() == 0) {
3340    return false;
3341  }
3342  assert(work_q->size() == 0, "Shouldn't steal");
3343  MutexLockerEx ml(ovflw_stk->par_lock(),
3344                   Mutex::_no_safepoint_check_flag);
3345  // Grab up to 1/4 of the remaining capacity of the work queue
3346  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3347                    (size_t)ParGCDesiredObjsFromOverflowList);
3348  num = MIN2(num, ovflw_stk->length());
3349  for (int i = (int) num; i > 0; i--) {
3350    oop cur = ovflw_stk->pop();
3351    assert(cur != NULL, "Counted wrong?");
3352    work_q->push(cur);
3353  }
3354  return num > 0;
3355}
3356
3357void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3358  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3359  int n_tasks = pst->n_tasks();
3360  // We allow that there may be no tasks to do here because
3361  // we are restarting after a stack overflow.
3362  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3363  uint nth_task = 0;
3364
3365  HeapWord* aligned_start = sp->bottom();
3366  if (sp->used_region().contains(_restart_addr)) {
3367    // Align down to a card boundary for the start of 0th task
3368    // for this space.
3369    aligned_start =
3370      (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3371                                 CardTableModRefBS::card_size);
3372  }
3373
3374  size_t chunk_size = sp->marking_task_size();
3375  while (!pst->is_task_claimed(/* reference */ nth_task)) {
3376    // Having claimed the nth task in this space,
3377    // compute the chunk that it corresponds to:
3378    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3379                               aligned_start + (nth_task+1)*chunk_size);
3380    // Try and bump the global finger via a CAS;
3381    // note that we need to do the global finger bump
3382    // _before_ taking the intersection below, because
3383    // the task corresponding to that region will be
3384    // deemed done even if the used_region() expands
3385    // because of allocation -- as it almost certainly will
3386    // during start-up while the threads yield in the
3387    // closure below.
3388    HeapWord* finger = span.end();
3389    bump_global_finger(finger);   // atomically
3390    // There are null tasks here corresponding to chunks
3391    // beyond the "top" address of the space.
3392    span = span.intersection(sp->used_region());
3393    if (!span.is_empty()) {  // Non-null task
3394      HeapWord* prev_obj;
3395      assert(!span.contains(_restart_addr) || nth_task == 0,
3396             "Inconsistency");
3397      if (nth_task == 0) {
3398        // For the 0th task, we'll not need to compute a block_start.
3399        if (span.contains(_restart_addr)) {
3400          // In the case of a restart because of stack overflow,
3401          // we might additionally skip a chunk prefix.
3402          prev_obj = _restart_addr;
3403        } else {
3404          prev_obj = span.start();
3405        }
3406      } else {
3407        // We want to skip the first object because
3408        // the protocol is to scan any object in its entirety
3409        // that _starts_ in this span; a fortiori, any
3410        // object starting in an earlier span is scanned
3411        // as part of an earlier claimed task.
3412        // Below we use the "careful" version of block_start
3413        // so we do not try to navigate uninitialized objects.
3414        prev_obj = sp->block_start_careful(span.start());
3415        // Below we use a variant of block_size that uses the
3416        // Printezis bits to avoid waiting for allocated
3417        // objects to become initialized/parsable.
3418        while (prev_obj < span.start()) {
3419          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3420          if (sz > 0) {
3421            prev_obj += sz;
3422          } else {
3423            // In this case we may end up doing a bit of redundant
3424            // scanning, but that appears unavoidable, short of
3425            // locking the free list locks; see bug 6324141.
3426            break;
3427          }
3428        }
3429      }
3430      if (prev_obj < span.end()) {
3431        MemRegion my_span = MemRegion(prev_obj, span.end());
3432        // Do the marking work within a non-empty span --
3433        // the last argument to the constructor indicates whether the
3434        // iteration should be incremental with periodic yields.
3435        Par_MarkFromRootsClosure cl(this, _collector, my_span,
3436                                    &_collector->_markBitMap,
3437                                    work_queue(i),
3438                                    &_collector->_markStack);
3439        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3440      } // else nothing to do for this task
3441    }   // else nothing to do for this task
3442  }
3443  // We'd be tempted to assert here that since there are no
3444  // more tasks left to claim in this space, the global_finger
3445  // must exceed space->top() and a fortiori space->end(). However,
3446  // that would not quite be correct because the bumping of
3447  // global_finger occurs strictly after the claiming of a task,
3448  // so by the time we reach here the global finger may not yet
3449  // have been bumped up by the thread that claimed the last
3450  // task.
3451  pst->all_tasks_completed();
3452}
3453
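// Oop closure used during the work-stealing phase of multi-threaded
// concurrent marking: a white object in the CMS generation is CAS-marked
// grey and pushed on the worker's queue, overflowing to the shared overflow
// stack (and, failing that, to the stack-overflow protocol) as needed.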
3454class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
3455 private:
3456  CMSCollector* _collector;
3457  CMSConcMarkingTask* _task;
3458  MemRegion     _span;
3459  CMSBitMap*    _bit_map;
3460  CMSMarkStack* _overflow_stack;
3461  OopTaskQueue* _work_queue;
3462 protected:
3463  DO_OOP_WORK_DEFN
3464 public:
3465  Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3466                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3467    MetadataAwareOopClosure(collector->ref_processor()),
3468    _collector(collector),
3469    _task(task),
3470    _span(collector->_span),
3471    _work_queue(work_queue),
3472    _bit_map(bit_map),
3473    _overflow_stack(overflow_stack)
3474  { }
3475  virtual void do_oop(oop* p);
3476  virtual void do_oop(narrowOop* p);
3477
3478  void trim_queue(size_t max);
3479  void handle_stack_overflow(HeapWord* lost);
3480  void do_yield_check() {
3481    if (_task->should_yield()) {
3482      _task->yield();
3483    }
3484  }
3485};
3486
3487// Grey object scanning during work stealing phase --
3488// the salient assumption here is that any references
3489// that are in these stolen objects being scanned must
3490// already have been initialized (else they would not have
3491// been published), so we do not need to check for
3492// uninitialized objects before pushing here.
3493void Par_ConcMarkingClosure::do_oop(oop obj) {
3494  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
3495  HeapWord* addr = (HeapWord*)obj;
3496  // Check if oop points into the CMS generation
3497  // and is not marked
3498  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3499    // a white object ...
3500    // If we manage to "claim" the object, by being the
3501    // first thread to mark it, then we push it on our
3502    // marking stack
3503    if (_bit_map->par_mark(addr)) {     // ... now grey
3504      // push on work queue (grey set)
3505      bool simulate_overflow = false;
3506      NOT_PRODUCT(
3507        if (CMSMarkStackOverflowALot &&
3508            _collector->simulate_overflow()) {
3509          // simulate a stack overflow
3510          simulate_overflow = true;
3511        }
3512      )
3513      if (simulate_overflow ||
3514          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3515        // stack overflow
3516        if (PrintCMSStatistics != 0) {
3517          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3518                                 SIZE_FORMAT, _overflow_stack->capacity());
3519        }
3520        // We cannot assert that the overflow stack is full because
3521        // it may have been emptied since.
3522        assert(simulate_overflow ||
3523               _work_queue->size() == _work_queue->max_elems(),
3524              "Else push should have succeeded");
3525        handle_stack_overflow(addr);
3526      }
3527    } // Else, some other thread got there first
3528    do_yield_check();
3529  }
3530}
3531
3532void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
3533void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3534
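// Drain the local work queue down to at most "max" entries, tracing each
// popped grey object and checking for yield requests along the way.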
3535void Par_ConcMarkingClosure::trim_queue(size_t max) {
3536  while (_work_queue->size() > max) {
3537    oop new_oop;
3538    if (_work_queue->pop_local(new_oop)) {
3539      assert(new_oop->is_oop(), "Should be an oop");
3540      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3541      assert(_span.contains((HeapWord*)new_oop), "Not in span");
3542      new_oop->oop_iterate(this);  // do_oop() above
3543      do_yield_check();
3544    }
3545  }
3546}
3547
3548// Upon stack overflow, we discard (part of) the stack,
3549// remembering the least address amongst those discarded
3550// in CMSCollector's _restart_address.
3551void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3552  // We need to do this under a mutex to prevent other
3553  // workers from interfering with the work done below.
3554  MutexLockerEx ml(_overflow_stack->par_lock(),
3555                   Mutex::_no_safepoint_check_flag);
3556  // Remember the least grey address discarded
3557  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3558  _collector->lower_restart_addr(ra);
3559  _overflow_stack->reset();  // discard stack contents
3560  _overflow_stack->expand(); // expand the stack if possible
3561}
3562
3563
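// Work-stealing loop for worker i: repeatedly drain the local queue, then
// replenish it from the global overflow stack, then try to steal from other
// workers' queues; when no work is found, offer termination (or yield if the
// task is yielding).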
3564void CMSConcMarkingTask::do_work_steal(int i) {
3565  OopTaskQueue* work_q = work_queue(i);
3566  oop obj_to_scan;
3567  CMSBitMap* bm = &(_collector->_markBitMap);
3568  CMSMarkStack* ovflw = &(_collector->_markStack);
3569  int* seed = _collector->hash_seed(i);
3570  Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3571  while (true) {
3572    cl.trim_queue(0);
3573    assert(work_q->size() == 0, "Should have been emptied above");
3574    if (get_work_from_overflow_stack(ovflw, work_q)) {
3575      // Can't assert below because the work obtained from the
3576      // overflow stack may already have been stolen from us.
3577      // assert(work_q->size() > 0, "Work from overflow stack");
3578      continue;
3579    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3580      assert(obj_to_scan->is_oop(), "Should be an oop");
3581      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3582      obj_to_scan->oop_iterate(&cl);
3583    } else if (terminator()->offer_termination(&_term_term)) {
3584      assert(work_q->size() == 0, "Impossible!");
3585      break;
3586    } else if (yielding() || should_yield()) {
3587      yield();
3588    }
3589  }
3590}
3591
3592// This is run by the CMS (coordinator) thread.
3593void CMSConcMarkingTask::coordinator_yield() {
3594  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3595         "CMS thread should hold CMS token");
3596  // First give up the locks, then yield, then re-lock
3597  // We should probably use a constructor/destructor idiom to
3598  // do this unlock/lock or modify the MutexUnlocker class to
3599  // serve our purpose. XXX
3600  assert_lock_strong(_bit_map_lock);
3601  _bit_map_lock->unlock();
3602  ConcurrentMarkSweepThread::desynchronize(true);
3603  _collector->stopTimer();
3604  if (PrintCMSStatistics != 0) {
3605    _collector->incrementYields();
3606  }
3607
3608  // It is possible for whichever thread initiated the yield request
3609  // not to get a chance to wake up and take the bitmap lock between
3610  // this thread releasing it and reacquiring it. So, while the
3611  // should_yield() flag is on, let's sleep for a bit to give the
3612  // other thread a chance to wake up. The limit imposed on the number
3613  // of iterations is defensive, to avoid any unforeseen circumstances
3614  // putting us into an infinite loop. Since it's always been this
3615  // (coordinator_yield()) method that was observed to cause the
3616  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3617  // which is by default non-zero. The other seven methods that
3618  // also perform the yield operation use a different
3619  // parameter (CMSYieldSleepCount) which is by default zero. This way we
3620  // can enable the sleeping for those methods too, if necessary.
3621  // See 6442774.
3622  //
3623  // We really need to reconsider the synchronization between the GC
3624  // thread and the yield-requesting threads in the future and we
3625  // should really use wait/notify, which is the recommended
3626  // way of doing this type of interaction. Additionally, we should
3627  // consolidate the eight nearly identical methods that do the
3628  // yield operation into one, for better maintainability and
3629  // readability. See 6445193.
3630  //
3631  // Tony 2006.06.29
3632  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3633                   ConcurrentMarkSweepThread::should_yield() &&
3634                   !CMSCollector::foregroundGCIsActive(); ++i) {
3635    os::sleep(Thread::current(), 1, false);
3636  }
3637
3638  ConcurrentMarkSweepThread::synchronize(true);
3639  _bit_map_lock->lock_without_safepoint_check();
3640  _collector->startTimer();
3641}
3642
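// Multi-threaded concurrent marking: partition the CMS space into chunks of
// marking tasks, run CMSConcMarkingTask on the concurrent worker gang
// (yielding cooperatively via the coordinator), and restart from
// _restart_addr whenever a marking stack overflow forces a fresh iteration.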
3643bool CMSCollector::do_marking_mt() {
3644  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3645  uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3646                                                                  conc_workers()->active_workers(),
3647                                                                  Threads::number_of_non_daemon_threads());
3648  conc_workers()->set_active_workers(num_workers);
3649
3650  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3651
3652  CMSConcMarkingTask tsk(this,
3653                         cms_space,
3654                         conc_workers(),
3655                         task_queues());
3656
3657  // Since the actual number of workers we get may be different
3658  // from the number we requested above, do we need to do anything different
3659  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3660  // class? XXX
3661  cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3662
3663  // Refs discovery is already non-atomic.
3664  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3665  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3666  conc_workers()->start_task(&tsk);
3667  while (tsk.yielded()) {
3668    tsk.coordinator_yield();
3669    conc_workers()->continue_task(&tsk);
3670  }
3671  // If the task was aborted, _restart_addr will be non-NULL
3672  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3673  while (_restart_addr != NULL) {
3674    // XXX For now we do not make use of ABORTED state and have not
3675    // yet implemented the right abort semantics (even in the original
3676    // single-threaded CMS case). That needs some more investigation
3677    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3678    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3679    // If _restart_addr is non-NULL, a marking stack overflow
3680    // occurred; we need to do a fresh marking iteration from the
3681    // indicated restart address.
3682    if (_foregroundGCIsActive) {
3683      // We may be running into repeated stack overflows, having
3684      // reached the limit of the stack size, while making very
3685      // slow forward progress. It may be best to bail out and
3686      // let the foreground collector do its job.
3687      // Clear _restart_addr, so that foreground GC
3688      // works from scratch. This avoids the headache of
3689      // a "rescan" which would otherwise be needed because
3690      // of the dirty mod union table & card table.
3691      _restart_addr = NULL;
3692      return false;
3693    }
3694    // Adjust the task to restart from _restart_addr
3695    tsk.reset(_restart_addr);
3696    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3697                  _restart_addr);
3698    _restart_addr = NULL;
3699    // Get the workers going again
3700    conc_workers()->start_task(&tsk);
3701    while (tsk.yielded()) {
3702      tsk.coordinator_yield();
3703      conc_workers()->continue_task(&tsk);
3704    }
3705  }
3706  assert(tsk.completed(), "Inconsistency");
3707  assert(tsk.result() == true, "Inconsistency");
3708  return true;
3709}
3710
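// Single-threaded concurrent marking over the mark bit map, restarting from
// _restart_addr after any marking stack overflow, and bailing out if a
// foreground collection becomes active.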
3711bool CMSCollector::do_marking_st() {
3712  ResourceMark rm;
3713  HandleMark   hm;
3714
3715  // Temporarily make refs discovery single threaded (non-MT)
3716  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3717  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3718    &_markStack, CMSYield);
3719  // the last argument to iterate indicates whether the iteration
3720  // should be incremental with periodic yields.
3721  _markBitMap.iterate(&markFromRootsClosure);
3722  // If _restart_addr is non-NULL, a marking stack overflow
3723  // occurred; we need to do a fresh iteration from the
3724  // indicated restart address.
3725  while (_restart_addr != NULL) {
3726    if (_foregroundGCIsActive) {
3727      // We may be running into repeated stack overflows, having
3728      // reached the limit of the stack size, while making very
3729      // slow forward progress. It may be best to bail out and
3730      // let the foreground collector do its job.
3731      // Clear _restart_addr, so that foreground GC
3732      // works from scratch. This avoids the headache of
3733      // a "rescan" which would otherwise be needed because
3734      // of the dirty mod union table & card table.
3735      _restart_addr = NULL;
3736      return false;  // indicating failure to complete marking
3737    }
3738    // Deal with stack overflow:
3739    // we restart marking from _restart_addr
3740    HeapWord* ra = _restart_addr;
3741    markFromRootsClosure.reset(ra);
3742    _restart_addr = NULL;
3743    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3744  }
3745  return true;
3746}
3747
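// Concurrent precleaning driver: decide whether Eden sampling is worthwhile,
// do one round of precleaning work, and then advance the collector state to
// AbortablePreclean (or directly to FinalMarking if precleaning is disabled).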
3748void CMSCollector::preclean() {
3749  check_correct_thread_executing();
3750  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3751  verify_work_stacks_empty();
3752  verify_overflow_empty();
3753  _abort_preclean = false;
3754  if (CMSPrecleaningEnabled) {
3755    if (!CMSEdenChunksRecordAlways) {
3756      _eden_chunk_index = 0;
3757    }
3758    size_t used = get_eden_used();
3759    size_t capacity = get_eden_capacity();
3760    // Don't start sampling unless we will get sufficiently
3761    // many samples.
3762    if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3763                * CMSScheduleRemarkEdenPenetration)) {
3764      _start_sampling = true;
3765    } else {
3766      _start_sampling = false;
3767    }
3768    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3769    CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3770    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3771  }
3772  CMSTokenSync x(true); // is cms thread
3773  if (CMSPrecleaningEnabled) {
3774    sample_eden();
3775    _collectorState = AbortablePreclean;
3776  } else {
3777    _collectorState = FinalMarking;
3778  }
3779  verify_work_stacks_empty();
3780  verify_overflow_empty();
3781}
3782
3783// Try and schedule the remark such that young gen
3784// occupancy is CMSScheduleRemarkEdenPenetration %.
3785void CMSCollector::abortable_preclean() {
3786  check_correct_thread_executing();
3787  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3788  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3789
3790  // If Eden's current occupancy is below this threshold,
3791  // immediately schedule the remark; else preclean
3792  // past the next scavenge in an effort to
3793  // schedule the pause as described above. By choosing
3794  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3795  // we will never do an actual abortable preclean cycle.
3796  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3797    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3798    CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3799    // We need more smarts in the abortable preclean
3800    // loop below to deal with cases where allocation
3801    // in young gen is very very slow, and our precleaning
3802    // is running a losing race against a horde of
3803    // mutators intent on flooding us with CMS updates
3804    // (dirty cards).
3805    // One, admittedly dumb, strategy is to give up
3806    // after a certain number of abortable precleaning loops
3807    // or after a certain maximum time. We want to make
3808    // this smarter in the next iteration.
3809    // XXX FIX ME!!! YSR
3810    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3811    while (!(should_abort_preclean() ||
3812             ConcurrentMarkSweepThread::should_terminate())) {
3813      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3814      cumworkdone += workdone;
3815      loops++;
3816      // Voluntarily terminate abortable preclean phase if we have
3817      // been at it for too long.
3818      if ((CMSMaxAbortablePrecleanLoops != 0) &&
3819          loops >= CMSMaxAbortablePrecleanLoops) {
3820        if (PrintGCDetails) {
3821          gclog_or_tty->print(" CMS: abort preclean due to loops ");
3822        }
3823        break;
3824      }
3825      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3826        if (PrintGCDetails) {
3827          gclog_or_tty->print(" CMS: abort preclean due to time ");
3828        }
3829        break;
3830      }
3831      // If we are doing little work each iteration, we should
3832      // take a short break.
3833      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3834        // Sleep for some time, waiting for work to accumulate
3835        stopTimer();
3836        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3837        startTimer();
3838        waited++;
3839      }
3840    }
3841    if (PrintCMSStatistics > 0) {
3842      gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3843                          loops, waited, cumworkdone);
3844    }
3845  }
3846  CMSTokenSync x(true); // is cms thread
3847  if (_collectorState != Idling) {
3848    assert(_collectorState == AbortablePreclean,
3849           "Spontaneous state transition?");
3850    _collectorState = FinalMarking;
3851  } // Else, a foreground collection completed this CMS cycle.
3852  return;
3853}
3854
3855// Respond to an Eden sampling opportunity
3856void CMSCollector::sample_eden() {
3857  // Make sure a young gc cannot sneak in between our
3858  // reading and recording of a sample.
3859  assert(Thread::current()->is_ConcurrentGC_thread(),
3860         "Only the cms thread may collect Eden samples");
3861  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3862         "Should collect samples while holding CMS token");
3863  if (!_start_sampling) {
3864    return;
3865  }
3866  // When CMSEdenChunksRecordAlways is true, the eden chunk array
3867  // is populated by the young generation.
3868  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3869    if (_eden_chunk_index < _eden_chunk_capacity) {
3870      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3871      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3872             "Unexpected state of Eden");
3873      // We'd like to check that what we just sampled is an oop-start address;
3874      // however, we cannot do that here since the object may not yet have been
3875      // initialized. So we'll instead do the check when we _use_ this sample
3876      // later.
3877      if (_eden_chunk_index == 0 ||
3878          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3879                         _eden_chunk_array[_eden_chunk_index-1])
3880           >= CMSSamplingGrain)) {
3881        _eden_chunk_index++;  // commit sample
3882      }
3883    }
3884  }
3885  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3886    size_t used = get_eden_used();
3887    size_t capacity = get_eden_capacity();
3888    assert(used <= capacity, "Unexpected state of Eden");
3889    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3890      _abort_preclean = true;
3891    }
3892  }
3893}
3894
3895
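// One round of precleaning: optionally scrub the discovered-reference lists
// and the survivor spaces, then repeatedly preclean dirty cards recorded in
// the mod union table (and finally the card table) until the work converges
// or CMSPrecleanIter passes have been made. Returns the number of cards
// precleaned as a measure of useful work done.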
3896size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3897  assert(_collectorState == Precleaning ||
3898         _collectorState == AbortablePreclean, "incorrect state");
3899  ResourceMark rm;
3900  HandleMark   hm;
3901
3902  // Precleaning is currently not MT but the reference processor
3903  // may be set for MT.  Disable it temporarily here.
3904  ReferenceProcessor* rp = ref_processor();
3905  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3906
3907  // Do one pass of scrubbing the discovered reference lists
3908  // to remove any reference objects with strongly-reachable
3909  // referents.
3910  if (clean_refs) {
3911    CMSPrecleanRefsYieldClosure yield_cl(this);
3912    assert(rp->span().equals(_span), "Spans should be equal");
3913    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3914                                   &_markStack, true /* preclean */);
3915    CMSDrainMarkingStackClosure complete_trace(this,
3916                                   _span, &_markBitMap, &_markStack,
3917                                   &keep_alive, true /* preclean */);
3918
3919    // We don't want this step to interfere with a young
3920    // collection because we don't want to take CPU
3921    // or memory bandwidth away from the young GC threads
3922    // (which may be as many as there are CPUs).
3923    // Note that we don't need to protect ourselves from
3924    // interference with mutators because they can't
3925    // manipulate the discovered reference lists nor affect
3926    // the computed reachability of the referents, the
3927    // only properties manipulated by the precleaning
3928    // of these reference lists.
3929    stopTimer();
3930    CMSTokenSyncWithLocks x(true /* is cms thread */,
3931                            bitMapLock());
3932    startTimer();
3933    sample_eden();
3934
3935    // The following will yield to allow foreground
3936    // collection to proceed promptly. XXX YSR:
3937    // The code in this method may need further
3938    // tweaking for better performance and some restructuring
3939    // for cleaner interfaces.
3940    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3941    rp->preclean_discovered_references(
3942          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3943          gc_timer, _gc_tracer_cm->gc_id());
3944  }
3945
3946  if (clean_survivor) {  // preclean the active survivor space(s)
3947    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3948                             &_markBitMap, &_modUnionTable,
3949                             &_markStack, true /* precleaning phase */);
3950    stopTimer();
3951    CMSTokenSyncWithLocks ts(true /* is cms thread */,
3952                             bitMapLock());
3953    startTimer();
3954    unsigned int before_count =
3955      GenCollectedHeap::heap()->total_collections();
3956    SurvivorSpacePrecleanClosure
3957      sss_cl(this, _span, &_markBitMap, &_markStack,
3958             &pam_cl, before_count, CMSYield);
3959    _young_gen->from()->object_iterate_careful(&sss_cl);
3960    _young_gen->to()->object_iterate_careful(&sss_cl);
3961  }
3962  MarkRefsIntoAndScanClosure
3963    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3964             &_markStack, this, CMSYield,
3965             true /* precleaning phase */);
3966  // CAUTION: The following closure has persistent state that may need to
3967  // be reset upon a decrease in the sequence of addresses it
3968  // processes.
3969  ScanMarkedObjectsAgainCarefullyClosure
3970    smoac_cl(this, _span,
3971      &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3972
3973  // Preclean dirty cards in ModUnionTable and CardTable using
3974  // appropriate convergence criterion;
3975  // repeat CMSPrecleanIter times unless we find that
3976  // we are losing.
3977  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3978  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3979         "Bad convergence multiplier");
3980  assert(CMSPrecleanThreshold >= 100,
3981         "Unreasonably low CMSPrecleanThreshold");
3982
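  // A worked example of the convergence test in the loop below
  // (illustrative only; the flag values are hypothetical): with
  // CMSPrecleanNumerator == 2 and CMSPrecleanDenominator == 3, precleaning
  // stops once curNumCards * 3 > lastNumCards * 2. For instance, with
  // lastNumCards == 3000 and curNumCards == 2500 we have
  // 2500 * 3 = 7500 > 3000 * 2 = 6000: the dirty-card count shrank by less
  // than a third, so further iterations are unlikely to pay off.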
3983  size_t numIter, cumNumCards, lastNumCards, curNumCards;
3984  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3985       numIter < CMSPrecleanIter;
3986       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3987    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3988    if (Verbose && PrintGCDetails) {
3989      gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3990    }
3991    // Either there are very few dirty cards, so re-mark
3992    // pause will be small anyway, or our pre-cleaning isn't
3993    // that much faster than the rate at which cards are being
3994    // dirtied, so we might as well stop and re-mark since
3995    // precleaning won't improve our re-mark time by much.
3996    if (curNumCards <= CMSPrecleanThreshold ||
3997        (numIter > 0 &&
3998         (curNumCards * CMSPrecleanDenominator >
3999         lastNumCards * CMSPrecleanNumerator))) {
4000      numIter++;
4001      cumNumCards += curNumCards;
4002      break;
4003    }
4004  }
4005
4006  preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4007
4008  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4009  cumNumCards += curNumCards;
4010  if (PrintGCDetails && PrintCMSStatistics != 0) {
4011    gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
4012                  curNumCards, cumNumCards, numIter);
4013  }
4014  return cumNumCards;   // as a measure of useful work done
4015}
4016
4017// PRECLEANING NOTES:
4018// Precleaning involves:
4019// . reading the bits of the modUnionTable and clearing the set bits.
4020// . For the cards corresponding to the set bits, we scan the
4021//   objects on those cards. This means we need the free_list_lock
4022//   so that we can safely iterate over the CMS space when scanning
4023//   for oops.
4024// . When we scan the objects, we'll be both reading and setting
4025//   marks in the marking bit map, so we'll need the marking bit map.
4026// . For protecting _collector_state transitions, we take the CGC_lock.
4027//   Note that any races in the reading of card table entries by the
4028//   CMS thread on the one hand and the clearing of those entries by the
4029//   VM thread or the setting of those entries by the mutator threads on the
4030//   other are quite benign. However, for efficiency it makes sense to keep
4031//   the VM thread from racing with the CMS thread while the latter is
4032//   transferring dirty card info to the modUnionTable. We therefore also use the
4033//   CGC_lock to protect the reading of the card table and the mod union
4034//   table by the CMS thread.
4035// . We run concurrently with mutator updates, so scanning
4036//   needs to be done carefully  -- we should not try to scan
4037//   potentially uninitialized objects.
4038//
4039// Locking strategy: While holding the CGC_lock, we scan over and
4040// reset a maximal dirty range of the mod union / card tables, then lock
4041// the free_list_lock and bitmap lock to do a full marking, then
4042// release these locks; and repeat the cycle. This allows for a
4043// certain amount of fairness in the sharing of these locks between
4044// the CMS collector on the one hand, and the VM thread and the
4045// mutators on the other.
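//
// A rough sketch of that cycle (illustrative only; the helper names
// get_and_clear_next_dirty_range() and scan_and_mark_objects_on() are
// hypothetical, and the real code is in preclean_mod_union_table() and
// preclean_card_table() below):
//
//   while (there may be more dirty cards) {
//     { CMSTokenSync x(true);                        // CGC_lock level sync
//       dirtyRegion = get_and_clear_next_dirty_range(); }
//     { CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock);
//       scan_and_mark_objects_on(dirtyRegion); }     // locks released on exit
//   }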
4046
4047// NOTE: preclean_mod_union_table() and preclean_card_table()
4048// further below are largely identical; if you need to modify
4049// one of these methods, please check the other method too.
4050
4051size_t CMSCollector::preclean_mod_union_table(
4052  ConcurrentMarkSweepGeneration* gen,
4053  ScanMarkedObjectsAgainCarefullyClosure* cl) {
4054  verify_work_stacks_empty();
4055  verify_overflow_empty();
4056
4057  // strategy: starting with the first card, accumulate contiguous
4058  // ranges of dirty cards; clear these cards, then scan the region
4059  // covered by these cards.
4060
4061  // Since all of the MUT is committed ahead, we can just use
4062  // that, in case the generations expand while we are precleaning.
4063  // It might also be fine to just use the committed part of the
4064  // generation, but we might potentially miss cards when the
4065  // generation is rapidly expanding while we are in the midst
4066  // of precleaning.
4067  HeapWord* startAddr = gen->reserved().start();
4068  HeapWord* endAddr   = gen->reserved().end();
4069
4070  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4071
4072  size_t numDirtyCards, cumNumDirtyCards;
4073  HeapWord *nextAddr, *lastAddr;
4074  for (cumNumDirtyCards = numDirtyCards = 0,
4075       nextAddr = lastAddr = startAddr;
4076       nextAddr < endAddr;
4077       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4078
4079    ResourceMark rm;
4080    HandleMark   hm;
4081
4082    MemRegion dirtyRegion;
4083    {
4084      stopTimer();
4085      // Potential yield point
4086      CMSTokenSync ts(true);
4087      startTimer();
4088      sample_eden();
4089      // Get dirty region starting at nextAddr (inclusive),
4090      // simultaneously clearing it.
4091      dirtyRegion =
4092        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4093      assert(dirtyRegion.start() >= nextAddr,
4094             "returned region inconsistent?");
4095    }
4096    // Remember where the next search should begin.
4097    // The returned region (if non-empty) is a right open interval,
4098    // so lastAddr is obtained from the right end of that
4099    // interval.
4100    lastAddr = dirtyRegion.end();
4101    // Should do something more transparent and less hacky XXX
4102    numDirtyCards =
4103      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4104
4105    // We'll scan the cards in the dirty region (with periodic
4106    // yields for foreground GC as needed).
4107    if (!dirtyRegion.is_empty()) {
4108      assert(numDirtyCards > 0, "consistency check");
4109      HeapWord* stop_point = NULL;
4110      stopTimer();
4111      // Potential yield point
4112      CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4113                               bitMapLock());
4114      startTimer();
4115      {
4116        verify_work_stacks_empty();
4117        verify_overflow_empty();
4118        sample_eden();
4119        stop_point =
4120          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4121      }
4122      if (stop_point != NULL) {
4123        // The careful iteration stopped early either because it found an
4124        // uninitialized object, or because we were in the midst of an
4125        // "abortable preclean", which should now be aborted. Redirty
4126        // the bits corresponding to the partially-scanned or unscanned
4127        // cards. We'll either restart at the next block boundary or
4128        // abort the preclean.
4129        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4130               "Should only be AbortablePreclean.");
4131        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4132        if (should_abort_preclean()) {
4133          break; // out of preclean loop
4134        } else {
4135          // Compute the next address at which preclean should pick up;
4136          // might need bitMapLock in order to read P-bits.
4137          lastAddr = next_card_start_after_block(stop_point);
4138        }
4139      }
4140    } else {
4141      assert(lastAddr == endAddr, "consistency check");
4142      assert(numDirtyCards == 0, "consistency check");
4143      break;
4144    }
4145  }
4146  verify_work_stacks_empty();
4147  verify_overflow_empty();
4148  return cumNumDirtyCards;
4149}
4150
4151// NOTE: preclean_mod_union_table() above and preclean_card_table()
4152// below are largely identical; if you need to modify
4153// one of these methods, please check the other method too.
4154
4155size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4156  ScanMarkedObjectsAgainCarefullyClosure* cl) {
4157  // strategy: it's similar to preclean_mod_union_table() above, in that
4158  // we accumulate contiguous ranges of dirty cards, mark these cards
4159  // precleaned, then scan the region covered by these cards.
4160  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4161  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4162
4163  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4164
4165  size_t numDirtyCards, cumNumDirtyCards;
4166  HeapWord *lastAddr, *nextAddr;
4167
4168  for (cumNumDirtyCards = numDirtyCards = 0,
4169       nextAddr = lastAddr = startAddr;
4170       nextAddr < endAddr;
4171       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4172
4173    ResourceMark rm;
4174    HandleMark   hm;
4175
4176    MemRegion dirtyRegion;
4177    {
4178      // See comments in "Precleaning notes" above on why we
4179      // do this locking. XXX Could the locking overheads be
4180      // too high when dirty cards are sparse? [I don't think so.]
4181      stopTimer();
4182      CMSTokenSync x(true); // is cms thread
4183      startTimer();
4184      sample_eden();
4185      // Get and clear dirty region from card table
4186      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4187                                    MemRegion(nextAddr, endAddr),
4188                                    true,
4189                                    CardTableModRefBS::precleaned_card_val());
4190
4191      assert(dirtyRegion.start() >= nextAddr,
4192             "returned region inconsistent?");
4193    }
4194    lastAddr = dirtyRegion.end();
4195    numDirtyCards =
4196      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
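    // Illustrative arithmetic, not in the original source: on a typical
    // 64-bit VM with 512-byte cards, card_size_in_words is 512 / 8 = 64,
    // so a dirty region of 64K heap words (512KB) corresponds to
    // 64K / 64 = 1024 dirty cards.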
4197
4198    if (!dirtyRegion.is_empty()) {
4199      stopTimer();
4200      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4201      startTimer();
4202      sample_eden();
4203      verify_work_stacks_empty();
4204      verify_overflow_empty();
4205      HeapWord* stop_point =
4206        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4207      if (stop_point != NULL) {
4208        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4209               "Should only be AbortablePreclean.");
4210        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4211        if (should_abort_preclean()) {
4212          break; // out of preclean loop
4213        } else {
4214          // Compute the next address at which preclean should pick up.
4215          lastAddr = next_card_start_after_block(stop_point);
4216        }
4217      }
4218    } else {
4219      break;
4220    }
4221  }
4222  verify_work_stacks_empty();
4223  verify_overflow_empty();
4224  return cumNumDirtyCards;
4225}
4226
4227class PrecleanKlassClosure : public KlassClosure {
4228  KlassToOopClosure _cm_klass_closure;
4229 public:
4230  PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4231  void do_klass(Klass* k) {
4232    if (k->has_accumulated_modified_oops()) {
4233      k->clear_accumulated_modified_oops();
4234
4235      _cm_klass_closure.do_klass(k);
4236    }
4237  }
4238};
4239
4240// The freelist lock is needed to prevent asserts; is it really needed?
4241void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4242
4243  cl->set_freelistLock(freelistLock);
4244
4245  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4246
4247  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4248  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4249  PrecleanKlassClosure preclean_klass_closure(cl);
4250  ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4251
4252  verify_work_stacks_empty();
4253  verify_overflow_empty();
4254}
4255
4256void CMSCollector::checkpointRootsFinal() {
4257  assert(_collectorState == FinalMarking, "incorrect state transition?");
4258  check_correct_thread_executing();
4259  // world is stopped at this checkpoint
4260  assert(SafepointSynchronize::is_at_safepoint(),
4261         "world should be stopped");
4262  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4263
4264  verify_work_stacks_empty();
4265  verify_overflow_empty();
4266
4267  if (PrintGCDetails) {
4268    gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4269                        _young_gen->used() / K,
4270                        _young_gen->capacity() / K);
4271  }
4272  {
4273    if (CMSScavengeBeforeRemark) {
4274      GenCollectedHeap* gch = GenCollectedHeap::heap();
4275      // Temporarily set flag to false; GCH->do_collection will
4276      // expect it to be false and will set it to true
4277      FlagSetting fl(gch->_is_gc_active, false);
4278      NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4279        PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4280      gch->do_collection(true,                      // full (i.e. force, see below)
4281                         false,                     // !clear_all_soft_refs
4282                         0,                         // size
4283                         false,                     // is_tlab
4284                         GenCollectedHeap::YoungGen // type
4285        );
4286    }
4287    FreelistLocker x(this);
4288    MutexLockerEx y(bitMapLock(),
4289                    Mutex::_no_safepoint_check_flag);
4290    checkpointRootsFinalWork();
4291  }
4292  verify_work_stacks_empty();
4293  verify_overflow_empty();
4294}
4295
4296void CMSCollector::checkpointRootsFinalWork() {
4297  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4298
4299  assert(haveFreelistLocks(), "must have free list locks");
4300  assert_lock_strong(bitMapLock());
4301
4302  ResourceMark rm;
4303  HandleMark   hm;
4304
4305  GenCollectedHeap* gch = GenCollectedHeap::heap();
4306
4307  if (should_unload_classes()) {
4308    CodeCache::gc_prologue();
4309  }
4310  assert(haveFreelistLocks(), "must have free list locks");
4311  assert_lock_strong(bitMapLock());
4312
4313  // We might assume that we need not fill TLAB's when
4314  // CMSScavengeBeforeRemark is set, because we may have just done
4315  // a scavenge which would have filled all TLAB's -- and besides
4316  // Eden would be empty. This however may not always be the case --
4317  // for instance although we asked for a scavenge, it may not have
4318  // happened because of a JNI critical section. We probably need
4319  // a policy for deciding whether we can in that case wait until
4320  // the critical section releases and then do the remark following
4321  // the scavenge, and skip it here. In the absence of that policy,
4322  // or of an indication of whether the scavenge did indeed occur,
4323  // we cannot rely on TLAB's having been filled and must do
4324  // so here just in case a scavenge did not happen.
4325  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4326  // Update the saved marks which may affect the root scans.
4327  gch->save_marks();
4328
4329  if (CMSPrintEdenSurvivorChunks) {
4330    print_eden_and_survivor_chunk_arrays();
4331  }
4332
4333  {
4334    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4335
4336    // Note on the role of the mod union table:
4337    // Since the marker in "markFromRoots" marks concurrently with
4338    // mutators, it is possible for some reachable objects not to have been
4339    // scanned. For instance, an only reference to an object A was
4340    // placed in object B after the marker scanned B. Unless B is rescanned,
4341    // A would be collected. Such updates to references in marked objects
4342    // are detected via the mod union table which is the set of all cards
4343    // dirtied since the first checkpoint in this GC cycle and prior to
4344    // the most recent young generation GC, minus those cleaned up by the
4345    // concurrent precleaning.
4346    if (CMSParallelRemarkEnabled) {
4347      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
4348      do_remark_parallel();
4349    } else {
4350      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4351                  _gc_timer_cm, _gc_tracer_cm->gc_id());
4352      do_remark_non_parallel();
4353    }
4354  }
4355  verify_work_stacks_empty();
4356  verify_overflow_empty();
4357
4358  {
4359    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4360    refProcessingWork();
4361  }
4362  verify_work_stacks_empty();
4363  verify_overflow_empty();
4364
4365  if (should_unload_classes()) {
4366    CodeCache::gc_epilogue();
4367  }
4368  JvmtiExport::gc_epilogue();
4369
4370  // If we encountered any (marking stack / work queue) overflow
4371  // events during the current CMS cycle, take appropriate
4372  // remedial measures, where possible, so as to try and avoid
4373  // recurrence of that condition.
4374  assert(_markStack.isEmpty(), "No grey objects");
4375  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4376                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4377  if (ser_ovflw > 0) {
4378    if (PrintCMSStatistics != 0) {
4379      gclog_or_tty->print_cr("Marking stack overflow (benign) "
4380        "(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT
4381        ", kac_preclean=" SIZE_FORMAT ")",
4382        _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4383        _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4384    }
4385    _markStack.expand();
4386    _ser_pmc_remark_ovflw = 0;
4387    _ser_pmc_preclean_ovflw = 0;
4388    _ser_kac_preclean_ovflw = 0;
4389    _ser_kac_ovflw = 0;
4390  }
4391  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4392    if (PrintCMSStatistics != 0) {
4393      gclog_or_tty->print_cr("Work queue overflow (benign) "
4394        "(pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4395        _par_pmc_remark_ovflw, _par_kac_ovflw);
4396    }
4397    _par_pmc_remark_ovflw = 0;
4398    _par_kac_ovflw = 0;
4399  }
4400  if (PrintCMSStatistics != 0) {
4401     if (_markStack._hit_limit > 0) {
4402       gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4403                              _markStack._hit_limit);
4404     }
4405     if (_markStack._failed_double > 0) {
4406       gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT "),"
4407                              " current capacity " SIZE_FORMAT,
4408                              _markStack._failed_double,
4409                              _markStack.capacity());
4410     }
4411  }
4412  _markStack._hit_limit = 0;
4413  _markStack._failed_double = 0;
4414
4415  if ((VerifyAfterGC || VerifyDuringGC) &&
4416      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4417    verify_after_remark();
4418  }
4419
4420  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4421
4422  // Change under the freelistLocks.
4423  _collectorState = Sweeping;
4424  // Call isAllClear() under bitMapLock
4425  assert(_modUnionTable.isAllClear(),
4426      "Should be clear by end of the final marking");
4427  assert(_ct->klass_rem_set()->mod_union_is_clear(),
4428      "Should be clear by end of the final marking");
4429}
4430
4431void CMSParInitialMarkTask::work(uint worker_id) {
4432  elapsedTimer _timer;
4433  ResourceMark rm;
4434  HandleMark   hm;
4435
4436  // ---------- scan from roots --------------
4437  _timer.start();
4438  GenCollectedHeap* gch = GenCollectedHeap::heap();
4439  Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4440
4441  // ---------- young gen roots --------------
4442  {
4443    work_on_young_gen_roots(worker_id, &par_mri_cl);
4444    _timer.stop();
4445    if (PrintCMSStatistics != 0) {
4446      gclog_or_tty->print_cr(
4447        "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4448        worker_id, _timer.seconds());
4449    }
4450  }
4451
4452  // ---------- remaining roots --------------
4453  _timer.reset();
4454  _timer.start();
4455
4456  CLDToOopClosure cld_closure(&par_mri_cl, true);
4457
4458  gch->gen_process_roots(_strong_roots_scope,
4459                         GenCollectedHeap::OldGen,
4460                         false,     // yg was scanned above
4461                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4462                         _collector->should_unload_classes(),
4463                         &par_mri_cl,
4464                         NULL,
4465                         &cld_closure);
4466  assert(_collector->should_unload_classes()
4467         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4468         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4469  _timer.stop();
4470  if (PrintCMSStatistics != 0) {
4471    gclog_or_tty->print_cr(
4472      "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4473      worker_id, _timer.seconds());
4474  }
4475}
4476
4477// Parallel remark task
4478class CMSParRemarkTask: public CMSParMarkTask {
4479  CompactibleFreeListSpace* _cms_space;
4480
4481  // The per-thread work queues, available here for stealing.
4482  OopTaskQueueSet*       _task_queues;
4483  ParallelTaskTerminator _term;
4484  StrongRootsScope*      _strong_roots_scope;
4485
4486 public:
4487  // A value of 0 passed to n_workers will cause the number of
4488  // workers to be taken from the active workers in the work gang.
4489  CMSParRemarkTask(CMSCollector* collector,
4490                   CompactibleFreeListSpace* cms_space,
4491                   uint n_workers, WorkGang* workers,
4492                   OopTaskQueueSet* task_queues,
4493                   StrongRootsScope* strong_roots_scope):
4494    CMSParMarkTask("Rescan roots and grey objects in parallel",
4495                   collector, n_workers),
4496    _cms_space(cms_space),
4497    _task_queues(task_queues),
4498    _term(n_workers, task_queues),
4499    _strong_roots_scope(strong_roots_scope) { }
4500
4501  OopTaskQueueSet* task_queues() { return _task_queues; }
4502
4503  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4504
4505  ParallelTaskTerminator* terminator() { return &_term; }
4506  uint n_workers() { return _n_workers; }
4507
4508  void work(uint worker_id);
4509
4510 private:
4511  // ... of  dirty cards in old space
4512  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4513                                  Par_MarkRefsIntoAndScanClosure* cl);
4514
4515  // ... work stealing for the above
4516  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4517};
4518
4519class RemarkKlassClosure : public KlassClosure {
4520  KlassToOopClosure _cm_klass_closure;
4521 public:
4522  RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4523  void do_klass(Klass* k) {
4524    // Check if we have modified any oops in the Klass during the concurrent marking.
4525    if (k->has_accumulated_modified_oops()) {
4526      k->clear_accumulated_modified_oops();
4527
4528      // We could have transferred the current modified marks to the accumulated marks,
4529      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4530    } else if (k->has_modified_oops()) {
4531      // Don't clear anything, this info is needed by the next young collection.
4532    } else {
4533      // No modified oops in the Klass.
4534      return;
4535    }
4536
4537    // The klass has modified fields, need to scan the klass.
4538    _cm_klass_closure.do_klass(k);
4539  }
4540};
4541
4542void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4543  ParNewGeneration* young_gen = _collector->_young_gen;
4544  ContiguousSpace* eden_space = young_gen->eden();
4545  ContiguousSpace* from_space = young_gen->from();
4546  ContiguousSpace* to_space   = young_gen->to();
4547
4548  HeapWord** eca = _collector->_eden_chunk_array;
4549  size_t     ect = _collector->_eden_chunk_index;
4550  HeapWord** sca = _collector->_survivor_chunk_array;
4551  size_t     sct = _collector->_survivor_chunk_index;
4552
4553  assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4554  assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4555
4556  do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4557  do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4558  do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4559}
4560
4561// work_queue(i) is passed to the closure
4562// Par_MarkRefsIntoAndScanClosure.  The "i" parameter
4563// is also passed to do_dirty_card_rescan_tasks() and to
4564// do_work_steal() to select the i-th task_queue.
4565
4566void CMSParRemarkTask::work(uint worker_id) {
4567  elapsedTimer _timer;
4568  ResourceMark rm;
4569  HandleMark   hm;
4570
4571  // ---------- rescan from roots --------------
4572  _timer.start();
4573  GenCollectedHeap* gch = GenCollectedHeap::heap();
4574  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4575    _collector->_span, _collector->ref_processor(),
4576    &(_collector->_markBitMap),
4577    work_queue(worker_id));
4578
4579  // Rescan young gen roots first since these are likely
4580  // coarsely partitioned and may, on that account, constitute
4581  // the critical path; thus, it's best to start off that
4582  // work first.
4583  // ---------- young gen roots --------------
4584  {
4585    work_on_young_gen_roots(worker_id, &par_mrias_cl);
4586    _timer.stop();
4587    if (PrintCMSStatistics != 0) {
4588      gclog_or_tty->print_cr(
4589        "Finished young gen rescan work in %dth thread: %3.3f sec",
4590        worker_id, _timer.seconds());
4591    }
4592  }
4593
4594  // ---------- remaining roots --------------
4595  _timer.reset();
4596  _timer.start();
4597  gch->gen_process_roots(_strong_roots_scope,
4598                         GenCollectedHeap::OldGen,
4599                         false,     // yg was scanned above
4600                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4601                         _collector->should_unload_classes(),
4602                         &par_mrias_cl,
4603                         NULL,
4604                         NULL);     // The dirty klasses will be handled below
4605
4606  assert(_collector->should_unload_classes()
4607         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4608         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4609  _timer.stop();
4610  if (PrintCMSStatistics != 0) {
4611    gclog_or_tty->print_cr(
4612      "Finished remaining root rescan work in %dth thread: %3.3f sec",
4613      worker_id, _timer.seconds());
4614  }
4615
4616  // ---------- unhandled CLD scanning ----------
4617  if (worker_id == 0) { // Single threaded at the moment.
4618    _timer.reset();
4619    _timer.start();
4620
4621    // Scan all new class loader data objects and new dependencies that were
4622    // introduced during concurrent marking.
4623    ResourceMark rm;
4624    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4625    for (int i = 0; i < array->length(); i++) {
4626      par_mrias_cl.do_class_loader_data(array->at(i));
4627    }
4628
4629    // We don't need to keep track of new CLDs anymore.
4630    ClassLoaderDataGraph::remember_new_clds(false);
4631
4632    _timer.stop();
4633    if (PrintCMSStatistics != 0) {
4634      gclog_or_tty->print_cr(
4635          "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
4636          worker_id, _timer.seconds());
4637    }
4638  }
4639
4640  // ---------- dirty klass scanning ----------
4641  if (worker_id == 0) { // Single threaded at the moment.
4642    _timer.reset();
4643    _timer.start();
4644
4645    // Scan all classes that were dirtied during the concurrent marking phase.
4646    RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4647    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4648
4649    _timer.stop();
4650    if (PrintCMSStatistics != 0) {
4651      gclog_or_tty->print_cr(
4652          "Finished dirty klass scanning work in %dth thread: %3.3f sec",
4653          worker_id, _timer.seconds());
4654    }
4655  }
4656
4657  // We might have added oops to ClassLoaderData::_handles during the
4658  // concurrent marking phase. These oops point to newly allocated objects
4659  // that are guaranteed to be kept alive. Either by the direct allocation
4660  // code, or when the young collector processes the roots. Hence,
4661  // we don't have to revisit the _handles block during the remark phase.
4662
4663  // ---------- rescan dirty cards ------------
4664  _timer.reset();
4665  _timer.start();
4666
4667  // Do the rescan tasks for the CMS space
4668  // (cms_space).
4669  // "worker_id" is passed to select the task_queue for "worker_id"
4670  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4671  _timer.stop();
4672  if (PrintCMSStatistics != 0) {
4673    gclog_or_tty->print_cr(
4674      "Finished dirty card rescan work in %dth thread: %3.3f sec",
4675      worker_id, _timer.seconds());
4676  }
4677
4678  // ---------- steal work from other threads ...
4679  // ---------- ... and drain overflow list.
4680  _timer.reset();
4681  _timer.start();
4682  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4683  _timer.stop();
4684  if (PrintCMSStatistics != 0) {
4685    gclog_or_tty->print_cr(
4686      "Finished work stealing in %dth thread: %3.3f sec",
4687      worker_id, _timer.seconds());
4688  }
4689}
4690
4691// Note that parameter "i" is not used.
4692void
4693CMSParMarkTask::do_young_space_rescan(uint worker_id,
4694  OopsInGenClosure* cl, ContiguousSpace* space,
4695  HeapWord** chunk_array, size_t chunk_top) {
4696  // Until all tasks completed:
4697  // . claim an unclaimed task
4698  // . compute region boundaries corresponding to task claimed
4699  //   using chunk_array
4700  // . par_oop_iterate(cl) over that region
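  //
  // For example (illustrative only, values hypothetical): with
  // chunk_top == 2 and chunk_array == { a1, a2 } where
  // bottom() < a1 < a2 <= top(), the boundary computation below yields
  // three claimable tasks covering [bottom, a1), [a1, a2) and [a2, top).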
4701
4702  ResourceMark rm;
4703  HandleMark   hm;
4704
4705  SequentialSubTasksDone* pst = space->par_seq_tasks();
4706
4707  uint nth_task = 0;
4708  uint n_tasks  = pst->n_tasks();
4709
4710  if (n_tasks > 0) {
4711    assert(pst->valid(), "Uninitialized use?");
4712    HeapWord *start, *end;
4713    while (!pst->is_task_claimed(/* reference */ nth_task)) {
4714      // We claimed task # nth_task; compute its boundaries.
4715      if (chunk_top == 0) {  // no samples were taken
4716        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4717        start = space->bottom();
4718        end   = space->top();
4719      } else if (nth_task == 0) {
4720        start = space->bottom();
4721        end   = chunk_array[nth_task];
4722      } else if (nth_task < (uint)chunk_top) {
4723        assert(nth_task >= 1, "Control point invariant");
4724        start = chunk_array[nth_task - 1];
4725        end   = chunk_array[nth_task];
4726      } else {
4727        assert(nth_task == (uint)chunk_top, "Control point invariant");
4728        start = chunk_array[chunk_top - 1];
4729        end   = space->top();
4730      }
4731      MemRegion mr(start, end);
4732      // Verify that mr is in space
4733      assert(mr.is_empty() || space->used_region().contains(mr),
4734             "Should be in space");
4735      // Verify that "start" is an object boundary
4736      assert(mr.is_empty() || oop(mr.start())->is_oop(),
4737             "Should be an oop");
4738      space->par_oop_iterate(mr, cl);
4739    }
4740    pst->all_tasks_completed();
4741  }
4742}
4743
4744void
4745CMSParRemarkTask::do_dirty_card_rescan_tasks(
4746  CompactibleFreeListSpace* sp, int i,
4747  Par_MarkRefsIntoAndScanClosure* cl) {
4748  // Until all tasks completed:
4749  // . claim an unclaimed task
4750  // . compute region boundaries corresponding to task claimed
4751  // . transfer dirty bits ct->mut for that region
4752  // . apply rescanclosure to dirty mut bits for that region
4753
4754  ResourceMark rm;
4755  HandleMark   hm;
4756
4757  OopTaskQueue* work_q = work_queue(i);
4758  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4759  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4760  // CAUTION: This closure has state that persists across calls to
4761  // the work method dirty_range_iterate_clear() in that it has
4762  // embedded in it a (subtype of) UpwardsObjectClosure. The
4763  // use of that state in the embedded UpwardsObjectClosure instance
4764  // assumes that the cards are always iterated (even if in parallel
4765  // by several threads) in monotonically increasing order per each
4766  // thread. This is true of the implementation below which picks
4767  // card ranges (chunks) in monotonically increasing order globally
4768  // and, a-fortiori, in monotonically increasing order per thread
4769  // (the latter order being a subsequence of the former).
4770  // If the work code below is ever reorganized into a more chaotic
4771  // work-partitioning form than the current "sequential tasks"
4772  // paradigm, the use of that persistent state will have to be
4773  // revisited and modified appropriately. See also related
4774  // bug 4756801 work on which should examine this code to make
4775  // sure that the changes there do not run counter to the
4776  // assumptions made here and necessary for correctness and
4777  // efficiency. Note also that this code might yield inefficient
4778  // behavior in the case of very large objects that span one or
4779  // more work chunks. Such objects would potentially be scanned
4780  // several times redundantly. Work on 4756801 should try and
4781  // address that performance anomaly if at all possible. XXX
4782  MemRegion  full_span  = _collector->_span;
4783  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4784  MarkFromDirtyCardsClosure
4785    greyRescanClosure(_collector, full_span, // entire span of interest
4786                      sp, bm, work_q, cl);
4787
4788  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4789  assert(pst->valid(), "Uninitialized use?");
4790  uint nth_task = 0;
4791  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4792  MemRegion span = sp->used_region();
4793  HeapWord* start_addr = span.start();
4794  HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4795                                           alignment);
4796  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4797  assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4798         start_addr, "Check alignment");
4799  assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4800         chunk_size, "Check alignment");
4801
4802  while (!pst->is_task_claimed(/* reference */ nth_task)) {
4803    // Having claimed the nth_task, compute corresponding mem-region,
4804    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4805    // The alignment restriction ensures that we do not need any
4806    // synchronization with other gang-workers while setting or
4807    // clearing bits in this chunk of the MUT.
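    // Illustrative arithmetic, not in the original source: assuming
    // 512-byte cards, 64-bit words and one MUT bit per card, the alignment
    // above is 512 * 64 = 32K bytes of heap per MUT word, so a chunk that
    // starts and ends on such a boundary owns whole MUT words that no
    // other worker touches.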
4808    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4809                                    start_addr + (nth_task+1)*chunk_size);
4810    // The last chunk's end might be way beyond end of the
4811    // used region. In that case pull back appropriately.
4812    if (this_span.end() > end_addr) {
4813      this_span.set_end(end_addr);
4814      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4815    }
4816    // Iterate over the dirty cards covering this chunk, marking them
4817    // precleaned, and setting the corresponding bits in the mod union
4818    // table. Since we have been careful to partition at Card and MUT-word
4819    // boundaries no synchronization is needed between parallel threads.
4820    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4821                                                 &modUnionClosure);
4822
4823    // Having transferred these marks into the modUnionTable,
4824    // rescan the marked objects on the dirty cards in the modUnionTable.
4825    // Even if this is at a synchronous collection, the initial marking
4826    // may have been done during an asynchronous collection so there
4827    // may be dirty bits in the mod-union table.
4828    _collector->_modUnionTable.dirty_range_iterate_clear(
4829                  this_span, &greyRescanClosure);
4830    _collector->_modUnionTable.verifyNoOneBitsInRange(
4831                                 this_span.start(),
4832                                 this_span.end());
4833  }
4834  pst->all_tasks_completed();  // declare that i am done
4835}
4836
4837// . see if we can share work_queues with ParNew? XXX
4838void
4839CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
4840                                int* seed) {
4841  OopTaskQueue* work_q = work_queue(i);
4842  NOT_PRODUCT(int num_steals = 0;)
4843  oop obj_to_scan;
4844  CMSBitMap* bm = &(_collector->_markBitMap);
4845
4846  while (true) {
4847    // Completely finish any left over work from (an) earlier round(s)
4848    cl->trim_queue(0);
4849    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4850                                         (size_t)ParGCDesiredObjsFromOverflowList);
4851    // Now check if there's any work in the overflow list
4852    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4853    // only affects the number of attempts made to get work from the
4854    // overflow list and does not affect the number of workers.  Just
4855    // pass ParallelGCThreads so this behavior is unchanged.
4856    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4857                                                work_q,
4858                                                ParallelGCThreads)) {
4859      // found something in global overflow list;
4860      // not yet ready to go stealing work from others.
4861      // We'd like to assert(work_q->size() != 0, ...)
4862      // because we just took work from the overflow list,
4863      // but of course we can't since all of that could have
4864      // been already stolen from us.
4865      // "He giveth and He taketh away."
4866      continue;
4867    }
4868    // Verify that we have no work before we resort to stealing
4869    assert(work_q->size() == 0, "Have work, shouldn't steal");
4870    // Try to steal from other queues that have work
4871    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4872      NOT_PRODUCT(num_steals++;)
4873      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4874      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4875      // Do scanning work
4876      obj_to_scan->oop_iterate(cl);
4877      // Loop around, finish this work, and try to steal some more
4878    } else if (terminator()->offer_termination()) {
4879        break;  // nirvana from the infinite cycle
4880    }
4881  }
4882  NOT_PRODUCT(
4883    if (PrintCMSStatistics != 0) {
4884      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
4885    }
4886  )
4887  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4888         "Else our work is not yet done");
4889}
4890
4891// Record object boundaries in _eden_chunk_array by sampling the eden
4892// top in the slow-path eden object allocation code path and record
4893// the boundaries, if CMSEdenChunksRecordAlways is true. If
4894// CMSEdenChunksRecordAlways is false, we use the other asynchronous
4895// sampling in sample_eden() that activates during the
4896// preclean phase.
4897void CMSCollector::sample_eden_chunk() {
4898  if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4899    if (_eden_chunk_lock->try_lock()) {
4900      // Record a sample. This is the critical section. The contents
4901      // of the _eden_chunk_array have to be non-decreasing in the
4902      // address order.
4903      _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4904      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4905             "Unexpected state of Eden");
4906      if (_eden_chunk_index == 0 ||
4907          ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4908           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4909                          _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4910        _eden_chunk_index++;  // commit sample
4911      }
4912      _eden_chunk_lock->unlock();
4913    }
4914  }
4915}
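// For example (illustrative only): if CMSSamplingGrain is 16K heap words,
// a newly observed eden top is committed as a fresh sample only when
// pointer_delta() says it lies at least 16K words past the previously
// recorded boundary; closer tops simply overwrite the uncommitted slot,
// keeping _eden_chunk_array sparse and strictly increasing.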
4916
4917// Return a thread-local PLAB recording array, as appropriate.
4918void* CMSCollector::get_data_recorder(int thr_num) {
4919  if (_survivor_plab_array != NULL &&
4920      (CMSPLABRecordAlways ||
4921       (_collectorState > Marking && _collectorState < FinalMarking))) {
4922    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4923    ChunkArray* ca = &_survivor_plab_array[thr_num];
4924    ca->reset();   // clear it so that fresh data is recorded
4925    return (void*) ca;
4926  } else {
4927    return NULL;
4928  }
4929}
4930
4931// Reset all the thread-local PLAB recording arrays
4932void CMSCollector::reset_survivor_plab_arrays() {
4933  for (uint i = 0; i < ParallelGCThreads; i++) {
4934    _survivor_plab_array[i].reset();
4935  }
4936}
4937
4938// Merge the per-thread plab arrays into the global survivor chunk
4939// array which will provide the partitioning of the survivor space
4940// for CMS initial scan and rescan.
4941void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4942                                              int no_of_gc_threads) {
4943  assert(_survivor_plab_array  != NULL, "Error");
4944  assert(_survivor_chunk_array != NULL, "Error");
4945  assert(_collectorState == FinalMarking ||
4946         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4947  for (int j = 0; j < no_of_gc_threads; j++) {
4948    _cursor[j] = 0;
4949  }
4950  HeapWord* top = surv->top();
4951  size_t i;
4952  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4953    HeapWord* min_val = top;          // Higher than any PLAB address
4954    uint      min_tid = 0;            // position of min_val this round
4955    for (int j = 0; j < no_of_gc_threads; j++) {
4956      ChunkArray* cur_sca = &_survivor_plab_array[j];
4957      if (_cursor[j] == cur_sca->end()) {
4958        continue;
4959      }
4960      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4961      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4962      assert(surv->used_region().contains(cur_val), "Out of bounds value");
4963      if (cur_val < min_val) {
4964        min_tid = j;
4965        min_val = cur_val;
4966      } else {
4967        assert(cur_val < top, "All recorded addresses should be less");
4968      }
4969    }
4970    // At this point min_val and min_tid are respectively
4971    // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4972    // and the thread (j) that witnesses that address.
4973    // We record this address in the _survivor_chunk_array[i]
4974    // and increment _cursor[min_tid] prior to the next round i.
4975    if (min_val == top) {
4976      break;
4977    }
4978    _survivor_chunk_array[i] = min_val;
4979    _cursor[min_tid]++;
4980  }
4981  // We are all done; record the size of the _survivor_chunk_array
4982  _survivor_chunk_index = i; // exclusive: [0, i)
4983  if (PrintCMSStatistics > 0) {
4984    gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4985  }
4986  // Verify that we used up all the recorded entries
4987  #ifdef ASSERT
4988    size_t total = 0;
4989    for (int j = 0; j < no_of_gc_threads; j++) {
4990      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4991      total += _cursor[j];
4992    }
4993    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4994    // Check that the merged array is in sorted order
4995    if (total > 0) {
4996      for (size_t i = 0; i < total - 1; i++) {
4997        if (PrintCMSStatistics > 0) {
4998          gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4999                              i, p2i(_survivor_chunk_array[i]));
5000        }
5001        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5002               "Not sorted");
5003      }
5004    }
5005  #endif // ASSERT
5006}
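// Illustrative example, not part of the original source: the loop above is
// effectively a k-way merge of the per-thread sorted PLAB boundary arrays.
// With two GC threads that recorded { 0x1000, 0x3000 } and { 0x2000 }
// (all addresses below top()), the merged _survivor_chunk_array becomes
// { 0x1000, 0x2000, 0x3000 } and _survivor_chunk_index is set to 3.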
5007
5008// Set up the space's par_seq_tasks structure for work claiming
5009// for parallel initial scan and rescan of young gen.
5010// See ParRescanTask where this is currently used.
5011void
5012CMSCollector::
5013initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5014  assert(n_threads > 0, "Unexpected n_threads argument");
5015
5016  // Eden space
5017  if (!_young_gen->eden()->is_empty()) {
5018    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
5019    assert(!pst->valid(), "Clobbering existing data?");
5020    // Each valid entry in [0, _eden_chunk_index) represents a task.
5021    size_t n_tasks = _eden_chunk_index + 1;
5022    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5023    // Sets the condition for completion of the subtask (how many threads
5024    // need to finish in order to be done).
5025    pst->set_n_threads(n_threads);
5026    pst->set_n_tasks((int)n_tasks);
5027  }
5028
5029  // Merge the survivor plab arrays into _survivor_chunk_array
5030  if (_survivor_plab_array != NULL) {
5031    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
5032  } else {
5033    assert(_survivor_chunk_index == 0, "Error");
5034  }
5035
5036  // To space
5037  {
5038    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
5039    assert(!pst->valid(), "Clobbering existing data?");
5040    // Sets the condition for completion of the subtask (how many threads
5041    // need to finish in order to be done).
5042    pst->set_n_threads(n_threads);
5043    pst->set_n_tasks(1);
5044    assert(pst->valid(), "Error");
5045  }
5046
5047  // From space
5048  {
5049    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5050    assert(!pst->valid(), "Clobbering existing data?");
5051    size_t n_tasks = _survivor_chunk_index + 1;
5052    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5053    // Sets the condition for completion of the subtask (how many threads
5054    // need to finish in order to be done).
5055    pst->set_n_threads(n_threads);
5056    pst->set_n_tasks((int)n_tasks);
5057    assert(pst->valid(), "Error");
5058  }
5059}
5060
5061// Parallel version of remark
5062void CMSCollector::do_remark_parallel() {
5063  GenCollectedHeap* gch = GenCollectedHeap::heap();
5064  WorkGang* workers = gch->workers();
5065  assert(workers != NULL, "Need parallel worker threads.");
5066  // Choose to use the number of GC workers most recently set
5067  // into "active_workers".
5068  uint n_workers = workers->active_workers();
5069
5070  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5071
5072  StrongRootsScope srs(n_workers);
5073
5074  CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5075
5076  // We won't be iterating over the cards in the card table updating
5077  // the younger_gen cards, so we shouldn't call the following else
5078  // the verification code as well as subsequent younger_refs_iterate
5079  // code would get confused. XXX
5080  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5081
5082  // The young gen rescan work will not be done as part of
5083  // process_roots (which currently doesn't know how to
5084  // parallelize such a scan), but rather will be broken up into
5085  // a set of parallel tasks (via the sampling that the [abortable]
5086  // preclean phase did of eden, plus the [two] tasks of
5087  // scanning the [two] survivor spaces. Further fine-grain
5088  // parallelization of the scanning of the survivor spaces
5089  // themselves, and of precleaning of the younger gen itself
5090  // is deferred to the future.
5091  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5092
5093  // The dirty card rescan work is broken up into a "sequence"
5094  // of parallel tasks (per constituent space) that are dynamically
5095  // claimed by the parallel threads.
5096  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5097
5098  // It turns out that even when we're using 1 thread, doing the work in a
5099  // separate thread causes wide variance in run times.  We can't help this
5100  // in the multi-threaded case, but we special-case n=1 here to get
5101  // repeatable measurements of the 1-thread overhead of the parallel code.
5102  if (n_workers > 1) {
5103    // Make refs discovery MT-safe, if it isn't already: it may not
5104    // necessarily be so, since it's possible that we are doing
5105    // ST marking.
5106    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5107    workers->run_task(&tsk);
5108  } else {
5109    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5110    tsk.work(0);
5111  }
5112
5113  // restore, single-threaded for now, any preserved marks
5114  // as a result of work_q overflow
5115  restore_preserved_marks_if_any();
5116}
5117
5118// Non-parallel version of remark
5119void CMSCollector::do_remark_non_parallel() {
5120  ResourceMark rm;
5121  HandleMark   hm;
5122  GenCollectedHeap* gch = GenCollectedHeap::heap();
5123  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5124
5125  MarkRefsIntoAndScanClosure
5126    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5127             &_markStack, this,
5128             false /* should_yield */, false /* not precleaning */);
5129  MarkFromDirtyCardsClosure
5130    markFromDirtyCardsClosure(this, _span,
5131                              NULL,  // space is set further below
5132                              &_markBitMap, &_markStack, &mrias_cl);
5133  {
5134    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5135    // Iterate over the dirty cards, setting the corresponding bits in the
5136    // mod union table.
5137    {
5138      ModUnionClosure modUnionClosure(&_modUnionTable);
5139      _ct->ct_bs()->dirty_card_iterate(
5140                      _cmsGen->used_region(),
5141                      &modUnionClosure);
5142    }
5143    // Having transferred these marks into the modUnionTable, we just need
5144    // to rescan the marked objects on the dirty cards in the modUnionTable.
5145    // The initial marking may have been done during an asynchronous
5146    // collection so there may be dirty bits in the mod-union table.
5147    const int alignment =
5148      CardTableModRefBS::card_size * BitsPerWord;
5149    {
5150      // ... First handle dirty cards in CMS gen
5151      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5152      MemRegion ur = _cmsGen->used_region();
5153      HeapWord* lb = ur.start();
5154      HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5155      MemRegion cms_span(lb, ub);
5156      _modUnionTable.dirty_range_iterate_clear(cms_span,
5157                                               &markFromDirtyCardsClosure);
5158      verify_work_stacks_empty();
5159      if (PrintCMSStatistics != 0) {
5160        gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5161          markFromDirtyCardsClosure.num_dirty_cards());
5162      }
5163    }
5164  }
5165  if (VerifyDuringGC &&
5166      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5167    HandleMark hm;  // Discard invalid handles created during verification
5168    Universe::verify();
5169  }
5170  {
5171    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5172
5173    verify_work_stacks_empty();
5174
5175    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5176    StrongRootsScope srs(1);
5177
5178    gch->gen_process_roots(&srs,
5179                           GenCollectedHeap::OldGen,
5180                           true,  // younger gens as roots
5181                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
5182                           should_unload_classes(),
5183                           &mrias_cl,
5184                           NULL,
5185                           NULL); // The dirty klasses will be handled below
5186
5187    assert(should_unload_classes()
5188           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5189           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5190  }
5191
5192  {
5193    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5194
5195    verify_work_stacks_empty();
5196
5197    // Scan all class loader data objects that might have been introduced
5198    // during concurrent marking.
5199    ResourceMark rm;
5200    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5201    for (int i = 0; i < array->length(); i++) {
5202      mrias_cl.do_class_loader_data(array->at(i));
5203    }
5204
5205    // We don't need to keep track of new CLDs anymore.
5206    ClassLoaderDataGraph::remember_new_clds(false);
5207
5208    verify_work_stacks_empty();
5209  }
5210
5211  {
5212    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5213
5214    verify_work_stacks_empty();
5215
5216    RemarkKlassClosure remark_klass_closure(&mrias_cl);
5217    ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5218
5219    verify_work_stacks_empty();
5220  }
5221
5222  // We might have added oops to ClassLoaderData::_handles during the
5223  // concurrent marking phase. These oops point to newly allocated objects
5224  // that are guaranteed to be kept alive. Either by the direct allocation
5225  // code, or when the young collector processes the roots. Hence,
5226  // we don't have to revisit the _handles block during the remark phase.
5227
5228  verify_work_stacks_empty();
5229  // Restore evacuated mark words, if any, used for overflow list links
5230  if (!CMSOverflowEarlyRestoration) {
5231    restore_preserved_marks_if_any();
5232  }
5233  verify_overflow_empty();
5234}
5235
5236////////////////////////////////////////////////////////
5237// Parallel Reference Processing Task Proxy Class
5238////////////////////////////////////////////////////////
5239class AbstractGangTaskWOopQueues : public AbstractGangTask {
5240  OopTaskQueueSet*       _queues;
5241  ParallelTaskTerminator _terminator;
5242 public:
5243  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5244    AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5245  ParallelTaskTerminator* terminator() { return &_terminator; }
5246  OopTaskQueueSet* queues() { return _queues; }
5247};
5248
5249class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5250  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5251  CMSCollector*          _collector;
5252  CMSBitMap*             _mark_bit_map;
5253  const MemRegion        _span;
5254  ProcessTask&           _task;
5255
5256public:
5257  CMSRefProcTaskProxy(ProcessTask&     task,
5258                      CMSCollector*    collector,
5259                      const MemRegion& span,
5260                      CMSBitMap*       mark_bit_map,
5261                      AbstractWorkGang* workers,
5262                      OopTaskQueueSet* task_queues):
5263    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5264      task_queues,
5265      workers->active_workers()),
5266    _task(task),
5267    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5268  {
5269    assert(_collector->_span.equals(_span) && !_span.is_empty(),
5270           "Inconsistency in _span");
5271  }
5272
5273  OopTaskQueueSet* task_queues() { return queues(); }
5274
5275  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5276
5277  void do_work_steal(int i,
5278                     CMSParDrainMarkingStackClosure* drain,
5279                     CMSParKeepAliveClosure* keep_alive,
5280                     int* seed);
5281
5282  virtual void work(uint worker_id);
5283};
5284
5285void CMSRefProcTaskProxy::work(uint worker_id) {
5286  ResourceMark rm;
5287  HandleMark hm;
5288  assert(_collector->_span.equals(_span), "Inconsistency in _span");
5289  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5290                                        _mark_bit_map,
5291                                        work_queue(worker_id));
5292  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5293                                                 _mark_bit_map,
5294                                                 work_queue(worker_id));
5295  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5296  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5297  if (_task.marks_oops_alive()) {
5298    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5299                  _collector->hash_seed(worker_id));
5300  }
5301  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5302  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5303}
5304
5305class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5306  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5307  EnqueueTask& _task;
5308
5309public:
5310  CMSRefEnqueueTaskProxy(EnqueueTask& task)
5311    : AbstractGangTask("Enqueue reference objects in parallel"),
5312      _task(task)
5313  { }
5314
5315  virtual void work(uint worker_id)
5316  {
5317    _task.work(worker_id);
5318  }
5319};
5320
5321CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5322  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5323   _span(span),
5324   _bit_map(bit_map),
5325   _work_queue(work_queue),
5326   _mark_and_push(collector, span, bit_map, work_queue),
5327   _low_water_mark(MIN2((work_queue->max_elems()/4),
5328                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5329{ }
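// The low water mark bounds how many entries this closure keeps parked on
// its work queue: pushes beyond the mark cause the queue to be trimmed
// back down (the excess entries are processed eagerly), while entries at
// or below the mark stay queued where other workers can steal them.
// Illustrative arithmetic only (the capacity is hypothetical): with a
// queue capacity of 4096 entries and 8 parallel GC threads, the mark is
// MIN2(4096/4, CMSWorkQueueDrainThreshold * 8).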
5330
5331// . see if we can share work_queues with ParNew? XXX
5332void CMSRefProcTaskProxy::do_work_steal(int i,
5333  CMSParDrainMarkingStackClosure* drain,
5334  CMSParKeepAliveClosure* keep_alive,
5335  int* seed) {
5336  OopTaskQueue* work_q = work_queue(i);
5337  NOT_PRODUCT(int num_steals = 0;)
5338  oop obj_to_scan;
5339
5340  while (true) {
5341    // Completely finish any leftover work from earlier rounds
5342    drain->trim_queue(0);
5343    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5344                                         (size_t)ParGCDesiredObjsFromOverflowList);
5345    // Now check if there's any work in the overflow list
5346    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5347    // only affects the number of attempts made to get work from the
5348    // overflow list and does not affect the number of workers.  Just
5349    // pass ParallelGCThreads so this behavior is unchanged.
5350    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5351                                                work_q,
5352                                                ParallelGCThreads)) {
5353      // Found something in global overflow list;
5354      // not yet ready to go stealing work from others.
5355      // We'd like to assert(work_q->size() != 0, ...)
5356      // because we just took work from the overflow list,
5357    // but of course we can't, since all of that work might
5358    // already have been stolen from us.
5359      continue;
5360    }
5361    // Verify that we have no work before we resort to stealing
5362    assert(work_q->size() == 0, "Have work, shouldn't steal");
5363    // Try to steal from other queues that have work
5364    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5365      NOT_PRODUCT(num_steals++;)
5366      assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5367      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5368      // Do scanning work
5369      obj_to_scan->oop_iterate(keep_alive);
5370      // Loop around, finish this work, and try to steal some more
5371    } else if (terminator()->offer_termination()) {
5372      break;  // nirvana from the infinite cycle
5373    }
5374  }
5375  NOT_PRODUCT(
5376    if (PrintCMSStatistics != 0) {
5377      gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5378    }
5379  )
5380}
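// The loop above follows the usual task-queue termination protocol: each
// worker (1) drains its own queue, (2) refills from the shared overflow
// list when it can, (3) tries to steal from a peer's queue, and (4) only
// when all of that fails offers termination to the ParallelTaskTerminator,
// which lets the gang finish once every worker has offered termination
// and no new work has shown up. A rough sketch of the shape of the loop
// (illustrative only, not the actual helpers):
//
//   while (true) {
//     drain_own_queue();
//     if (take_from_overflow_list()) continue;
//     if (steal_from_some_peer(&obj)) { scan(obj); continue; }
//     if (terminator()->offer_termination()) break;
//   }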
5381
5382void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5383{
5384  GenCollectedHeap* gch = GenCollectedHeap::heap();
5385  WorkGang* workers = gch->workers();
5386  assert(workers != NULL, "Need parallel worker threads.");
5387  CMSRefProcTaskProxy rp_task(task, &_collector,
5388                              _collector.ref_processor()->span(),
5389                              _collector.markBitMap(),
5390                              workers, _collector.task_queues());
5391  workers->run_task(&rp_task);
5392}
5393
5394void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5395{
5396
5397  GenCollectedHeap* gch = GenCollectedHeap::heap();
5398  WorkGang* workers = gch->workers();
5399  assert(workers != NULL, "Need parallel worker threads.");
5400  CMSRefEnqueueTaskProxy enq_task(task);
5401  workers->run_task(&enq_task);
5402}
5403
5404void CMSCollector::refProcessingWork() {
5405  ResourceMark rm;
5406  HandleMark   hm;
5407
5408  ReferenceProcessor* rp = ref_processor();
5409  assert(rp->span().equals(_span), "Spans should be equal");
5410  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5411  // Process weak references.
5412  rp->setup_policy(false);
5413  verify_work_stacks_empty();
5414
5415  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5416                                          &_markStack, false /* !preclean */);
5417  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5418                                _span, &_markBitMap, &_markStack,
5419                                &cmsKeepAliveClosure, false /* !preclean */);
5420  {
5421    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5422
5423    ReferenceProcessorStats stats;
5424    if (rp->processing_is_mt()) {
5425      // Set the degree of MT here.  If the discovery is done MT, there
5426      // may have been a different number of threads doing the discovery
5427      // and a different number of discovered lists may have Ref objects.
5428      // That is OK as long as the Reference lists are balanced (see
5429      // balance_all_queues() and balance_queues()).
5430      GenCollectedHeap* gch = GenCollectedHeap::heap();
5431      uint active_workers = ParallelGCThreads;
5432      WorkGang* workers = gch->workers();
5433      if (workers != NULL) {
5434        active_workers = workers->active_workers();
5435        // The expectation is that active_workers will have already
5436        // been set to a reasonable value.  If it has not been set,
5437        // investigate.
5438        assert(active_workers > 0, "Should have been set during scavenge");
5439      }
5440      rp->set_active_mt_degree(active_workers);
5441      CMSRefProcTaskExecutor task_executor(*this);
5442      stats = rp->process_discovered_references(&_is_alive_closure,
5443                                        &cmsKeepAliveClosure,
5444                                        &cmsDrainMarkingStackClosure,
5445                                        &task_executor,
5446                                        _gc_timer_cm,
5447                                        _gc_tracer_cm->gc_id());
5448    } else {
5449      stats = rp->process_discovered_references(&_is_alive_closure,
5450                                        &cmsKeepAliveClosure,
5451                                        &cmsDrainMarkingStackClosure,
5452                                        NULL,
5453                                        _gc_timer_cm,
5454                                        _gc_tracer_cm->gc_id());
5455    }
5456    _gc_tracer_cm->report_gc_reference_stats(stats);
5457
5458  }
5459
5460  // This is the point where the entire marking should have completed.
5461  verify_work_stacks_empty();
5462
5463  if (should_unload_classes()) {
5464    {
5465      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5466
5467      // Unload classes and purge the SystemDictionary.
5468      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5469
5470      // Unload nmethods.
5471      CodeCache::do_unloading(&_is_alive_closure, purged_class);
5472
5473      // Prune dead klasses from subklass/sibling/implementor lists.
5474      Klass::clean_weak_klass_links(&_is_alive_closure);
5475    }
5476
5477    {
5478      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5479      // Clean up unreferenced symbols in symbol table.
5480      SymbolTable::unlink();
5481    }
5482
5483    {
5484      GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5485      // Delete entries for dead interned strings.
5486      StringTable::unlink(&_is_alive_closure);
5487    }
5488  }
5489
5490
5491  // Restore any preserved marks as a result of mark stack or
5492  // work queue overflow
5493  restore_preserved_marks_if_any();  // done single-threaded for now
5494
5495  rp->set_enqueuing_is_done(true);
5496  if (rp->processing_is_mt()) {
5497    rp->balance_all_queues();
5498    CMSRefProcTaskExecutor task_executor(*this);
5499    rp->enqueue_discovered_references(&task_executor);
5500  } else {
5501    rp->enqueue_discovered_references(NULL);
5502  }
5503  rp->verify_no_references_recorded();
5504  assert(!rp->discovery_enabled(), "should have been disabled");
5505}
5506
5507#ifndef PRODUCT
5508void CMSCollector::check_correct_thread_executing() {
5509  Thread* t = Thread::current();
5510  // Only the VM thread or the CMS thread should be here.
5511  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5512         "Unexpected thread type");
5513  // If this is the vm thread, the foreground process
5514  // should not be waiting.  Note that _foregroundGCIsActive is
5515  // true while the foreground collector is waiting.
5516  if (_foregroundGCShouldWait) {
5517    // We cannot be the VM thread
5518    assert(t->is_ConcurrentGC_thread(),
5519           "Should be CMS thread");
5520  } else {
5521    // We can be the CMS thread only if we are in a stop-world
5522    // phase of CMS collection.
5523    if (t->is_ConcurrentGC_thread()) {
5524      assert(_collectorState == InitialMarking ||
5525             _collectorState == FinalMarking,
5526             "Should be a stop-world phase");
5527      // The CMS thread should be holding the CMS_token.
5528      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5529             "Potential interference with concurrently "
5530             "executing VM thread");
5531    }
5532  }
5533}
5534#endif
5535
5536void CMSCollector::sweep() {
5537  assert(_collectorState == Sweeping, "just checking");
5538  check_correct_thread_executing();
5539  verify_work_stacks_empty();
5540  verify_overflow_empty();
5541  increment_sweep_count();
5542  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5543
5544  _inter_sweep_timer.stop();
5545  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5546
5547  assert(!_intra_sweep_timer.is_active(), "Should not be active");
5548  _intra_sweep_timer.reset();
5549  _intra_sweep_timer.start();
5550  {
5551    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5552    CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5553    // First sweep the old gen
5554    {
5555      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5556                               bitMapLock());
5557      sweepWork(_cmsGen);
5558    }
5559
5560    // Update Universe::_heap_*_at_gc figures.
5561    // We need all the free list locks to make the abstract state
5562    // transition from Sweeping to Resetting. See detailed note
5563    // further below.
5564    {
5565      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5566      // Update heap occupancy information which is used as
5567      // input to soft ref clearing policy at the next gc.
5568      Universe::update_heap_info_at_gc();
5569      _collectorState = Resizing;
5570    }
5571  }
5572  verify_work_stacks_empty();
5573  verify_overflow_empty();
5574
5575  if (should_unload_classes()) {
5576    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5577    // requires that the virtual spaces are stable and not deleted.
5578    ClassLoaderDataGraph::set_should_purge(true);
5579  }
5580
5581  _intra_sweep_timer.stop();
5582  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5583
5584  _inter_sweep_timer.reset();
5585  _inter_sweep_timer.start();
5586
5587  // We need to use a monotonically non-decreasing time in ms or we
5588  // will see time-warp warnings; os::javaTimeMillis() does not
5589  // guarantee monotonicity, so we use os::javaTimeNanos() instead.
5590  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5591  update_time_of_last_gc(now);
5592
5593  // NOTE on abstract state transitions:
5594  // Mutators allocate-live and/or mark the mod-union table dirty
5595  // based on the state of the collection.  The former is done in
5596  // the interval [Marking, Sweeping] and the latter in the interval
5597  // [Marking, Sweeping).  Thus the transitions into the Marking state
5598  // and out of the Sweeping state must be synchronously visible
5599  // globally to the mutators.
5600  // The transition into the Marking state happens with the world
5601  // stopped so the mutators will globally see it.  Sweeping is
5602  // done asynchronously by the background collector so the transition
5603  // from the Sweeping state to the Resizing state must be done
5604  // under the freelistLock (as is the check for whether to
5605  // allocate-live and whether to dirty the mod-union table).
5606  assert(_collectorState == Resizing, "Change of collector state to"
5607    " Resizing must be done under the freelistLocks (plural)");
5608
5609  // Now that sweeping has been completed, we clear
5610  // the incremental_collection_failed flag,
5611  // thus inviting a younger gen collection to promote into
5612  // this generation. If such a promotion may still fail,
5613  // the flag will be set again when a young collection is
5614  // attempted.
5615  GenCollectedHeap* gch = GenCollectedHeap::heap();
5616  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5617  gch->update_full_collections_completed(_collection_count_start);
5618}
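// A note on the two timers used above: _inter_sweep_timer measures the
// interval from the end of one sweep to the start of the next, and
// _intra_sweep_timer measures the sweep itself. Their padded averages
// (roughly, a running average plus a multiple of the recent deviation, so
// the estimate errs on the high side) are passed to beginSweepFLCensus()
// in sweepWork() below, where the free list code uses them to project
// per-size demand when deciding whether to coalesce or split blocks
// during the sweep.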
5619
5620// FIX ME!!! Looks like this belongs in CFLSpace, with
5621// CMSGen merely delegating to it.
5622void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5623  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5624  HeapWord*  minAddr        = _cmsSpace->bottom();
5625  HeapWord*  largestAddr    =
5626    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5627  if (largestAddr == NULL) {
5628    // The dictionary appears to be empty.  In this case
5629    // try to coalesce at the end of the heap.
5630    largestAddr = _cmsSpace->end();
5631  }
5632  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5633  size_t nearLargestOffset =
5634    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5635  if (PrintFLSStatistics != 0) {
5636    gclog_or_tty->print_cr(
5637      "CMS: Large Block: " PTR_FORMAT ";"
5638      " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5639      p2i(largestAddr),
5640      p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5641  }
5642  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5643}
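// Illustrative arithmetic (the addresses and the flag value are made up):
// if the space starts at minAddr and the largest free chunk begins 800M
// beyond it, then with FLSLargestBlockCoalesceProximity of, say, 0.98 the
// boundary is set near minAddr + 784M (less MinChunkSize). Blocks swept at
// or beyond that boundary are treated as "near the largest chunk" (see
// isNearLargestChunk() below), the idea being that free space adjacent to
// the current largest block is the most profitable place to coalesce.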
5644
5645bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5646  return addr >= _cmsSpace->nearLargestChunk();
5647}
5648
5649FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5650  return _cmsSpace->find_chunk_at_end();
5651}
5652
5653void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5654                                                    bool full) {
5655  // If the young generation has been collected, gather any statistics
5656  // that are of interest at this point.
5657  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5658  if (!full && current_is_young) {
5659    // Gather statistics on the young generation collection.
5660    collector()->stats().record_gc0_end(used());
5661  }
5662}
5663
5664void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
5665  // We iterate over the space(s) underlying this generation,
5666  // checking the mark bit map to see if the bits corresponding
5667  // to specific blocks are marked or not. Blocks that are
5668  // marked are live and are not swept up. All remaining blocks
5669  // are swept up, with coalescing on-the-fly as we sweep up
5670  // contiguous free and/or garbage blocks:
5671  // We need to ensure that the sweeper synchronizes with allocators
5672  // and stop-the-world collectors. In particular, the following
5673  // locks are used:
5674  // . CMS token: if this is held, a stop the world collection cannot occur
5675  // . freelistLock: if this is held no allocation can occur from this
5676  //                 generation by another thread
5677  // . bitMapLock: if this is held, no other thread can access or
5678  //               update the mark bit map
5679
5680  // Note that we need to hold the freelistLock if we use
5681  // block iterate below; else the iterator might go awry if
5682  // a mutator (or promotion) causes block contents to change
5683  // (for instance if the allocator divvies up a block).
5684  // If we hold the free list lock, for all practical purposes
5685  // young generation GC's can't occur (they'll usually need to
5686  // promote), so we might as well prevent all young generation
5687  // GC's while we do a sweeping step. For the same reason, we might
5688  // as well take the bit map lock for the entire duration of the sweep.
5689
5690  // check that we hold the requisite locks
5691  assert(have_cms_token(), "Should hold cms token");
5692  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5693  assert_lock_strong(gen->freelistLock());
5694  assert_lock_strong(bitMapLock());
5695
5696  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5697  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5698  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5699                                      _inter_sweep_estimate.padded_average(),
5700                                      _intra_sweep_estimate.padded_average());
5701  gen->setNearLargestChunk();
5702
5703  {
5704    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
5705    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5706    // We need to free-up/coalesce garbage/blocks from a
5707    // co-terminal free run. This is done in the SweepClosure
5708    // destructor; so, do not remove this scope, else the
5709    // end-of-sweep-census below will be off by a little bit.
5710  }
5711  gen->cmsSpace()->sweep_completed();
5712  gen->cmsSpace()->endSweepFLCensus(sweep_count());
5713  if (should_unload_classes()) {                // unloaded classes this cycle,
5714    _concurrent_cycles_since_last_unload = 0;   // ... reset count
5715  } else {                                      // did not unload classes,
5716    _concurrent_cycles_since_last_unload++;     // ... increment count
5717  }
5718}
5719
5720// Reset CMS data structures (for now just the marking bit map)
5721// preparatory for the next cycle.
5722void CMSCollector::reset(bool concurrent) {
5723  if (concurrent) {
5724    CMSTokenSyncWithLocks ts(true, bitMapLock());
5725
5726    // If the state is not "Resetting", the foreground thread
5727    // has already done a collection and the resetting.
5728    if (_collectorState != Resetting) {
5729      assert(_collectorState == Idling, "The state should only change"
5730        " because the foreground collector has finished the collection");
5731      return;
5732    }
5733
5734    // Clear the mark bitmap (no grey objects to start with)
5735    // for the next cycle.
5736    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5737    CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5738
5739    HeapWord* curAddr = _markBitMap.startWord();
5740    while (curAddr < _markBitMap.endWord()) {
5741      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5742      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5743      _markBitMap.clear_large_range(chunk);
5744      if (ConcurrentMarkSweepThread::should_yield() &&
5745          !foregroundGCIsActive() &&
5746          CMSYield) {
5747        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5748               "CMS thread should hold CMS token");
5749        assert_lock_strong(bitMapLock());
5750        bitMapLock()->unlock();
5751        ConcurrentMarkSweepThread::desynchronize(true);
5752        stopTimer();
5753        if (PrintCMSStatistics != 0) {
5754          incrementYields();
5755        }
5756
5757        // See the comment in coordinator_yield()
5758        for (unsigned i = 0; i < CMSYieldSleepCount &&
5759                         ConcurrentMarkSweepThread::should_yield() &&
5760                         !CMSCollector::foregroundGCIsActive(); ++i) {
5761          os::sleep(Thread::current(), 1, false);
5762        }
5763
5764        ConcurrentMarkSweepThread::synchronize(true);
5765        bitMapLock()->lock_without_safepoint_check();
5766        startTimer();
5767      }
5768      curAddr = chunk.end();
5769    }
5770    // A successful mostly concurrent collection has been done.
5771    // Because only the full (i.e., concurrent mode failure) collections
5772    // are being measured for gc overhead limits, clean the "near" flag
5773    // and count.
5774    size_policy()->reset_gc_overhead_limit_count();
5775    _collectorState = Idling;
5776  } else {
5777    // already have the lock
5778    assert(_collectorState == Resetting, "just checking");
5779    assert_lock_strong(bitMapLock());
5780    _markBitMap.clear_all();
5781    _collectorState = Idling;
5782  }
5783
5784  register_gc_end();
5785}
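// In the concurrent path above, the bit map is cleared in chunks of
// CMSBitMapYieldQuantum words rather than in a single call, so the CMS
// thread can drop the bitMapLock between chunks whenever a yield is
// requested (see the should_yield()/foregroundGCIsActive() checks). The
// non-concurrent path already owns the lock for the duration and simply
// clears the whole map in one go.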
5786
5787void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5788  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5789  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
5790  TraceCollectorStats tcs(counters());
5791
5792  switch (op) {
5793    case CMS_op_checkpointRootsInitial: {
5794      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5795      checkpointRootsInitial();
5796      if (PrintGC) {
5797        _cmsGen->printOccupancy("initial-mark");
5798      }
5799      break;
5800    }
5801    case CMS_op_checkpointRootsFinal: {
5802      SvcGCMarker sgcm(SvcGCMarker::OTHER);
5803      checkpointRootsFinal();
5804      if (PrintGC) {
5805        _cmsGen->printOccupancy("remark");
5806      }
5807      break;
5808    }
5809    default:
5810      fatal("No such CMS_op");
5811  }
5812}
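// These are the only two stop-the-world pauses CMS schedules for itself:
// CMS_op_checkpointRootsInitial is the initial-mark pause and
// CMS_op_checkpointRootsFinal is the remark pause. Both are executed on
// the VM thread at a safepoint via the VM_CMS_* operations (see
// vmCMSOperations); all the rest of the cycle runs concurrently on the
// CMS thread between and after these pauses.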
5813
5814#ifndef PRODUCT
5815size_t const CMSCollector::skip_header_HeapWords() {
5816  return FreeChunk::header_size();
5817}
5818
5819// Try and collect here conditions that should hold when
5820// CMS thread is exiting. The idea is that the foreground GC
5821// thread should not be blocked if it wants to terminate
5822// the CMS thread and yet continue to run the VM for a while
5823// after that.
5824void CMSCollector::verify_ok_to_terminate() const {
5825  assert(Thread::current()->is_ConcurrentGC_thread(),
5826         "should be called by CMS thread");
5827  assert(!_foregroundGCShouldWait, "should be false");
5828  // We could check here that all the various low-level locks
5829  // are not held by the CMS thread, but that is overkill; see
5830  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5831  // is checked.
5832}
5833#endif
5834
5835size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5836  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5837         "missing Printezis mark?");
5838  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5839  size_t size = pointer_delta(nextOneAddr + 1, addr);
5840  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5841         "alignment problem");
5842  assert(size >= 3, "Necessary for Printezis marks to work");
5843  return size;
5844}
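// How the Printezis ("P") bits encode a block size, as the asserts above
// assume: for a block whose contents cannot be scanned yet, the bit for
// the block's first word is set (the ordinary mark), the bit for the
// second word is set (the "P" bit meaning "the size is encoded in the bit
// map"), and the bit for the block's last word is set. So, with one bit
// per heap word and marks at offsets 0, 1 and 6 from addr (illustrative
// numbers), getNextMarkedWordAddress(addr + 2) returns addr + 6 and the
// computed size is pointer_delta(addr + 7, addr) == 7 words.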
5845
5846// A variant of the above (block_size_using_printezis_bits()) except
5847// that we return 0 if the P-bits are not yet set.
5848size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5849  if (_markBitMap.isMarked(addr + 1)) {
5850    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5851    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5852    size_t size = pointer_delta(nextOneAddr + 1, addr);
5853    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5854           "alignment problem");
5855    assert(size >= 3, "Necessary for Printezis marks to work");
5856    return size;
5857  }
5858  return 0;
5859}
5860
5861HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5862  size_t sz = 0;
5863  oop p = (oop)addr;
5864  if (p->klass_or_null() != NULL) {
5865    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5866  } else {
5867    sz = block_size_using_printezis_bits(addr);
5868  }
5869  assert(sz > 0, "size must be nonzero");
5870  HeapWord* next_block = addr + sz;
5871  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5872                                             CardTableModRefBS::card_size);
5873  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5874         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5875         "must be different cards");
5876  return next_card;
5877}
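// Worked example (the sizes are the usual defaults, not guarantees): with
// 512-byte cards and 8-byte heap words a card spans 64 words. If addr sits
// 10 words into a card and the block is 100 words long, next_block lands
// 46 words into the following card and next_card is rounded up to the
// start of the card after that one.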
5878
5879
5880// CMS Bit Map Wrapper /////////////////////////////////////////
5881
5882// Construct a CMS bit map infrastructure, but don't create the
5883// bit vector itself. That is done by a separate call CMSBitMap::allocate()
5884// further below.
5885CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5886  _bm(),
5887  _shifter(shifter),
5888  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5889                                    Monitor::_safepoint_check_sometimes) : NULL)
5890{
5891  _bmStartWord = 0;
5892  _bmWordSize  = 0;
5893}
5894
5895bool CMSBitMap::allocate(MemRegion mr) {
5896  _bmStartWord = mr.start();
5897  _bmWordSize  = mr.word_size();
5898  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5899                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5900  if (!brs.is_reserved()) {
5901    warning("CMS bit map allocation failure");
5902    return false;
5903  }
5904  // For now we'll just commit all of the bit map up front.
5905  // Later on we'll try to be more parsimonious with swap.
5906  if (!_virtual_space.initialize(brs, brs.size())) {
5907    warning("CMS bit map backing store failure");
5908    return false;
5909  }
5910  assert(_virtual_space.committed_size() == brs.size(),
5911         "didn't reserve backing store for all of CMS bit map?");
5912  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5913  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5914         _bmWordSize, "inconsistency in bit map sizing");
5915  _bm.set_size(_bmWordSize >> _shifter);
5916
5917  // bm.clear(); // can we rely on getting zero'd memory? verify below
5918  assert(isAllClear(),
5919         "Expected zero'd memory from ReservedSpace constructor");
5920  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5921         "consistency check");
5922  return true;
5923}
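// Sizing, for reference: with _shifter == 0 the map keeps one bit per heap
// word, so the backing store is _bmWordSize / BitsPerByte bytes (plus the
// one byte of slop reserved above). As an illustrative example on a 64-bit
// VM, a 1 GB covered region is 2^27 heap words and therefore needs about
// 16 MB of bit map; a non-zero _shifter divides that by 2^_shifter.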
5924
5925void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5926  HeapWord *next_addr, *end_addr, *last_addr;
5927  assert_locked();
5928  assert(covers(mr), "out-of-range error");
5929  // XXX assert that start and end are appropriately aligned
5930  for (next_addr = mr.start(), end_addr = mr.end();
5931       next_addr < end_addr; next_addr = last_addr) {
5932    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5933    last_addr = dirty_region.end();
5934    if (!dirty_region.is_empty()) {
5935      cl->do_MemRegion(dirty_region);
5936    } else {
5937      assert(last_addr == end_addr, "program logic");
5938      return;
5939    }
5940  }
5941}
5942
5943void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5944  _bm.print_on_error(st, prefix);
5945}
5946
5947#ifndef PRODUCT
5948void CMSBitMap::assert_locked() const {
5949  CMSLockVerifier::assert_locked(lock());
5950}
5951
5952bool CMSBitMap::covers(MemRegion mr) const {
5953  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5954  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5955         "size inconsistency");
5956  return (mr.start() >= _bmStartWord) &&
5957         (mr.end()   <= endWord());
5958}
5959
5960bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5961    return (start >= _bmStartWord && (start + size) <= endWord());
5962}
5963
5964void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5965  // verify that there are no 1 bits in the interval [left, right)
5966  FalseBitMapClosure falseBitMapClosure;
5967  iterate(&falseBitMapClosure, left, right);
5968}
5969
5970void CMSBitMap::region_invariant(MemRegion mr)
5971{
5972  assert_locked();
5973  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5974  assert(!mr.is_empty(), "unexpected empty region");
5975  assert(covers(mr), "mr should be covered by bit map");
5976  // convert address range into offset range
5977  size_t start_ofs = heapWordToOffset(mr.start());
5978  // Make sure that end() is appropriately aligned
5979  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5980                        (1 << (_shifter+LogHeapWordSize))),
5981         "Misaligned mr.end()");
5982  size_t end_ofs   = heapWordToOffset(mr.end());
5983  assert(end_ofs > start_ofs, "Should mark at least one bit");
5984}
5985
5986#endif
5987
5988bool CMSMarkStack::allocate(size_t size) {
5989  // allocate a stack of the requisite depth
5990  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5991                   size * sizeof(oop)));
5992  if (!rs.is_reserved()) {
5993    warning("CMSMarkStack allocation failure");
5994    return false;
5995  }
5996  if (!_virtual_space.initialize(rs, rs.size())) {
5997    warning("CMSMarkStack backing store failure");
5998    return false;
5999  }
6000  assert(_virtual_space.committed_size() == rs.size(),
6001         "didn't reserve backing store for all of CMS stack?");
6002  _base = (oop*)(_virtual_space.low());
6003  _index = 0;
6004  _capacity = size;
6005  NOT_PRODUCT(_max_depth = 0);
6006  return true;
6007}
6008
6009// XXX FIX ME !!! In the MT case we come in here holding a
6010// leaf lock. For printing we need to take a further lock
6011// which has lower rank. We need to recalibrate the two
6012// lock-ranks involved in order to be able to print the
6013// messages below. (Or defer the printing to the caller.
6014// For now we take the expedient path of just disabling the
6015// messages for the problematic case.)
6016void CMSMarkStack::expand() {
6017  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6018  if (_capacity == MarkStackSizeMax) {
6019    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6020      // We print a warning message only once per CMS cycle.
6021      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6022    }
6023    return;
6024  }
6025  // Double capacity if possible
6026  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6027  // Do not give up existing stack until we have managed to
6028  // get the double capacity that we desired.
6029  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6030                   new_capacity * sizeof(oop)));
6031  if (rs.is_reserved()) {
6032    // Release the backing store associated with old stack
6033    _virtual_space.release();
6034    // Reinitialize virtual space for new stack
6035    if (!_virtual_space.initialize(rs, rs.size())) {
6036      fatal("Not enough swap for expanded marking stack");
6037    }
6038    _base = (oop*)(_virtual_space.low());
6039    _index = 0;
6040    _capacity = new_capacity;
6041  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6042    // Failed to double capacity; continue.
6043    // We print a detail message only once per CMS cycle.
6044    gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
6045            SIZE_FORMAT "K",
6046            _capacity / K, new_capacity / K);
6047  }
6048}
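// Expansion doubles the capacity (capped at MarkStackSizeMax). The new
// backing store is reserved first and the old one released only once that
// reservation succeeds, so a failed expansion leaves the existing stack
// usable. Note that _index is reset to zero, which implies callers only
// expand a logically empty stack; the _hit_limit and _failed_double
// counters merely keep the messages above to one per CMS cycle.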
6049
6050
6051// Closures
6052// XXX: there seems to be a lot of code  duplication here;
6053// should refactor and consolidate common code.
6054
6055// This closure is used to mark refs into the CMS generation in
6056// the CMS bit map. Called at the first checkpoint. This closure
6057// assumes that we do not need to re-mark dirty cards; if the CMS
6058// generation on which this is used is not an oldest
6059// generation then this will lose younger_gen cards!
6060
6061MarkRefsIntoClosure::MarkRefsIntoClosure(
6062  MemRegion span, CMSBitMap* bitMap):
6063    _span(span),
6064    _bitMap(bitMap)
6065{
6066    assert(_ref_processor == NULL, "deliberately left NULL");
6067    assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6068}
6069
6070void MarkRefsIntoClosure::do_oop(oop obj) {
6071  // if p points into _span, then mark corresponding bit in _markBitMap
6072  assert(obj->is_oop(), "expected an oop");
6073  HeapWord* addr = (HeapWord*)obj;
6074  if (_span.contains(addr)) {
6075    // this should be made more efficient
6076    _bitMap->mark(addr);
6077  }
6078}
6079
6080void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6081void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6082
6083Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6084  MemRegion span, CMSBitMap* bitMap):
6085    _span(span),
6086    _bitMap(bitMap)
6087{
6088    assert(_ref_processor == NULL, "deliberately left NULL");
6089    assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6090}
6091
6092void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6093  // if p points into _span, then mark corresponding bit in _markBitMap
6094  assert(obj->is_oop(), "expected an oop");
6095  HeapWord* addr = (HeapWord*)obj;
6096  if (_span.contains(addr)) {
6097    // this should be made more efficient
6098    _bitMap->par_mark(addr);
6099  }
6100}
6101
6102void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6103void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6104
6105// A variant of the above, used for CMS marking verification.
6106MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6107  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6108    _span(span),
6109    _verification_bm(verification_bm),
6110    _cms_bm(cms_bm)
6111{
6112    assert(_ref_processor == NULL, "deliberately left NULL");
6113    assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6114}
6115
6116void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6117  // if p points into _span, then mark corresponding bit in _markBitMap
6118  assert(obj->is_oop(), "expected an oop");
6119  HeapWord* addr = (HeapWord*)obj;
6120  if (_span.contains(addr)) {
6121    _verification_bm->mark(addr);
6122    if (!_cms_bm->isMarked(addr)) {
6123      oop(addr)->print();
6124      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6125      fatal("... aborting");
6126    }
6127  }
6128}
6129
6130void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6131void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6132
6133//////////////////////////////////////////////////
6134// MarkRefsIntoAndScanClosure
6135//////////////////////////////////////////////////
6136
6137MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6138                                                       ReferenceProcessor* rp,
6139                                                       CMSBitMap* bit_map,
6140                                                       CMSBitMap* mod_union_table,
6141                                                       CMSMarkStack*  mark_stack,
6142                                                       CMSCollector* collector,
6143                                                       bool should_yield,
6144                                                       bool concurrent_precleaning):
6145  _collector(collector),
6146  _span(span),
6147  _bit_map(bit_map),
6148  _mark_stack(mark_stack),
6149  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6150                      mark_stack, concurrent_precleaning),
6151  _yield(should_yield),
6152  _concurrent_precleaning(concurrent_precleaning),
6153  _freelistLock(NULL)
6154{
6155  _ref_processor = rp;
6156  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6157}
6158
6159// This closure is used to mark refs into the CMS generation at the
6160// second (final) checkpoint, and to scan and transitively follow
6161// the unmarked oops. It is also used during the concurrent precleaning
6162// phase while scanning objects on dirty cards in the CMS generation.
6163// The marks are made in the marking bit map and the marking stack is
6164// used for keeping the (newly) grey objects during the scan.
6165// The parallel version (Par_...) appears further below.
6166void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6167  if (obj != NULL) {
6168    assert(obj->is_oop(), "expected an oop");
6169    HeapWord* addr = (HeapWord*)obj;
6170    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6171    assert(_collector->overflow_list_is_empty(),
6172           "overflow list should be empty");
6173    if (_span.contains(addr) &&
6174        !_bit_map->isMarked(addr)) {
6175      // mark bit map (object is now grey)
6176      _bit_map->mark(addr);
6177      // push on marking stack (stack should be empty), and drain the
6178      // stack by applying this closure to the oops in the oops popped
6179      // from the stack (i.e. blacken the grey objects)
6180      bool res = _mark_stack->push(obj);
6181      assert(res, "Should have space to push on empty stack");
6182      do {
6183        oop new_oop = _mark_stack->pop();
6184        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6185        assert(_bit_map->isMarked((HeapWord*)new_oop),
6186               "only grey objects on this stack");
6187        // iterate over the oops in this oop, marking and pushing
6188        // the ones in CMS heap (i.e. in _span).
6189        new_oop->oop_iterate(&_pushAndMarkClosure);
6190        // check if it's time to yield
6191        do_yield_check();
6192      } while (!_mark_stack->isEmpty() ||
6193               (!_concurrent_precleaning && take_from_overflow_list()));
6194        // if marking stack is empty, and we are not doing this
6195        // during precleaning, then check the overflow list
6196    }
6197    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6198    assert(_collector->overflow_list_is_empty(),
6199           "overflow list was drained above");
6200    // We could restore evacuated mark words, if any, used for
6201    // overflow list links here because the overflow list is
6202    // provably empty here. That would reduce the maximum
6203    // size requirements for preserved_{oop,mark}_stack.
6204    // But we'll just postpone it until we are all done
6205    // so we can just stream through.
6206    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6207      _collector->restore_preserved_marks_if_any();
6208      assert(_collector->no_preserved_marks(), "No preserved marks");
6209    }
6210    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6211           "All preserved marks should have been restored above");
6212  }
6213}
6214
6215void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6216void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6217
6218void MarkRefsIntoAndScanClosure::do_yield_work() {
6219  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6220         "CMS thread should hold CMS token");
6221  assert_lock_strong(_freelistLock);
6222  assert_lock_strong(_bit_map->lock());
6223  // relinquish the freelistLock and bitMapLock()
6224  _bit_map->lock()->unlock();
6225  _freelistLock->unlock();
6226  ConcurrentMarkSweepThread::desynchronize(true);
6227  _collector->stopTimer();
6228  if (PrintCMSStatistics != 0) {
6229    _collector->incrementYields();
6230  }
6231
6232  // See the comment in coordinator_yield()
6233  for (unsigned i = 0;
6234       i < CMSYieldSleepCount &&
6235       ConcurrentMarkSweepThread::should_yield() &&
6236       !CMSCollector::foregroundGCIsActive();
6237       ++i) {
6238    os::sleep(Thread::current(), 1, false);
6239  }
6240
6241  ConcurrentMarkSweepThread::synchronize(true);
6242  _freelistLock->lock_without_safepoint_check();
6243  _bit_map->lock()->lock_without_safepoint_check();
6244  _collector->startTimer();
6245}
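// The yield protocol above is the same shape as in the other
// do_yield_work() variants in this file: release the locks the closure
// holds (here the free list lock and the bit map lock), give up the CMS
// token via desynchronize(), stop the phase timer, sleep in 1 ms steps up
// to CMSYieldSleepCount times while a yield is still being requested, then
// reacquire the token and the locks (without a safepoint check) and
// restart the timer. This is what allows a scavenge or the foreground
// collector to get at these locks in the middle of a concurrent phase.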
6246
6247///////////////////////////////////////////////////////////
6248// Par_MarkRefsIntoAndScanClosure: a parallel version of
6249//                                 MarkRefsIntoAndScanClosure
6250///////////////////////////////////////////////////////////
6251Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6252  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6253  CMSBitMap* bit_map, OopTaskQueue* work_queue):
6254  _span(span),
6255  _bit_map(bit_map),
6256  _work_queue(work_queue),
6257  _low_water_mark(MIN2((work_queue->max_elems()/4),
6258                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6259  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6260{
6261  _ref_processor = rp;
6262  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6263}
6264
6265// This closure is used to mark refs into the CMS generation at the
6266// second (final) checkpoint, and to scan and transitively follow
6267// the unmarked oops. The marks are made in the marking bit map and
6268// the work_queue is used for keeping the (newly) grey objects during
6269// the scan phase whence they are also available for stealing by parallel
6270// threads. Since the marking bit map is shared, updates are
6271// synchronized (via CAS).
6272void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6273  if (obj != NULL) {
6274    // Ignore mark word because this could be an already marked oop
6275    // that may be chained at the end of the overflow list.
6276    assert(obj->is_oop(true), "expected an oop");
6277    HeapWord* addr = (HeapWord*)obj;
6278    if (_span.contains(addr) &&
6279        !_bit_map->isMarked(addr)) {
6280      // mark bit map (object will become grey):
6281      // It is possible for several threads to be
6282      // trying to "claim" this object concurrently;
6283      // the unique thread that succeeds in marking the
6284      // object first will do the subsequent push on
6285      // to the work queue (or overflow list).
6286      if (_bit_map->par_mark(addr)) {
6287        // push on work_queue (which may not be empty), and trim the
6288        // queue to an appropriate length by applying this closure to
6289        // the oops in the oops popped from the stack (i.e. blacken the
6290        // grey objects)
6291        bool res = _work_queue->push(obj);
6292        assert(res, "Low water mark should be less than capacity?");
6293        trim_queue(_low_water_mark);
6294      } // Else, another thread claimed the object
6295    }
6296  }
6297}
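// Unlike the serial closure above, which drains its marking stack eagerly
// after every push, the parallel closure lets the work queue grow to
// _low_water_mark entries before trimming it. That bounds the queue's
// footprint while still leaving some grey objects queued where idle
// workers can steal them. Note that only the thread that wins the
// par_mark() CAS pushes the object, so each grey object is queued exactly
// once.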
6298
6299void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6300void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6301
6302// This closure is used to rescan the marked objects on the dirty cards
6303// in the mod union table and the card table proper.
6304size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6305  oop p, MemRegion mr) {
6306
6307  size_t size = 0;
6308  HeapWord* addr = (HeapWord*)p;
6309  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6310  assert(_span.contains(addr), "we are scanning the CMS generation");
6311  // check if it's time to yield
6312  if (do_yield_check()) {
6313    // We yielded for some foreground stop-world work,
6314    // and we have been asked to abort this ongoing preclean cycle.
6315    return 0;
6316  }
6317  if (_bitMap->isMarked(addr)) {
6318    // it's marked; is it potentially uninitialized?
6319    if (p->klass_or_null() != NULL) {
6320        // an initialized object; ignore mark word in verification below
6321        // since we are running concurrent with mutators
6322        assert(p->is_oop(true), "should be an oop");
6323        if (p->is_objArray()) {
6324          // objArrays are precisely marked; restrict scanning
6325          // to dirty cards only.
6326          size = CompactibleFreeListSpace::adjustObjectSize(
6327                   p->oop_iterate(_scanningClosure, mr));
6328        } else {
6329          // A non-array may have been imprecisely marked; we need
6330          // to scan object in its entirety.
6331          size = CompactibleFreeListSpace::adjustObjectSize(
6332                   p->oop_iterate(_scanningClosure));
6333        }
6334        #ifdef ASSERT
6335          size_t direct_size =
6336            CompactibleFreeListSpace::adjustObjectSize(p->size());
6337          assert(size == direct_size, "Inconsistency in size");
6338          assert(size >= 3, "Necessary for Printezis marks to work");
6339          if (!_bitMap->isMarked(addr+1)) {
6340            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6341          } else {
6342            _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6343            assert(_bitMap->isMarked(addr+size-1),
6344                   "inconsistent Printezis mark");
6345          }
6346        #endif // ASSERT
6347    } else {
6348      // An uninitialized object.
6349      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6350      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6351      size = pointer_delta(nextOneAddr + 1, addr);
6352      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6353             "alignment problem");
6354      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6355      // will dirty the card when the klass pointer is installed in the
6356      // object (signaling the completion of initialization).
6357    }
6358  } else {
6359    // Either a not yet marked object or an uninitialized object
6360    if (p->klass_or_null() == NULL) {
6361      // An uninitialized object, skip to the next card, since
6362      // we may not be able to read its P-bits yet.
6363      assert(size == 0, "Initial value");
6364    } else {
6365      // An object not (yet) reached by marking: we merely need to
6366      // compute its size so as to go look at the next block.
6367      assert(p->is_oop(true), "should be an oop");
6368      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6369    }
6370  }
6371  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6372  return size;
6373}
6374
6375void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6376  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6377         "CMS thread should hold CMS token");
6378  assert_lock_strong(_freelistLock);
6379  assert_lock_strong(_bitMap->lock());
6380  // relinquish the freelistLock and bitMapLock()
6381  _bitMap->lock()->unlock();
6382  _freelistLock->unlock();
6383  ConcurrentMarkSweepThread::desynchronize(true);
6384  _collector->stopTimer();
6385  if (PrintCMSStatistics != 0) {
6386    _collector->incrementYields();
6387  }
6388
6389  // See the comment in coordinator_yield()
6390  for (unsigned i = 0; i < CMSYieldSleepCount &&
6391                   ConcurrentMarkSweepThread::should_yield() &&
6392                   !CMSCollector::foregroundGCIsActive(); ++i) {
6393    os::sleep(Thread::current(), 1, false);
6394  }
6395
6396  ConcurrentMarkSweepThread::synchronize(true);
6397  _freelistLock->lock_without_safepoint_check();
6398  _bitMap->lock()->lock_without_safepoint_check();
6399  _collector->startTimer();
6400}
6401
6402
6403//////////////////////////////////////////////////////////////////
6404// SurvivorSpacePrecleanClosure
6405//////////////////////////////////////////////////////////////////
6406// This (single-threaded) closure is used to preclean the oops in
6407// the survivor spaces.
6408size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6409
6410  HeapWord* addr = (HeapWord*)p;
6411  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6412  assert(!_span.contains(addr), "we are scanning the survivor spaces");
6413  assert(p->klass_or_null() != NULL, "object should be initialized");
6414  // an initialized object; ignore mark word in verification below
6415  // since we are running concurrent with mutators
6416  assert(p->is_oop(true), "should be an oop");
6417  // Note that we do not yield while we iterate over
6418  // the interior oops of p, pushing the relevant ones
6419  // on our marking stack.
6420  size_t size = p->oop_iterate(_scanning_closure);
6421  do_yield_check();
6422  // Observe that below, we do not abandon the preclean
6423  // phase as soon as we should; rather we empty the
6424  // marking stack before returning. This is to satisfy
6425  // some existing assertions. In general, it may be a
6426  // good idea to abort immediately and complete the marking
6427  // from the grey objects at a later time.
6428  while (!_mark_stack->isEmpty()) {
6429    oop new_oop = _mark_stack->pop();
6430    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6431    assert(_bit_map->isMarked((HeapWord*)new_oop),
6432           "only grey objects on this stack");
6433    // iterate over the oops in this oop, marking and pushing
6434    // the ones in CMS heap (i.e. in _span).
6435    new_oop->oop_iterate(_scanning_closure);
6436    // check if it's time to yield
6437    do_yield_check();
6438  }
6439  unsigned int after_count =
6440    GenCollectedHeap::heap()->total_collections();
6441  bool abort = (_before_count != after_count) ||
6442               _collector->should_abort_preclean();
6443  return abort ? 0 : size;
6444}
6445
6446void SurvivorSpacePrecleanClosure::do_yield_work() {
6447  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6448         "CMS thread should hold CMS token");
6449  assert_lock_strong(_bit_map->lock());
6450  // Relinquish the bit map lock
6451  _bit_map->lock()->unlock();
6452  ConcurrentMarkSweepThread::desynchronize(true);
6453  _collector->stopTimer();
6454  if (PrintCMSStatistics != 0) {
6455    _collector->incrementYields();
6456  }
6457
6458  // See the comment in coordinator_yield()
6459  for (unsigned i = 0; i < CMSYieldSleepCount &&
6460                       ConcurrentMarkSweepThread::should_yield() &&
6461                       !CMSCollector::foregroundGCIsActive(); ++i) {
6462    os::sleep(Thread::current(), 1, false);
6463  }
6464
6465  ConcurrentMarkSweepThread::synchronize(true);
6466  _bit_map->lock()->lock_without_safepoint_check();
6467  _collector->startTimer();
6468}
6469
6470// This closure is used to rescan the marked objects on the dirty cards
6471// in the mod union table and the card table proper. In the parallel
6472// case, although the bitMap is shared, we do a single read so the
6473// isMarked() query is "safe".
6474bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6475  // Ignore mark word because we are running concurrent with mutators
6476  assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
6477  HeapWord* addr = (HeapWord*)p;
6478  assert(_span.contains(addr), "we are scanning the CMS generation");
6479  bool is_obj_array = false;
6480  #ifdef ASSERT
6481    if (!_parallel) {
6482      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6483      assert(_collector->overflow_list_is_empty(),
6484             "overflow list should be empty");
6485
6486    }
6487  #endif // ASSERT
6488  if (_bit_map->isMarked(addr)) {
6489    // Obj arrays are precisely marked, non-arrays are not;
6490    // so we scan objArrays precisely and non-arrays in their
6491    // entirety.
6492    if (p->is_objArray()) {
6493      is_obj_array = true;
6494      if (_parallel) {
6495        p->oop_iterate(_par_scan_closure, mr);
6496      } else {
6497        p->oop_iterate(_scan_closure, mr);
6498      }
6499    } else {
6500      if (_parallel) {
6501        p->oop_iterate(_par_scan_closure);
6502      } else {
6503        p->oop_iterate(_scan_closure);
6504      }
6505    }
6506  }
6507  #ifdef ASSERT
6508    if (!_parallel) {
6509      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6510      assert(_collector->overflow_list_is_empty(),
6511             "overflow list should be empty");
6512
6513    }
6514  #endif // ASSERT
6515  return is_obj_array;
6516}
6517
6518MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6519                        MemRegion span,
6520                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
6521                        bool should_yield, bool verifying):
6522  _collector(collector),
6523  _span(span),
6524  _bitMap(bitMap),
6525  _mut(&collector->_modUnionTable),
6526  _markStack(markStack),
6527  _yield(should_yield),
6528  _skipBits(0)
6529{
6530  assert(_markStack->isEmpty(), "stack should be empty");
6531  _finger = _bitMap->startWord();
6532  _threshold = _finger;
6533  assert(_collector->_restart_addr == NULL, "Sanity check");
6534  assert(_span.contains(_finger), "Out of bounds _finger?");
6535  DEBUG_ONLY(_verifying = verifying;)
6536}
6537
6538void MarkFromRootsClosure::reset(HeapWord* addr) {
6539  assert(_markStack->isEmpty(), "would cause duplicates on stack");
6540  assert(_span.contains(addr), "Out of bounds _finger?");
6541  _finger = addr;
6542  _threshold = (HeapWord*)round_to(
6543                 (intptr_t)_finger, CardTableModRefBS::card_size);
6544}
6545
6546// Should revisit to see if this should be restructured for
6547// greater efficiency.
6548bool MarkFromRootsClosure::do_bit(size_t offset) {
6549  if (_skipBits > 0) {
6550    _skipBits--;
6551    return true;
6552  }
6553  // convert offset into a HeapWord*
6554  HeapWord* addr = _bitMap->startWord() + offset;
6555  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6556         "address out of range");
6557  assert(_bitMap->isMarked(addr), "tautology");
6558  if (_bitMap->isMarked(addr+1)) {
6559    // this is an allocated but not yet initialized object
6560    assert(_skipBits == 0, "tautology");
6561    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6562    oop p = oop(addr);
6563    if (p->klass_or_null() == NULL) {
6564      DEBUG_ONLY(if (!_verifying) {)
6565        // We re-dirty the cards on which this object lies and increase
6566        // the _threshold so that we'll come back to scan this object
6567        // during the preclean or remark phase. (CMSCleanOnEnter)
6568        if (CMSCleanOnEnter) {
6569          size_t sz = _collector->block_size_using_printezis_bits(addr);
6570          HeapWord* end_card_addr   = (HeapWord*)round_to(
6571                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6572          MemRegion redirty_range = MemRegion(addr, end_card_addr);
6573          assert(!redirty_range.is_empty(), "Arithmetical tautology");
6574          // Bump _threshold to end_card_addr; note that
6575          // _threshold cannot possibly exceed end_card_addr, anyhow.
6576          // This prevents future clearing of the card as the scan proceeds
6577          // to the right.
6578          assert(_threshold <= end_card_addr,
6579                 "Because we are just scanning into this object");
6580          if (_threshold < end_card_addr) {
6581            _threshold = end_card_addr;
6582          }
6583          if (p->klass_or_null() != NULL) {
6584            // Redirty the range of cards...
6585            _mut->mark_range(redirty_range);
6586          } // ...else the setting of klass will dirty the card anyway.
6587        }
6588      DEBUG_ONLY(})
6589      return true;
6590    }
6591  }
6592  scanOopsInOop(addr);
6593  return true;
6594}
6595
6596// We take a break if we've been at this for a while,
6597// so as to avoid monopolizing the locks involved.
6598void MarkFromRootsClosure::do_yield_work() {
6599  // First give up the locks, then yield, then re-lock
6600  // We should probably use a constructor/destructor idiom to
6601  // do this unlock/lock or modify the MutexUnlocker class to
6602  // serve our purpose. XXX
6603  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6604         "CMS thread should hold CMS token");
6605  assert_lock_strong(_bitMap->lock());
6606  _bitMap->lock()->unlock();
6607  ConcurrentMarkSweepThread::desynchronize(true);
6608  _collector->stopTimer();
6609  if (PrintCMSStatistics != 0) {
6610    _collector->incrementYields();
6611  }
6612
6613  // See the comment in coordinator_yield()
6614  for (unsigned i = 0; i < CMSYieldSleepCount &&
6615                       ConcurrentMarkSweepThread::should_yield() &&
6616                       !CMSCollector::foregroundGCIsActive(); ++i) {
6617    os::sleep(Thread::current(), 1, false);
6618  }
6619
6620  ConcurrentMarkSweepThread::synchronize(true);
6621  _bitMap->lock()->lock_without_safepoint_check();
6622  _collector->startTimer();
6623}
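
// A possible RAII shape for the unlock/yield/relock sequence that the
// "constructor/destructor idiom" comment above alludes to (hypothetical
// sketch only; nothing in this file defines or uses it):
//
//   class BitMapYielder : public StackObj {      // hypothetical helper
//     CMSBitMap* _bm;
//    public:
//     BitMapYielder(CMSBitMap* bm) : _bm(bm) {
//       assert_lock_strong(bm->lock());
//       bm->lock()->unlock();                     // relinquish the bit map lock
//     }
//     ~BitMapYielder() {
//       _bm->lock()->lock_without_safepoint_check();  // re-acquire on exit
//     }
//   };
//
// The desynchronize/sleep/synchronize steps and the timer bookkeeping would
// still sit between construction and destruction, as in do_yield_work() above.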
6624
6625void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6626  assert(_bitMap->isMarked(ptr), "expected bit to be set");
6627  assert(_markStack->isEmpty(),
6628         "should drain stack to limit stack usage");
6629  // convert ptr to an oop preparatory to scanning
6630  oop obj = oop(ptr);
6631  // Ignore mark word in verification below, since we
6632  // may be running concurrent with mutators.
6633  assert(obj->is_oop(true), "should be an oop");
6634  assert(_finger <= ptr, "_finger runneth ahead");
6635  // advance the finger to right end of this object
6636  _finger = ptr + obj->size();
6637  assert(_finger > ptr, "we just incremented it above");
6638  // On large heaps, it may take us some time to get through
6639  // the marking phase. During this time it's possible that a lot
6640  // of mutations have accumulated in the card table and the mod
6641  // union table --
6642  // these mutation records are redundant until we have
6643  // actually traced into the corresponding card.
6644  // Here, we check whether advancing the finger would make
6645  // us cross into a new card, and if so clear corresponding
6646  // cards in the MUT (preclean them in the card-table in the
6647  // future).
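  // A concrete illustration (hypothetical addresses, and assuming the usual
  // 512-byte cards): if _threshold is card-aligned at 0x1000 and the new
  // _finger lands at 0x1234, the code below bumps _threshold up to 0x1400
  // and clears the mod union table bits for the cards covering
  // [0x1000, 0x1400).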
6648
6649  DEBUG_ONLY(if (!_verifying) {)
6650    // The clean-on-enter optimization is disabled by default,
6651    // until we fix 6178663.
6652    if (CMSCleanOnEnter && (_finger > _threshold)) {
6653      // [_threshold, _finger) represents the interval
6654      // of cards to be cleared in MUT (or precleaned in card table).
6655      // The set of cards to be cleared is all those that overlap
6656      // with the interval [_threshold, _finger); note that
6657      // _threshold is always kept card-aligned but _finger isn't
6658      // always card-aligned.
6659      HeapWord* old_threshold = _threshold;
6660      assert(old_threshold == (HeapWord*)round_to(
6661              (intptr_t)old_threshold, CardTableModRefBS::card_size),
6662             "_threshold should always be card-aligned");
6663      _threshold = (HeapWord*)round_to(
6664                     (intptr_t)_finger, CardTableModRefBS::card_size);
6665      MemRegion mr(old_threshold, _threshold);
6666      assert(!mr.is_empty(), "Control point invariant");
6667      assert(_span.contains(mr), "Should clear within span");
6668      _mut->clear_range(mr);
6669    }
6670  DEBUG_ONLY(})
6671  // Note: the finger doesn't advance while we drain
6672  // the stack below.
6673  PushOrMarkClosure pushOrMarkClosure(_collector,
6674                                      _span, _bitMap, _markStack,
6675                                      _finger, this);
6676  bool res = _markStack->push(obj);
6677  assert(res, "Empty non-zero size stack should have space for single push");
6678  while (!_markStack->isEmpty()) {
6679    oop new_oop = _markStack->pop();
6680    // Skip verifying header mark word below because we are
6681    // running concurrent with mutators.
6682    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6683    // now scan this oop's oops
6684    new_oop->oop_iterate(&pushOrMarkClosure);
6685    do_yield_check();
6686  }
6687  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6688}
6689
6690Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
6691                       CMSCollector* collector, MemRegion span,
6692                       CMSBitMap* bit_map,
6693                       OopTaskQueue* work_queue,
6694                       CMSMarkStack*  overflow_stack):
6695  _collector(collector),
6696  _whole_span(collector->_span),
6697  _span(span),
6698  _bit_map(bit_map),
6699  _mut(&collector->_modUnionTable),
6700  _work_queue(work_queue),
6701  _overflow_stack(overflow_stack),
6702  _skip_bits(0),
6703  _task(task)
6704{
6705  assert(_work_queue->size() == 0, "work_queue should be empty");
6706  _finger = span.start();
6707  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6708  assert(_span.contains(_finger), "Out of bounds _finger?");
6709}
6710
6711// Should revisit to see if this should be restructured for
6712// greater efficiency.
6713bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
6714  if (_skip_bits > 0) {
6715    _skip_bits--;
6716    return true;
6717  }
6718  // convert offset into a HeapWord*
6719  HeapWord* addr = _bit_map->startWord() + offset;
6720  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6721         "address out of range");
6722  assert(_bit_map->isMarked(addr), "tautology");
6723  if (_bit_map->isMarked(addr+1)) {
6724    // this is an allocated object that might not yet be initialized
6725    assert(_skip_bits == 0, "tautology");
6726    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6727    oop p = oop(addr);
6728    if (p->klass_or_null() == NULL) {
6729      // In the case of the Clean-on-Enter optimization, redirty the card
6730      // and avoid clearing the card by increasing the threshold.
6731      return true;
6732    }
6733  }
6734  scan_oops_in_oop(addr);
6735  return true;
6736}
6737
6738void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6739  assert(_bit_map->isMarked(ptr), "expected bit to be set");
6740  // Should we assert that our work queue is empty or
6741  // below some drain limit?
6742  assert(_work_queue->size() == 0,
6743         "should drain stack to limit stack usage");
6744  // convert ptr to an oop preparatory to scanning
6745  oop obj = oop(ptr);
6746  // Ignore mark word in verification below, since we
6747  // may be running concurrent with mutators.
6748  assert(obj->is_oop(true), "should be an oop");
6749  assert(_finger <= ptr, "_finger runneth ahead");
6750  // advance the finger to right end of this object
6751  _finger = ptr + obj->size();
6752  assert(_finger > ptr, "we just incremented it above");
6753  // On large heaps, it may take us some time to get through
6754  // the marking phase. During this time it's possible that a lot
6755  // of mutations have accumulated in the card table and the mod
6756  // union table --
6757  // these mutation records are redundant until we have
6758  // actually traced into the corresponding card.
6759  // Here, we check whether advancing the finger would make
6760  // us cross into a new card, and if so clear corresponding
6761  // cards in the MUT (preclean them in the card-table in the
6762  // future).
6763
6764  // The clean-on-enter optimization is disabled by default,
6765  // until we fix 6178663.
6766  if (CMSCleanOnEnter && (_finger > _threshold)) {
6767    // [_threshold, _finger) represents the interval
6768    // of cards to be cleared in MUT (or precleaned in card table).
6769    // The set of cards to be cleared is all those that overlap
6770    // with the interval [_threshold, _finger); note that
6771    // _threshold is always kept card-aligned but _finger isn't
6772    // always card-aligned.
6773    HeapWord* old_threshold = _threshold;
6774    assert(old_threshold == (HeapWord*)round_to(
6775            (intptr_t)old_threshold, CardTableModRefBS::card_size),
6776           "_threshold should always be card-aligned");
6777    _threshold = (HeapWord*)round_to(
6778                   (intptr_t)_finger, CardTableModRefBS::card_size);
6779    MemRegion mr(old_threshold, _threshold);
6780    assert(!mr.is_empty(), "Control point invariant");
6781    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6782    _mut->clear_range(mr);
6783  }
6784
6785  // Note: the local finger doesn't advance while we drain
6786  // the stack below, but the global finger sure can and will.
6787  HeapWord** gfa = _task->global_finger_addr();
6788  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
6789                                      _span, _bit_map,
6790                                      _work_queue,
6791                                      _overflow_stack,
6792                                      _finger,
6793                                      gfa, this);
6794  bool res = _work_queue->push(obj);   // overflow could occur here
6795  assert(res, "Will hold once we use workqueues");
6796  while (true) {
6797    oop new_oop;
6798    if (!_work_queue->pop_local(new_oop)) {
6799      // We emptied our work_queue; check if there's stuff that can
6800      // be gotten from the overflow stack.
6801      if (CMSConcMarkingTask::get_work_from_overflow_stack(
6802            _overflow_stack, _work_queue)) {
6803        do_yield_check();
6804        continue;
6805      } else {  // done
6806        break;
6807      }
6808    }
6809    // Skip verifying header mark word below because we are
6810    // running concurrent with mutators.
6811    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6812    // now scan this oop's oops
6813    new_oop->oop_iterate(&pushOrMarkClosure);
6814    do_yield_check();
6815  }
6816  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6817}
6818
6819// Yield in response to a request from VM Thread or
6820// from mutators.
6821void Par_MarkFromRootsClosure::do_yield_work() {
6822  assert(_task != NULL, "sanity");
6823  _task->yield();
6824}
6825
6826// A variant of the above used for verifying CMS marking work.
6827MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6828                        MemRegion span,
6829                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6830                        CMSMarkStack*  mark_stack):
6831  _collector(collector),
6832  _span(span),
6833  _verification_bm(verification_bm),
6834  _cms_bm(cms_bm),
6835  _mark_stack(mark_stack),
6836  _pam_verify_closure(collector, span, verification_bm, cms_bm,
6837                      mark_stack)
6838{
6839  assert(_mark_stack->isEmpty(), "stack should be empty");
6840  _finger = _verification_bm->startWord();
6841  assert(_collector->_restart_addr == NULL, "Sanity check");
6842  assert(_span.contains(_finger), "Out of bounds _finger?");
6843}
6844
6845void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6846  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6847  assert(_span.contains(addr), "Out of bounds _finger?");
6848  _finger = addr;
6849}
6850
6851// Should revisit to see if this should be restructured for
6852// greater efficiency.
6853bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6854  // convert offset into a HeapWord*
6855  HeapWord* addr = _verification_bm->startWord() + offset;
6856  assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6857         "address out of range");
6858  assert(_verification_bm->isMarked(addr), "tautology");
6859  assert(_cms_bm->isMarked(addr), "tautology");
6860
6861  assert(_mark_stack->isEmpty(),
6862         "should drain stack to limit stack usage");
6863  // convert addr to an oop preparatory to scanning
6864  oop obj = oop(addr);
6865  assert(obj->is_oop(), "should be an oop");
6866  assert(_finger <= addr, "_finger runneth ahead");
6867  // advance the finger to right end of this object
6868  _finger = addr + obj->size();
6869  assert(_finger > addr, "we just incremented it above");
6870  // Note: the finger doesn't advance while we drain
6871  // the stack below.
6872  bool res = _mark_stack->push(obj);
6873  assert(res, "Empty non-zero size stack should have space for single push");
6874  while (!_mark_stack->isEmpty()) {
6875    oop new_oop = _mark_stack->pop();
6876    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6877    // now scan this oop's oops
6878    new_oop->oop_iterate(&_pam_verify_closure);
6879  }
6880  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6881  return true;
6882}
6883
6884PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6885  CMSCollector* collector, MemRegion span,
6886  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6887  CMSMarkStack*  mark_stack):
6888  MetadataAwareOopClosure(collector->ref_processor()),
6889  _collector(collector),
6890  _span(span),
6891  _verification_bm(verification_bm),
6892  _cms_bm(cms_bm),
6893  _mark_stack(mark_stack)
6894{ }
6895
6896void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6897void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6898
6899// Upon stack overflow, we discard (part of) the stack,
6900// remembering the least address amongst those discarded
6901// in CMSCollector's _restart_address.
6902void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6903  // Remember the least grey address discarded
6904  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6905  _collector->lower_restart_addr(ra);
6906  _mark_stack->reset();  // discard stack contents
6907  _mark_stack->expand(); // expand the stack if possible
6908}
6909
6910void PushAndMarkVerifyClosure::do_oop(oop obj) {
6911  assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
6912  HeapWord* addr = (HeapWord*)obj;
6913  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6914    // Oop lies in _span and isn't yet grey or black
6915    _verification_bm->mark(addr);            // now grey
6916    if (!_cms_bm->isMarked(addr)) {
6917      oop(addr)->print();
6918      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6919                             p2i(addr));
6920      fatal("... aborting");
6921    }
6922
6923    if (!_mark_stack->push(obj)) { // stack overflow
6924      if (PrintCMSStatistics != 0) {
6925        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6926                               SIZE_FORMAT, _mark_stack->capacity());
6927      }
6928      assert(_mark_stack->isFull(), "Else push should have succeeded");
6929      handle_stack_overflow(addr);
6930    }
6931    // anything including and to the right of _finger
6932    // will be scanned as we iterate over the remainder of the
6933    // bit map
6934  }
6935}
6936
6937PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6938                     MemRegion span,
6939                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
6940                     HeapWord* finger, MarkFromRootsClosure* parent) :
6941  MetadataAwareOopClosure(collector->ref_processor()),
6942  _collector(collector),
6943  _span(span),
6944  _bitMap(bitMap),
6945  _markStack(markStack),
6946  _finger(finger),
6947  _parent(parent)
6948{ }
6949
6950Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
6951                     MemRegion span,
6952                     CMSBitMap* bit_map,
6953                     OopTaskQueue* work_queue,
6954                     CMSMarkStack*  overflow_stack,
6955                     HeapWord* finger,
6956                     HeapWord** global_finger_addr,
6957                     Par_MarkFromRootsClosure* parent) :
6958  MetadataAwareOopClosure(collector->ref_processor()),
6959  _collector(collector),
6960  _whole_span(collector->_span),
6961  _span(span),
6962  _bit_map(bit_map),
6963  _work_queue(work_queue),
6964  _overflow_stack(overflow_stack),
6965  _finger(finger),
6966  _global_finger_addr(global_finger_addr),
6967  _parent(parent)
6968{ }
6969
6970// Assumes thread-safe access by callers, who are
6971// responsible for mutual exclusion.
6972void CMSCollector::lower_restart_addr(HeapWord* low) {
6973  assert(_span.contains(low), "Out of bounds addr");
6974  if (_restart_addr == NULL) {
6975    _restart_addr = low;
6976  } else {
6977    _restart_addr = MIN2(_restart_addr, low);
6978  }
6979}
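
// Note (informal): once a closure's handle_stack_overflow() has recorded an
// address here, the marking code that later finds _restart_addr non-NULL is
// expected to re-seed its bit map iteration from that address (see, e.g.,
// MarkFromRootsClosure::reset() above), so grey objects discarded from an
// overflowing stack are eventually revisited.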
6980
6981// Upon stack overflow, we discard (part of) the stack,
6982// remembering the least address amongst those discarded
6983// in CMSCollector's _restart_address.
6984void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6985  // Remember the least grey address discarded
6986  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6987  _collector->lower_restart_addr(ra);
6988  _markStack->reset();  // discard stack contents
6989  _markStack->expand(); // expand the stack if possible
6990}
6991
6992// Upon stack overflow, we discard (part of) the stack,
6993// remembering the least address amongst those discarded
6994// in CMSCollector's _restart_address.
6995void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6996  // We need to do this under a mutex to prevent other
6997  // workers from interfering with the work done below.
6998  MutexLockerEx ml(_overflow_stack->par_lock(),
6999                   Mutex::_no_safepoint_check_flag);
7000  // Remember the least grey address discarded
7001  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7002  _collector->lower_restart_addr(ra);
7003  _overflow_stack->reset();  // discard stack contents
7004  _overflow_stack->expand(); // expand the stack if possible
7005}
7006
7007void PushOrMarkClosure::do_oop(oop obj) {
7008  // Ignore mark word because we are running concurrent with mutators.
7009  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7010  HeapWord* addr = (HeapWord*)obj;
7011  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7012    // Oop lies in _span and isn't yet grey or black
7013    _bitMap->mark(addr);            // now grey
7014    if (addr < _finger) {
7015      // the bit map iteration has already either passed, or
7016      // sampled, this bit in the bit map; we'll need to
7017      // use the marking stack to scan this oop's oops.
7018      bool simulate_overflow = false;
7019      NOT_PRODUCT(
7020        if (CMSMarkStackOverflowALot &&
7021            _collector->simulate_overflow()) {
7022          // simulate a stack overflow
7023          simulate_overflow = true;
7024        }
7025      )
7026      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7027        if (PrintCMSStatistics != 0) {
7028          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7029                                 SIZE_FORMAT, _markStack->capacity());
7030        }
7031        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7032        handle_stack_overflow(addr);
7033      }
7034    }
7035    // anything including and to the right of _finger
7036    // will be scanned as we iterate over the remainder of the
7037    // bit map
7038    do_yield_check();
7039  }
7040}
7041
7042void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7043void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7044
7045void Par_PushOrMarkClosure::do_oop(oop obj) {
7046  // Ignore mark word because we are running concurrent with mutators.
7047  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7048  HeapWord* addr = (HeapWord*)obj;
7049  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7050    // Oop lies in the CMS heap (_whole_span) and isn't yet grey or black.
7051    // We read the global_finger (volatile read) strictly after marking the oop.
7052    bool res = _bit_map->par_mark(addr);    // now grey
7053    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7054    // Should we push this marked oop on our stack?
7055    // -- if someone else marked it, nothing to do
7056    // -- if target oop is above global finger nothing to do
7057    // -- if target oop is in chunk and above local finger
7058    //      then nothing to do
7059    // -- else push on work queue
7060    if (   !res       // someone else marked it, they will deal with it
7061        || (addr >= *gfa)  // will be scanned in a later task
7062        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7063      return;
7064    }
7065    // the bit map iteration has already either passed, or
7066    // sampled, this bit in the bit map; we'll need to
7067    // use the marking stack to scan this oop's oops.
7068    bool simulate_overflow = false;
7069    NOT_PRODUCT(
7070      if (CMSMarkStackOverflowALot &&
7071          _collector->simulate_overflow()) {
7072        // simulate a stack overflow
7073        simulate_overflow = true;
7074      }
7075    )
7076    if (simulate_overflow ||
7077        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7078      // stack overflow
7079      if (PrintCMSStatistics != 0) {
7080        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7081                               SIZE_FORMAT, _overflow_stack->capacity());
7082      }
7083      // We cannot assert that the overflow stack is full because
7084      // it may have been emptied since.
7085      assert(simulate_overflow ||
7086             _work_queue->size() == _work_queue->max_elems(),
7087            "Else push should have succeeded");
7088      handle_stack_overflow(addr);
7089    }
7090    do_yield_check();
7091  }
7092}
7093
7094void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7095void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7096
7097PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7098                                       MemRegion span,
7099                                       ReferenceProcessor* rp,
7100                                       CMSBitMap* bit_map,
7101                                       CMSBitMap* mod_union_table,
7102                                       CMSMarkStack*  mark_stack,
7103                                       bool           concurrent_precleaning):
7104  MetadataAwareOopClosure(rp),
7105  _collector(collector),
7106  _span(span),
7107  _bit_map(bit_map),
7108  _mod_union_table(mod_union_table),
7109  _mark_stack(mark_stack),
7110  _concurrent_precleaning(concurrent_precleaning)
7111{
7112  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7113}
7114
7115// Grey object rescan during pre-cleaning and second checkpoint phases --
7116// the non-parallel version (the parallel version appears further below.)
7117void PushAndMarkClosure::do_oop(oop obj) {
7118  // Ignore mark word verification: during concurrent precleaning the
7119  // object's monitor may be locked, and during the checkpoint phases the
7120  // object may already have been reached by a different path and may be
7121  // at the end of the global overflow list (so its mark word may be
7122  // NULL).
7123  assert(obj->is_oop_or_null(true /* ignore mark word */),
7124         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7125  HeapWord* addr = (HeapWord*)obj;
7126  // Check if oop points into the CMS generation
7127  // and is not marked
7128  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7129    // a white object ...
7130    _bit_map->mark(addr);         // ... now grey
7131    // push on the marking stack (grey set)
7132    bool simulate_overflow = false;
7133    NOT_PRODUCT(
7134      if (CMSMarkStackOverflowALot &&
7135          _collector->simulate_overflow()) {
7136        // simulate a stack overflow
7137        simulate_overflow = true;
7138      }
7139    )
7140    if (simulate_overflow || !_mark_stack->push(obj)) {
7141      if (_concurrent_precleaning) {
7142         // During precleaning we can just dirty the appropriate card(s)
7143         // in the mod union table (thus ensuring that the object remains
7144         // in the grey set) and continue. In the case of object arrays
7145         // we need to dirty all of the cards that the object spans,
7146         // since the rescan of object arrays will be limited to the
7147         // dirty cards.
7148         // Note that no one can be interfering with us in this action
7149         // of dirtying the mod union table, so no locking or atomics
7150         // are required.
7151         if (obj->is_objArray()) {
7152           size_t sz = obj->size();
7153           HeapWord* end_card_addr = (HeapWord*)round_to(
7154                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7155           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7156           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7157           _mod_union_table->mark_range(redirty_range);
7158         } else {
7159           _mod_union_table->mark(addr);
7160         }
7161         _collector->_ser_pmc_preclean_ovflw++;
7162      } else {
7163         // During the remark phase, we need to remember this oop
7164         // in the overflow list.
7165         _collector->push_on_overflow_list(obj);
7166         _collector->_ser_pmc_remark_ovflw++;
7167      }
7168    }
7169  }
7170}
7171
7172Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7173                                               MemRegion span,
7174                                               ReferenceProcessor* rp,
7175                                               CMSBitMap* bit_map,
7176                                               OopTaskQueue* work_queue):
7177  MetadataAwareOopClosure(rp),
7178  _collector(collector),
7179  _span(span),
7180  _bit_map(bit_map),
7181  _work_queue(work_queue)
7182{
7183  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7184}
7185
7186void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7187void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7188
7189// Grey object rescan during second checkpoint phase --
7190// the parallel version.
7191void Par_PushAndMarkClosure::do_oop(oop obj) {
7192  // In the assert below, we ignore the mark word because
7193  // this oop may point to an already visited object that is
7194  // on the overflow stack (in which case the mark word has
7195  // been hijacked for chaining into the overflow stack --
7196  // if this is the last object in the overflow stack then
7197  // its mark word will be NULL). Because this object may
7198  // subsequently have been popped off the global overflow
7199  // stack, and its mark word restored to the prototypical
7200  // value, by the time we get to examine this failing
7201  // assert in the debugger, is_oop_or_null(false) may
7202  // already hold.
7203  assert(obj->is_oop_or_null(true),
7204         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7205  HeapWord* addr = (HeapWord*)obj;
7206  // Check if oop points into the CMS generation
7207  // and is not marked
7208  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7209    // a white object ...
7210    // If we manage to "claim" the object, by being the
7211    // first thread to mark it, then we push it on our
7212    // marking stack
7213    if (_bit_map->par_mark(addr)) {     // ... now grey
7214      // push on work queue (grey set)
7215      bool simulate_overflow = false;
7216      NOT_PRODUCT(
7217        if (CMSMarkStackOverflowALot &&
7218            _collector->par_simulate_overflow()) {
7219          // simulate a stack overflow
7220          simulate_overflow = true;
7221        }
7222      )
7223      if (simulate_overflow || !_work_queue->push(obj)) {
7224        _collector->par_push_on_overflow_list(obj);
7225        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7226      }
7227    } // Else, some other thread got there first
7228  }
7229}
7230
7231void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7232void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7233
7234void CMSPrecleanRefsYieldClosure::do_yield_work() {
7235  Mutex* bml = _collector->bitMapLock();
7236  assert_lock_strong(bml);
7237  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7238         "CMS thread should hold CMS token");
7239
7240  bml->unlock();
7241  ConcurrentMarkSweepThread::desynchronize(true);
7242
7243  _collector->stopTimer();
7244  if (PrintCMSStatistics != 0) {
7245    _collector->incrementYields();
7246  }
7247
7248  // See the comment in coordinator_yield()
7249  for (unsigned i = 0; i < CMSYieldSleepCount &&
7250                       ConcurrentMarkSweepThread::should_yield() &&
7251                       !CMSCollector::foregroundGCIsActive(); ++i) {
7252    os::sleep(Thread::current(), 1, false);
7253  }
7254
7255  ConcurrentMarkSweepThread::synchronize(true);
7256  bml->lock();
7257
7258  _collector->startTimer();
7259}
7260
7261bool CMSPrecleanRefsYieldClosure::should_return() {
7262  if (ConcurrentMarkSweepThread::should_yield()) {
7263    do_yield_work();
7264  }
7265  return _collector->foregroundGCIsActive();
7266}
7267
7268void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7269  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7270         "mr should be aligned to start at a card boundary");
7271  // We'd like to assert:
7272  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7273  //        "mr should be a range of cards");
7274  // However, that would be too strong in one case -- the last
7275  // partition ends at _unallocated_block which, in general, can be
7276  // an arbitrary boundary, not necessarily card aligned.
7277  if (PrintCMSStatistics != 0) {
7278    _num_dirty_cards +=
7279         mr.word_size()/CardTableModRefBS::card_size_in_words;
7280  }
7281  _space->object_iterate_mem(mr, &_scan_cl);
7282}
7283
7284SweepClosure::SweepClosure(CMSCollector* collector,
7285                           ConcurrentMarkSweepGeneration* g,
7286                           CMSBitMap* bitMap, bool should_yield) :
7287  _collector(collector),
7288  _g(g),
7289  _sp(g->cmsSpace()),
7290  _limit(_sp->sweep_limit()),
7291  _freelistLock(_sp->freelistLock()),
7292  _bitMap(bitMap),
7293  _yield(should_yield),
7294  _inFreeRange(false),           // No free range at beginning of sweep
7295  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7296  _lastFreeRangeCoalesced(false),
7297  _freeFinger(g->used_region().start())
7298{
7299  NOT_PRODUCT(
7300    _numObjectsFreed = 0;
7301    _numWordsFreed   = 0;
7302    _numObjectsLive = 0;
7303    _numWordsLive = 0;
7304    _numObjectsAlreadyFree = 0;
7305    _numWordsAlreadyFree = 0;
7306    _last_fc = NULL;
7307
7308    _sp->initializeIndexedFreeListArrayReturnedBytes();
7309    _sp->dictionary()->initialize_dict_returned_bytes();
7310  )
7311  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7312         "sweep _limit out of bounds");
7313  if (CMSTraceSweeper) {
7314    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7315                        p2i(_limit));
7316  }
7317}
7318
7319void SweepClosure::print_on(outputStream* st) const {
7320  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7321                p2i(_sp->bottom()), p2i(_sp->end()));
7322  tty->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7323  tty->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7324  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7325  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7326                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7327}
7328
7329#ifndef PRODUCT
7330// Assertion checking only:  no useful work in product mode --
7331// however, if any of the flags below become product flags,
7332// you may need to review this code to see if it needs to be
7333// enabled in product mode.
7334SweepClosure::~SweepClosure() {
7335  assert_lock_strong(_freelistLock);
7336  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7337         "sweep _limit out of bounds");
7338  if (inFreeRange()) {
7339    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7340    print();
7341    ShouldNotReachHere();
7342  }
7343  if (Verbose && PrintGC) {
7344    gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7345                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7346    gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects,  "
7347                           SIZE_FORMAT " bytes  "
7348      "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7349      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7350      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7351    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7352                        * sizeof(HeapWord);
7353    gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7354
7355    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7356      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7357      size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7358      size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7359      gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7360      gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
7361        indexListReturnedBytes);
7362      gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
7363        dict_returned_bytes);
7364    }
7365  }
7366  if (CMSTraceSweeper) {
7367    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7368                           p2i(_limit));
7369  }
7370}
7371#endif  // PRODUCT
7372
7373void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7374    bool freeRangeInFreeLists) {
7375  if (CMSTraceSweeper) {
7376    gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7377               p2i(freeFinger), freeRangeInFreeLists);
7378  }
7379  assert(!inFreeRange(), "Trampling existing free range");
7380  set_inFreeRange(true);
7381  set_lastFreeRangeCoalesced(false);
7382
7383  set_freeFinger(freeFinger);
7384  set_freeRangeInFreeLists(freeRangeInFreeLists);
7385  if (CMSTestInFreeList) {
7386    if (freeRangeInFreeLists) {
7387      FreeChunk* fc = (FreeChunk*) freeFinger;
7388      assert(fc->is_free(), "A chunk on the free list should be free.");
7389      assert(fc->size() > 0, "Free range should have a size");
7390      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7391    }
7392  }
7393}
7394
7395// Note that the sweeper runs concurrently with mutators. Thus,
7396// it is possible for direct allocation in this generation to happen
7397// in the middle of the sweep. Note that the sweeper also coalesces
7398// contiguous free blocks. Thus, unless the sweeper and the allocator
7399// synchronize appropriately, freshly allocated blocks may get swept up.
7400// This is accomplished by the sweeper locking the free lists while
7401// it is sweeping. Thus blocks that are determined to be free are
7402// indeed free. There is however one additional complication:
7403// blocks that have been allocated since the final checkpoint and
7404// mark, will not have been marked and so would be treated as
7405// unreachable and swept up. To prevent this, the allocator marks
7406// the bit map when allocating during the sweep phase. This leads,
7407// however, to a further complication -- objects may have been allocated
7408// but not yet initialized -- in the sense that the header isn't yet
7409// installed. The sweeper cannot then determine the size of the block
7410// in order to skip over it. To deal with this case, we use a technique
7411// (due to Printezis) to encode such uninitialized block sizes in the
7412// bit map. Since the bit map uses one bit per HeapWord, but the
7413// CMS generation has a minimum object size of 3 HeapWords, it follows
7414// that "normal marks" won't be adjacent in the bit map (there will
7415// always be at least two 0 bits between successive 1 bits). We make use
7416// of these "unused" bits to represent uninitialized blocks -- the bit
7417// corresponding to the start of the uninitialized object and the next
7418// bit are both set. Finally, a 1 bit marks the end of the object that
7419// started with the two consecutive 1 bits to indicate its potentially
7420// uninitialized state.
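//
// Worked example (informal): for an uninitialized block starting at addr
// whose eventual size is 5 HeapWords, the bits for addr and addr+1 are both
// set and the bit for addr+4 (the block's last word) marks its end; the
// sweeper then recovers the size in do_live_chunk() below as
//   size = pointer_delta(getNextMarkedWordAddress(addr + 2) + 1, addr) = 5.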
7421
7422size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7423  FreeChunk* fc = (FreeChunk*)addr;
7424  size_t res;
7425
7426  // Check if we are done sweeping. Below we check "addr >= _limit" rather
7427  // than "addr == _limit" because although _limit was a block boundary when
7428  // we started the sweep, it may no longer be one because heap expansion
7429  // may have caused us to coalesce the block ending at the address _limit
7430  // with a newly expanded chunk (this happens when _limit was set to the
7431  // previous _end of the space), so we may have stepped past _limit:
7432  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7433  if (addr >= _limit) { // we have swept up to or past the limit: finish up
7434    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7435           "sweep _limit out of bounds");
7436    assert(addr < _sp->end(), "addr out of bounds");
7437    // Flush any free range we might be holding as a single
7438    // coalesced chunk to the appropriate free list.
7439    if (inFreeRange()) {
7440      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7441             err_msg("freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())));
7442      flush_cur_free_chunk(freeFinger(),
7443                           pointer_delta(addr, freeFinger()));
7444      if (CMSTraceSweeper) {
7445        gclog_or_tty->print("Sweep: last chunk: ");
7446        gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7447                   "[coalesced:%d]\n",
7448                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7449                   lastFreeRangeCoalesced() ? 1 : 0);
7450      }
7451    }
7452
7453    // help the iterator loop finish
7454    return pointer_delta(_sp->end(), addr);
7455  }
7456
7457  assert(addr < _limit, "sweep invariant");
7458  // check if we should yield
7459  do_yield_check(addr);
7460  if (fc->is_free()) {
7461    // Chunk that is already free
7462    res = fc->size();
7463    do_already_free_chunk(fc);
7464    debug_only(_sp->verifyFreeLists());
7465    // If we flush the chunk at hand in lookahead_and_flush()
7466    // and it's coalesced with a preceding chunk, then the
7467    // process of "mangling" the payload of the coalesced block
7468    // will cause erasure of the size information from the
7469    // (erstwhile) header of all the coalesced blocks but the
7470    // first, so the first disjunct in the assert will not hold
7471    // in that specific case (in which case the second disjunct
7472    // will hold).
7473    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7474           "Otherwise the size info doesn't change at this step");
7475    NOT_PRODUCT(
7476      _numObjectsAlreadyFree++;
7477      _numWordsAlreadyFree += res;
7478    )
7479    NOT_PRODUCT(_last_fc = fc;)
7480  } else if (!_bitMap->isMarked(addr)) {
7481    // Chunk is fresh garbage
7482    res = do_garbage_chunk(fc);
7483    debug_only(_sp->verifyFreeLists());
7484    NOT_PRODUCT(
7485      _numObjectsFreed++;
7486      _numWordsFreed += res;
7487    )
7488  } else {
7489    // Chunk that is alive.
7490    res = do_live_chunk(fc);
7491    debug_only(_sp->verifyFreeLists());
7492    NOT_PRODUCT(
7493        _numObjectsLive++;
7494        _numWordsLive += res;
7495    )
7496  }
7497  return res;
7498}
7499
7500// For the smart allocation, record the following:
7501//  split deaths - a free chunk is removed from its free list because
7502//      it is being split into two or more chunks.
7503//  split birth - a free chunk is being added to its free list because
7504//      a larger free chunk has been split and resulted in this free chunk.
7505//  coal death - a free chunk is being removed from its free list because
7506//      it is being coalesced into a large free chunk.
7507//  coal birth - a free chunk is being added to its free list because
7508//      it was created when two or more free chunks were coalesced into
7509//      this free chunk.
7510//
7511// These statistics are used to determine the desired number of free
7512// chunks of a given size.  The desired number is chosen to be relative
7513// to the end of a CMS sweep.  The desired number at the end of a sweep
7514// is the
7515//      count-at-end-of-previous-sweep (an amount that was enough)
7516//              - count-at-beginning-of-current-sweep  (the excess)
7517//              + split-births  (gains in this size during interval)
7518//              - split-deaths  (demands on this size during interval)
7519// where the interval is from the end of one sweep to the end of the
7520// next.
7521//
7522// While sweeping, the sweeper maintains an accumulated chunk, made up of
7523// the chunks that have been coalesced so far.  That
7524// will be termed the left-hand chunk.  A new chunk of garbage that
7525// is being considered for coalescing will be referred to as the
7526// right-hand chunk.
7527//
7528// When making a decision on whether to coalesce a right-hand chunk with
7529// the current left-hand chunk, the current count vs. the desired count
7530// of the left-hand chunk is considered.  Also if the right-hand chunk
7531// is near the large chunk at the end of the heap (see
7532// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7533// left-hand chunk is coalesced.
7534//
7535// When making a decision about whether to split a chunk, the desired count
7536// vs. the current count of the candidate to be split is also considered.
7537// If the candidate is underpopulated (currently fewer chunks than desired)
7538// a chunk of an overpopulated (currently more chunks than desired) size may
7539// be chosen.  The "hint" associated with a free list, if non-null, points
7540// to a free list which may be overpopulated.
7541//
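// Informal worked example of the desired-count rule above: if a given chunk
// size had 100 free chunks at the end of the previous sweep, 40 remain at
// the beginning of the current sweep, and the interval saw 25 split births
// and 10 split deaths, then the desired count at the end of this sweep is
//   100 - 40 + 25 - 10 = 75 chunks.
//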
7542
7543void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7544  const size_t size = fc->size();
7545  // Chunks that cannot be coalesced are not in the
7546  // free lists.
7547  if (CMSTestInFreeList && !fc->cantCoalesce()) {
7548    assert(_sp->verify_chunk_in_free_list(fc),
7549      "free chunk should be in free lists");
7550  }
7551  // A chunk that is already free should not have been
7552  // marked in the bit map.
7553  HeapWord* const addr = (HeapWord*) fc;
7554  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7555  // Verify that the bit map has no bits marked between
7556  // addr and purported end of this block.
7557  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7558
7559  // Some chunks cannot be coalesced under any circumstances.
7560  // See the definition of cantCoalesce().
7561  if (!fc->cantCoalesce()) {
7562    // This chunk can potentially be coalesced.
7563    if (_sp->adaptive_freelists()) {
7564      // All the work is done in do_post_free_or_garbage_chunk().
7565      do_post_free_or_garbage_chunk(fc, size);
7566    } else {  // Not adaptive free lists
7567      // this is a free chunk that can potentially be coalesced by the sweeper;
7568      if (!inFreeRange()) {
7569        // if the next chunk is a free block that can't be coalesced
7570        // it doesn't make sense to remove this chunk from the free lists
7571        FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7572        assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
7573        if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
7574            nextChunk->is_free()               &&     // ... which is free...
7575            nextChunk->cantCoalesce()) {             // ... but can't be coalesced
7576          // nothing to do
7577        } else {
7578          // Potentially the start of a new free range:
7579          // Don't eagerly remove it from the free lists.
7580          // No need to remove it if it will just be put
7581          // back again.  (Also from a pragmatic point of view
7582          // if it is a free block in a region that is beyond
7583          // any allocated blocks, an assertion will fail)
7584          // Remember the start of a free run.
7585          initialize_free_range(addr, true);
7586          // end - can coalesce with next chunk
7587        }
7588      } else {
7589        // the midst of a free range, we are coalescing
7590        print_free_block_coalesced(fc);
7591        if (CMSTraceSweeper) {
7592          gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7593        }
7594        // remove it from the free lists
7595        _sp->removeFreeChunkFromFreeLists(fc);
7596        set_lastFreeRangeCoalesced(true);
7597        // If the chunk is being coalesced and the current free range is
7598        // in the free lists, remove the current free range so that it
7599        // will be returned to the free lists in its entirety - all
7600        // the coalesced pieces included.
7601        if (freeRangeInFreeLists()) {
7602          FreeChunk* ffc = (FreeChunk*) freeFinger();
7603          assert(ffc->size() == pointer_delta(addr, freeFinger()),
7604            "Size of free range is inconsistent with chunk size.");
7605          if (CMSTestInFreeList) {
7606            assert(_sp->verify_chunk_in_free_list(ffc),
7607              "free range is not in free lists");
7608          }
7609          _sp->removeFreeChunkFromFreeLists(ffc);
7610          set_freeRangeInFreeLists(false);
7611        }
7612      }
7613    }
7614    // Note that if the chunk is not coalescable (the else arm
7615    // below), we unconditionally flush, without needing to do
7616    // a "lookahead," as we do below.
7617    if (inFreeRange()) lookahead_and_flush(fc, size);
7618  } else {
7619    // Code path common to both original and adaptive free lists.
7620
7621    // Can't coalesce with the previous block; this should be treated
7622    // as the end of a free run if any
7623    if (inFreeRange()) {
7624      // we kicked some butt; time to pick up the garbage
7625      assert(freeFinger() < addr, "freeFinger points too high");
7626      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7627    }
7628    // else, nothing to do, just continue
7629  }
7630}
7631
7632size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7633  // This is a chunk of garbage.  It is not in any free list.
7634  // Add it to a free list or let it possibly be coalesced into
7635  // a larger chunk.
7636  HeapWord* const addr = (HeapWord*) fc;
7637  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7638
7639  if (_sp->adaptive_freelists()) {
7640    // Verify that the bit map has no bits marked between
7641    // addr and purported end of just dead object.
7642    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7643
7644    do_post_free_or_garbage_chunk(fc, size);
7645  } else {
7646    if (!inFreeRange()) {
7647      // start of a new free range
7648      assert(size > 0, "A free range should have a size");
7649      initialize_free_range(addr, false);
7650    } else {
7651      // this will be swept up when we hit the end of the
7652      // free range
7653      if (CMSTraceSweeper) {
7654        gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7655      }
7656      // If the chunk is being coalesced and the current free range is
7657      // in the free lists, remove the current free range so that it
7658      // will be returned to the free lists in its entirety - all
7659      // the coalesced pieces included.
7660      if (freeRangeInFreeLists()) {
7661        FreeChunk* ffc = (FreeChunk*)freeFinger();
7662        assert(ffc->size() == pointer_delta(addr, freeFinger()),
7663          "Size of free range is inconsistent with chunk size.");
7664        if (CMSTestInFreeList) {
7665          assert(_sp->verify_chunk_in_free_list(ffc),
7666            "free range is not in free lists");
7667        }
7668        _sp->removeFreeChunkFromFreeLists(ffc);
7669        set_freeRangeInFreeLists(false);
7670      }
7671      set_lastFreeRangeCoalesced(true);
7672    }
7673    // this will be swept up when we hit the end of the free range
7674
7675    // Verify that the bit map has no bits marked between
7676    // addr and purported end of just dead object.
7677    _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7678  }
7679  assert(_limit >= addr + size,
7680         "A freshly garbage chunk can't possibly straddle over _limit");
7681  if (inFreeRange()) lookahead_and_flush(fc, size);
7682  return size;
7683}
7684
7685size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7686  HeapWord* addr = (HeapWord*) fc;
7687  // The sweeper has just found a live object. Return any accumulated
7688  // left hand chunk to the free lists.
7689  if (inFreeRange()) {
7690    assert(freeFinger() < addr, "freeFinger points too high");
7691    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7692  }
7693
7694  // This object is live: we'd normally expect this to be
7695  // an oop, and like to assert the following:
7696  // assert(oop(addr)->is_oop(), "live block should be an oop");
7697  // However, as we commented above, this may be an object whose
7698  // header hasn't yet been initialized.
7699  size_t size;
7700  assert(_bitMap->isMarked(addr), "Tautology for this control point");
7701  if (_bitMap->isMarked(addr + 1)) {
7702    // Determine the size from the bit map, rather than trying to
7703    // compute it from the object header.
7704    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7705    size = pointer_delta(nextOneAddr + 1, addr);
7706    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7707           "alignment problem");
7708
7709#ifdef ASSERT
7710      if (oop(addr)->klass_or_null() != NULL) {
7711        // Ignore mark word because we are running concurrent with mutators
7712        assert(oop(addr)->is_oop(true), "live block should be an oop");
7713        assert(size ==
7714               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7715               "P-mark and computed size do not agree");
7716      }
7717#endif
7718
7719  } else {
7720    // This should be an initialized object that's alive.
7721    assert(oop(addr)->klass_or_null() != NULL,
7722           "Should be an initialized object");
7723    // Ignore mark word because we are running concurrent with mutators
7724    assert(oop(addr)->is_oop(true), "live block should be an oop");
7725    // Verify that the bit map has no bits marked between
7726    // addr and purported end of this block.
7727    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7728    assert(size >= 3, "Necessary for Printezis marks to work");
7729    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7730    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7731  }
7732  return size;
7733}
7734
7735void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7736                                                 size_t chunkSize) {
7737  // do_post_free_or_garbage_chunk() should only be called in the case
7738  // of the adaptive free list allocator.
7739  const bool fcInFreeLists = fc->is_free();
7740  assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7741  assert((HeapWord*)fc <= _limit, "sweep invariant");
7742  if (CMSTestInFreeList && fcInFreeLists) {
7743    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7744  }
7745
7746  if (CMSTraceSweeper) {
7747    gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7748  }
7749
7750  HeapWord* const fc_addr = (HeapWord*) fc;
7751
7752  bool coalesce;
7753  const size_t left  = pointer_delta(fc_addr, freeFinger());
7754  const size_t right = chunkSize;
7755  switch (FLSCoalescePolicy) {
7756    // The numeric value forms a coalescing-aggressiveness metric
7757    case 0:  { // never coalesce
7758      coalesce = false;
7759      break;
7760    }
7761    case 1: { // coalesce if left & right chunks on overpopulated lists
7762      coalesce = _sp->coalOverPopulated(left) &&
7763                 _sp->coalOverPopulated(right);
7764      break;
7765    }
7766    case 2: { // coalesce if left chunk on overpopulated list (default)
7767      coalesce = _sp->coalOverPopulated(left);
7768      break;
7769    }
7770    case 3: { // coalesce if left OR right chunk on overpopulated list
7771      coalesce = _sp->coalOverPopulated(left) ||
7772                 _sp->coalOverPopulated(right);
7773      break;
7774    }
7775    case 4: { // always coalesce
7776      coalesce = true;
7777      break;
7778    }
7779    default:
7780     ShouldNotReachHere();
7781  }
7782
7783  // Should the current free range be coalesced?
7784  // If the chunk is in a free range and either we decided to coalesce above
7785  // or the chunk is near the large block at the end of the heap
7786  // (isNearLargestChunk() returns true), then coalesce this chunk.
7787  const bool doCoalesce = inFreeRange()
7788                          && (coalesce || _g->isNearLargestChunk(fc_addr));
7789  if (doCoalesce) {
7790    // Coalesce the current free range on the left with the new
7791    // chunk on the right.  If either is on a free list,
7792    // it must be removed from the list and stashed in the closure.
7793    if (freeRangeInFreeLists()) {
7794      FreeChunk* const ffc = (FreeChunk*)freeFinger();
7795      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7796        "Size of free range is inconsistent with chunk size.");
7797      if (CMSTestInFreeList) {
7798        assert(_sp->verify_chunk_in_free_list(ffc),
7799          "Chunk is not in free lists");
7800      }
7801      _sp->coalDeath(ffc->size());
7802      _sp->removeFreeChunkFromFreeLists(ffc);
7803      set_freeRangeInFreeLists(false);
7804    }
7805    if (fcInFreeLists) {
7806      _sp->coalDeath(chunkSize);
7807      assert(fc->size() == chunkSize,
7808        "The chunk has the wrong size or is not in the free lists");
7809      _sp->removeFreeChunkFromFreeLists(fc);
7810    }
7811    set_lastFreeRangeCoalesced(true);
7812    print_free_block_coalesced(fc);
7813  } else {  // not in a free range and/or should not coalesce
7814    // Return the current free range and start a new one.
7815    if (inFreeRange()) {
7816      // In a free range but cannot coalesce with the right hand chunk.
7817      // Put the current free range into the free lists.
7818      flush_cur_free_chunk(freeFinger(),
7819                           pointer_delta(fc_addr, freeFinger()));
7820    }
7821    // Set up for new free range.  Pass along whether the right hand
7822    // chunk is in the free lists.
7823    initialize_free_range((HeapWord*)fc, fcInFreeLists);
7824  }
7825}
7826
7827// Lookahead flush:
7828// If we are tracking a free range, and this is the last chunk that
7829// we'll look at because its end crosses past _limit, we'll preemptively
7830// flush it along with any free range we may be holding on to. Note that
7831// this can be the case only for an already free or freshly garbage
7832// chunk. If this block is an object, it can never straddle
7833// over _limit. The "straddling" occurs when _limit is set at
7834// the previous end of the space when this cycle started, and
7835// a subsequent heap expansion caused the previously co-terminal
7836// free block to be coalesced with the newly expanded portion,
7837// thus rendering _limit a non-block-boundary, which makes it dangerous
7838// for the sweeper to step over and examine.
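// For example: the cycle starts with the space ending at E and _limit == E;
// the space is then expanded, and the free block that used to end exactly at
// E is coalesced with the newly added storage, so the sweeper now sees a
// single chunk that begins below _limit and ends above it; that is the case
// this method flushes early.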
7839void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7840  assert(inFreeRange(), "Should only be called if currently in a free range.");
7841  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7842  assert(_sp->used_region().contains(eob - 1),
7843         err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7844                 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7845                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7846                 p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size));
7847  if (eob >= _limit) {
7848    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7849    if (CMSTraceSweeper) {
7850      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7851                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7852                             "[" PTR_FORMAT "," PTR_FORMAT ")",
7853                             p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7854    }
7855    // Return the storage we are tracking back into the free lists.
7856    if (CMSTraceSweeper) {
7857      gclog_or_tty->print_cr("Flushing ... ");
7858    }
7859    assert(freeFinger() < eob, "Error");
7860    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7861  }
7862}
7863
7864void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7865  assert(inFreeRange(), "Should only be called if currently in a free range.");
7866  assert(size > 0,
7867    "A zero sized chunk cannot be added to the free lists.");
7868  if (!freeRangeInFreeLists()) {
7869    if (CMSTestInFreeList) {
7870      FreeChunk* fc = (FreeChunk*) chunk;
7871      fc->set_size(size);
7872      assert(!_sp->verify_chunk_in_free_list(fc),
7873        "chunk should not be in free lists yet");
7874    }
7875    if (CMSTraceSweeper) {
7876      gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7877                    p2i(chunk), size);
7878    }
7879    // A new free range is about to start.  The current free range
7880    // either has not been added to the free lists yet or was removed
7881    // from them, so add it back.
7882    // If the current free range was coalesced, then the death
7883    // of the free range was recorded.  Record a birth now.
7884    if (lastFreeRangeCoalesced()) {
7885      _sp->coalBirth(size);
7886    }
7887    _sp->addChunkAndRepairOffsetTable(chunk, size,
7888            lastFreeRangeCoalesced());
7889  } else if (CMSTraceSweeper) {
7890    gclog_or_tty->print_cr("Already in free list: nothing to flush");
7891  }
7892  set_inFreeRange(false);
7893  set_freeRangeInFreeLists(false);
7894}
7895
7896// We take a break if we've been at this for a while,
7897// so as to avoid monopolizing the locks involved.
7898void SweepClosure::do_yield_work(HeapWord* addr) {
7899  // Return current free chunk being used for coalescing (if any)
7900  // to the appropriate freelist.  After yielding, the next
7901  // free block encountered will start a coalescing range of
7902  // free blocks.  If the next free block is adjacent to the
7903  // chunk just flushed, they will need to wait for the next
7904  // sweep to be coalesced.
7905  if (inFreeRange()) {
7906    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7907  }
7908
7909  // First give up the locks, then yield, then re-lock.
7910  // We should probably use a constructor/destructor idiom to
7911  // do this unlock/lock or modify the MutexUnlocker class to
7912  // serve our purpose. XXX
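  // (A minimal sketch of such an idiom, hypothetical and not part of this
  //  file, would simply invert the usual locker: release in the constructor,
  //  reacquire in the destructor, e.g.
  //
  //    class ReverseBitMapAndFreelistLocker {        // hypothetical helper
  //     public:
  //      ReverseBitMapAndFreelistLocker(CMSBitMap* bm, Mutex* fl)
  //        : _bm(bm), _fl(fl) { _bm->lock()->unlock(); _fl->unlock(); }
  //      ~ReverseBitMapAndFreelistLocker() {
  //        _fl->lock(); _bm->lock()->lock_without_safepoint_check();
  //      }
  //     private:
  //      CMSBitMap* _bm;
  //      Mutex*     _fl;
  //    };
  //
  //  so the manual unlock/relock pairing below could not get out of sync.)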
7913  assert_lock_strong(_bitMap->lock());
7914  assert_lock_strong(_freelistLock);
7915  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7916         "CMS thread should hold CMS token");
7917  _bitMap->lock()->unlock();
7918  _freelistLock->unlock();
7919  ConcurrentMarkSweepThread::desynchronize(true);
7920  _collector->stopTimer();
7921  if (PrintCMSStatistics != 0) {
7922    _collector->incrementYields();
7923  }
7924
7925  // See the comment in coordinator_yield()
7926  for (unsigned i = 0; i < CMSYieldSleepCount &&
7927                       ConcurrentMarkSweepThread::should_yield() &&
7928                       !CMSCollector::foregroundGCIsActive(); ++i) {
7929    os::sleep(Thread::current(), 1, false);
7930  }
7931
7932  ConcurrentMarkSweepThread::synchronize(true);
7933  _freelistLock->lock();
7934  _bitMap->lock()->lock_without_safepoint_check();
7935  _collector->startTimer();
7936}
7937
7938#ifndef PRODUCT
7939// This is actually very useful in a product build if it can
7940// be called from the debugger.  Compile it into the product
7941// as needed.
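// For example, from gdb one might evaluate something along the lines of
//   call debug_verify_chunk_in_free_list((FreeChunk*)0x00007f0123456789)
// (the address shown is just a placeholder for a chunk of interest).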
7942bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7943  return debug_cms_space->verify_chunk_in_free_list(fc);
7944}
7945#endif
7946
7947void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7948  if (CMSTraceSweeper) {
7949    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7950                           p2i(fc), fc->size());
7951  }
7952}
7953
7954// CMSIsAliveClosure
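// An object is treated as live if it lies outside the span this collector is
// responsible for (such objects are not ours to judge) or if it has been
// marked in the CMS bit map.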
7955bool CMSIsAliveClosure::do_object_b(oop obj) {
7956  HeapWord* addr = (HeapWord*)obj;
7957  return addr != NULL &&
7958         (!_span.contains(addr) || _bit_map->isMarked(addr));
7959}
7960
7961
7962CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7963                      MemRegion span,
7964                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7965                      bool cpc):
7966  _collector(collector),
7967  _span(span),
7968  _bit_map(bit_map),
7969  _mark_stack(mark_stack),
7970  _concurrent_precleaning(cpc) {
7971  assert(!_span.is_empty(), "Empty span could spell trouble");
7972}
7973
7974
7975// CMSKeepAliveClosure: the serial version
7976void CMSKeepAliveClosure::do_oop(oop obj) {
7977  HeapWord* addr = (HeapWord*)obj;
7978  if (_span.contains(addr) &&
7979      !_bit_map->isMarked(addr)) {
7980    _bit_map->mark(addr);
7981    bool simulate_overflow = false;
7982    NOT_PRODUCT(
7983      if (CMSMarkStackOverflowALot &&
7984          _collector->simulate_overflow()) {
7985        // simulate a stack overflow
7986        simulate_overflow = true;
7987      }
7988    )
7989    if (simulate_overflow || !_mark_stack->push(obj)) {
7990      if (_concurrent_precleaning) {
7991        // We dirty the overflown object and let the remark
7992        // phase deal with it.
7993        assert(_collector->overflow_list_is_empty(), "Error");
7994        // In the case of object arrays, we need to dirty all of
7995        // the cards that the object spans. No locking or atomics
7996        // are needed since no one else can be mutating the mod union
7997        // table.
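        // (For example, with the usual 512-byte cards an objArray starting
        //  at addr and spanning sz words dirties every card in
        //  [addr, round_to(addr + sz, card_size)), so remark will rescan
        //  all of the array's elements rather than just its first card.)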
7998        if (obj->is_objArray()) {
7999          size_t sz = obj->size();
8000          HeapWord* end_card_addr =
8001            (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8002          MemRegion redirty_range = MemRegion(addr, end_card_addr);
8003          assert(!redirty_range.is_empty(), "Arithmetical tautology");
8004          _collector->_modUnionTable.mark_range(redirty_range);
8005        } else {
8006          _collector->_modUnionTable.mark(addr);
8007        }
8008        _collector->_ser_kac_preclean_ovflw++;
8009      } else {
8010        _collector->push_on_overflow_list(obj);
8011        _collector->_ser_kac_ovflw++;
8012      }
8013    }
8014  }
8015}
8016
8017void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8018void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8019
8020// CMSParKeepAliveClosure: a parallel version of the above.
8021// The work queues are private to each closure (thread),
8022// but may be available for stealing by other threads.
8023void CMSParKeepAliveClosure::do_oop(oop obj) {
8024  HeapWord* addr = (HeapWord*)obj;
8025  if (_span.contains(addr) &&
8026      !_bit_map->isMarked(addr)) {
8027    // In general, during recursive tracing, several threads
8028    // may be concurrently getting here; the first one to
8029    // "tag" it claims it.
8030    if (_bit_map->par_mark(addr)) {
8031      bool res = _work_queue->push(obj);
8032      assert(res, "Low water mark should be much less than capacity");
8033      // Do a recursive trim in the hope that this will keep
8034      // stack usage lower, but leave some oops for potential stealers
8035      trim_queue(_low_water_mark);
8036    } // Else, another thread got there first
8037  }
8038}
8039
8040void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8041void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8042
8043void CMSParKeepAliveClosure::trim_queue(uint max) {
8044  while (_work_queue->size() > max) {
8045    oop new_oop;
8046    if (_work_queue->pop_local(new_oop)) {
8047      assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8048      assert(_bit_map->isMarked((HeapWord*)new_oop),
8049             "no white objects on this stack!");
8050      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8051      // iterate over the oops in this oop, marking and pushing
8052      // the ones in CMS heap (i.e. in _span).
8053      new_oop->oop_iterate(&_mark_and_push);
8054    }
8055  }
8056}
8057
8058CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8059                                CMSCollector* collector,
8060                                MemRegion span, CMSBitMap* bit_map,
8061                                OopTaskQueue* work_queue):
8062  _collector(collector),
8063  _span(span),
8064  _bit_map(bit_map),
8065  _work_queue(work_queue) { }
8066
8067void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8068  HeapWord* addr = (HeapWord*)obj;
8069  if (_span.contains(addr) &&
8070      !_bit_map->isMarked(addr)) {
8071    if (_bit_map->par_mark(addr)) {
8072      bool simulate_overflow = false;
8073      NOT_PRODUCT(
8074        if (CMSMarkStackOverflowALot &&
8075            _collector->par_simulate_overflow()) {
8076          // simulate a stack overflow
8077          simulate_overflow = true;
8078        }
8079      )
8080      if (simulate_overflow || !_work_queue->push(obj)) {
8081        _collector->par_push_on_overflow_list(obj);
8082        _collector->_par_kac_ovflw++;
8083      }
8084    } // Else another thread got there already
8085  }
8086}
8087
8088void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8089void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8090
8091//////////////////////////////////////////////////////////////////
8092//  CMSExpansionCause                /////////////////////////////
8093//////////////////////////////////////////////////////////////////
8094const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8095  switch (cause) {
8096    case _no_expansion:
8097      return "No expansion";
8098    case _satisfy_free_ratio:
8099      return "Free ratio";
8100    case _satisfy_promotion:
8101      return "Satisfy promotion";
8102    case _satisfy_allocation:
8103      return "allocation";
8104    case _allocate_par_lab:
8105      return "Par LAB";
8106    case _allocate_par_spooling_space:
8107      return "Par Spooling Space";
8108    case _adaptive_size_policy:
8109      return "Ergonomics";
8110    default:
8111      return "unknown";
8112  }
8113}
8114
8115void CMSDrainMarkingStackClosure::do_void() {
8116  // the max number to take from overflow list at a time
8117  const size_t num = _mark_stack->capacity()/4;
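  // (Presumably capped at a quarter of the stack's capacity so that the
  //  objects pushed while scanning the drained entries have headroom and we
  //  are less likely to overflow again immediately.)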
8118  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8119         "Overflow list should be NULL during concurrent phases");
8120  while (!_mark_stack->isEmpty() ||
8121         // if stack is empty, check the overflow list
8122         _collector->take_from_overflow_list(num, _mark_stack)) {
8123    oop obj = _mark_stack->pop();
8124    HeapWord* addr = (HeapWord*)obj;
8125    assert(_span.contains(addr), "Should be within span");
8126    assert(_bit_map->isMarked(addr), "Should be marked");
8127    assert(obj->is_oop(), "Should be an oop");
8128    obj->oop_iterate(_keep_alive);
8129  }
8130}
8131
8132void CMSParDrainMarkingStackClosure::do_void() {
8133  // drain queue
8134  trim_queue(0);
8135}
8136
8137// Trim our work_queue so its length is below max at return
8138void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8139  while (_work_queue->size() > max) {
8140    oop new_oop;
8141    if (_work_queue->pop_local(new_oop)) {
8142      assert(new_oop->is_oop(), "Expected an oop");
8143      assert(_bit_map->isMarked((HeapWord*)new_oop),
8144             "no white objects on this stack!");
8145      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8146      // iterate over the oops in this oop, marking and pushing
8147      // the ones in CMS heap (i.e. in _span).
8148      new_oop->oop_iterate(&_mark_and_push);
8149    }
8150  }
8151}
8152
8153////////////////////////////////////////////////////////////////////
8154// Support for Marking Stack Overflow list handling and related code
8155////////////////////////////////////////////////////////////////////
8156// Much of the following code is similar in shape and spirit to the
8157// code used in ParNewGC. We should try and share that code
8158// as much as possible in the future.
8159
8160#ifndef PRODUCT
8161// Debugging support for CMSStackOverflowALot
8162
8163// It's OK to call this multi-threaded;  the worst thing
8164// that can happen is that we'll get a bunch of closely
8165// spaced simulated overflows, but that's OK, in fact
8166// probably good as it would exercise the overflow code
8167// under contention.
8168bool CMSCollector::simulate_overflow() {
8169  if (_overflow_counter-- <= 0) { // just being defensive
8170    _overflow_counter = CMSMarkStackOverflowInterval;
8171    return true;
8172  } else {
8173    return false;
8174  }
8175}
8176
8177bool CMSCollector::par_simulate_overflow() {
8178  return simulate_overflow();
8179}
8180#endif
8181
8182// Single-threaded
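// The overflow list is threaded through the objects' mark words (see
// push_on_overflow_list() below), which is why each object popped here has
// its mark restored to the prototype value.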
8183bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8184  assert(stack->isEmpty(), "Expected precondition");
8185  assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8186  size_t i = num;
8187  oop  cur = _overflow_list;
8188  const markOop proto = markOopDesc::prototype();
8189  NOT_PRODUCT(ssize_t n = 0;)
8190  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8191    next = oop(cur->mark());
8192    cur->set_mark(proto);   // until proven otherwise
8193    assert(cur->is_oop(), "Should be an oop");
8194    bool res = stack->push(cur);
8195    assert(res, "Bit off more than can chew?");
8196    NOT_PRODUCT(n++;)
8197  }
8198  _overflow_list = cur;
8199#ifndef PRODUCT
8200  assert(_num_par_pushes >= n, "Too many pops?");
8201  _num_par_pushes -= n;
8202#endif
8203  return !stack->isEmpty();
8204}
8205
8206#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
8207// (MT-safe) Get a prefix of at most "num" from the list.
8208// The overflow list is chained through the mark word of
8209// each object in the list. We fetch the entire list,
8210// break off a prefix of the right size and return the
8211// remainder. If other threads try to take objects from
8212// the overflow list at that time, they will wait for
8213// some time to see if data becomes available. If (and
8214// only if) another thread places one or more object(s)
8215// on the global list before we have returned the suffix
8216// to the global list, we will walk down our local list
8217// to find its end and append the global list to
8218// our suffix before returning it. This suffix walk can
8219// prove to be expensive (quadratic in the amount of traffic)
8220// when there are many objects in the overflow list and
8221// there is much producer-consumer contention on the list.
8222// *NOTE*: The overflow list manipulation code here and
8223// in ParNewGeneration:: are very similar in shape,
8224// except that in the ParNew case we use the old (from/eden)
8225// copy of the object to thread the list via its klass word.
8226// Because of the common code, if you make any changes in
8227// the code below, please check the ParNew version to see if
8228// similar changes might be needed.
8229// CR 6797058 has been filed to consolidate the common code.
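// In outline, the protocol below is:
//   1. Atomically swap the BUSY sentinel into _overflow_list, claiming the
//      whole chain (spinning briefly, with short sleeps, if another taker
//      already holds it).
//   2. Walk at most "num" objects down the claimed chain to split off a
//      prefix for this thread.
//   3. Put the remaining suffix back: with a single CAS if the list is still
//      BUSY/NULL, otherwise by walking to the suffix's tail and splicing it
//      ahead of whatever other threads have pushed in the meantime.
//   4. Push the prefix onto the caller's work queue, restoring each object's
//      prototype mark word along the way.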
8230bool CMSCollector::par_take_from_overflow_list(size_t num,
8231                                               OopTaskQueue* work_q,
8232                                               int no_of_gc_threads) {
8233  assert(work_q->size() == 0, "First empty local work queue");
8234  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8235  if (_overflow_list == NULL) {
8236    return false;
8237  }
8238  // Grab the entire list; we'll put back a suffix
8239  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8240  Thread* tid = Thread::current();
8241  // Before "no_of_gc_threads" was introduced, CMSOverflowSpinCount was
8242  // set to ParallelGCThreads.
8243  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8244  size_t sleep_time_millis = MAX2((size_t)1, num/100);
8245  // If the list is busy, we spin for a short while,
8246  // sleeping between attempts to get the list.
8247  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8248    os::sleep(tid, sleep_time_millis, false);
8249    if (_overflow_list == NULL) {
8250      // Nothing left to take
8251      return false;
8252    } else if (_overflow_list != BUSY) {
8253      // Try and grab the prefix
8254      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8255    }
8256  }
8257  // If the list was found to be empty, or we spun long
8258  // enough, we give up and return empty-handed. If we leave
8259  // the list in the BUSY state below, it must be the case that
8260  // some other thread holds the overflow list and will set it
8261  // to a non-BUSY state in the future.
8262  if (prefix == NULL || prefix == BUSY) {
8263     // Nothing to take or waited long enough
8264     if (prefix == NULL) {
8265       // Write back the NULL in case we overwrote it with BUSY above
8266       // and it is still the same value.
8267       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8268     }
8269     return false;
8270  }
8271  assert(prefix != NULL && prefix != BUSY, "Error");
8272  size_t i = num;
8273  oop cur = prefix;
8274  // Walk down the first "num" objects, unless we reach the end.
8275  for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8276  if (cur->mark() == NULL) {
8277    // We have "num" or fewer elements in the list, so there
8278    // is nothing to return to the global list.
8279    // Write back the NULL in lieu of the BUSY we wrote
8280    // above, if it is still the same value.
8281    if (_overflow_list == BUSY) {
8282      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8283    }
8284  } else {
8285    // Chop off the suffix and return it to the global list.
8286    assert(cur->mark() != BUSY, "Error");
8287    oop suffix_head = cur->mark(); // suffix will be put back on global list
8288    cur->set_mark(NULL);           // break off suffix
8289    // It's possible that the list is still in the empty (busy) state
8290    // we left it in a short while ago; in that case we may be
8291    // able to place back the suffix without incurring the cost
8292    // of a walk down the list.
8293    oop observed_overflow_list = _overflow_list;
8294    oop cur_overflow_list = observed_overflow_list;
8295    bool attached = false;
8296    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8297      observed_overflow_list =
8298        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8299      if (cur_overflow_list == observed_overflow_list) {
8300        attached = true;
8301        break;
8302      } else cur_overflow_list = observed_overflow_list;
8303    }
8304    if (!attached) {
8305      // Too bad, someone else sneaked in (at least) an element; we'll need
8306      // to do a splice. Find tail of suffix so we can prepend suffix to global
8307      // list.
8308      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8309      oop suffix_tail = cur;
8310      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8311             "Tautology");
8312      observed_overflow_list = _overflow_list;
8313      do {
8314        cur_overflow_list = observed_overflow_list;
8315        if (cur_overflow_list != BUSY) {
8316          // Do the splice ...
8317          suffix_tail->set_mark(markOop(cur_overflow_list));
8318        } else { // cur_overflow_list == BUSY
8319          suffix_tail->set_mark(NULL);
8320        }
8321        // ... and try to place spliced list back on overflow_list ...
8322        observed_overflow_list =
8323          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8324      } while (cur_overflow_list != observed_overflow_list);
8325      // ... until we have succeeded in doing so.
8326    }
8327  }
8328
8329  // Push the prefix elements on work_q
8330  assert(prefix != NULL, "control point invariant");
8331  const markOop proto = markOopDesc::prototype();
8332  oop next;
8333  NOT_PRODUCT(ssize_t n = 0;)
8334  for (cur = prefix; cur != NULL; cur = next) {
8335    next = oop(cur->mark());
8336    cur->set_mark(proto);   // until proven otherwise
8337    assert(cur->is_oop(), "Should be an oop");
8338    bool res = work_q->push(cur);
8339    assert(res, "Bit off more than we can chew?");
8340    NOT_PRODUCT(n++;)
8341  }
8342#ifndef PRODUCT
8343  assert(_num_par_pushes >= n, "Too many pops?");
8344  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8345#endif
8346  return true;
8347}
8348
8349// Single-threaded
8350void CMSCollector::push_on_overflow_list(oop p) {
8351  NOT_PRODUCT(_num_par_pushes++;)
8352  assert(p->is_oop(), "Not an oop");
8353  preserve_mark_if_necessary(p);
8354  p->set_mark((markOop)_overflow_list);
8355  _overflow_list = p;
8356}
8357
8358// Multi-threaded; use CAS to prepend to overflow list
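// If the observed head is the BUSY sentinel (a taker currently owns the
// chain), we must not link to it: the pushed object instead starts a fresh,
// NULL-terminated list, and the taker will splice its suffix around us when
// it writes the list back (see par_take_from_overflow_list() above).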
8359void CMSCollector::par_push_on_overflow_list(oop p) {
8360  NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8361  assert(p->is_oop(), "Not an oop");
8362  par_preserve_mark_if_necessary(p);
8363  oop observed_overflow_list = _overflow_list;
8364  oop cur_overflow_list;
8365  do {
8366    cur_overflow_list = observed_overflow_list;
8367    if (cur_overflow_list != BUSY) {
8368      p->set_mark(markOop(cur_overflow_list));
8369    } else {
8370      p->set_mark(NULL);
8371    }
8372    observed_overflow_list =
8373      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8374  } while (cur_overflow_list != observed_overflow_list);
8375}
8376#undef BUSY
8377
8378// Single threaded
8379// General Note on GrowableArray: pushes may silently fail
8380// because we are (temporarily) out of C-heap for expanding
8381// the stack. The problem is quite ubiquitous and affects
8382// a lot of code in the JVM. The prudent thing for GrowableArray
8383// to do (for now) is to exit with an error. However, that may
8384// be too draconian in some cases because the caller may be
8385// able to recover without much harm. For such cases, we
8386// should probably introduce a "soft_push" method which returns
8387// an indication of success or failure with the assumption that
8388// the caller may be able to recover from a failure; code in
8389// the VM can then be changed, incrementally, to deal with such
8390// failures where possible, thus, incrementally hardening the VM
8391// in such low resource situations.
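// Marks need preserving here because threading an object onto the overflow
// list overwrites its mark word with the list link; any mark that cannot
// simply be reset to the prototype (per must_be_preserved()) is saved on
// these side stacks and written back at the next safepoint by
// restore_preserved_marks_if_any().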
8392void CMSCollector::preserve_mark_work(oop p, markOop m) {
8393  _preserved_oop_stack.push(p);
8394  _preserved_mark_stack.push(m);
8395  assert(m == p->mark(), "Mark word changed");
8396  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8397         "bijection");
8398}
8399
8400// Single threaded
8401void CMSCollector::preserve_mark_if_necessary(oop p) {
8402  markOop m = p->mark();
8403  if (m->must_be_preserved(p)) {
8404    preserve_mark_work(p, m);
8405  }
8406}
8407
8408void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8409  markOop m = p->mark();
8410  if (m->must_be_preserved(p)) {
8411    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8412    // Even though we read the mark word without holding
8413    // the lock, we are assured that it will not change
8414    // because we "own" this oop, so no other thread can
8415    // be trying to push it on the overflow list; see
8416    // the assertion in preserve_mark_work() that checks
8417    // that m == p->mark().
8418    preserve_mark_work(p, m);
8419  }
8420}
8421
8422// We should be able to do this multi-threaded,
8423// a chunk of stack being a task (this is
8424// correct because each oop only ever appears
8425// once in the overflow list). However, it's
8426// not very easy to completely overlap this with
8427// other operations, so will generally not be done
8428// until all work's been completed. Because we
8429// expect the preserved oop stack (set) to be small,
8430// it's probably fine to do this single-threaded.
8431// We can explore cleverer concurrent/overlapped/parallel
8432// processing of preserved marks if we feel the
8433// need for this in the future. Stack overflow should
8434// be so rare in practice and, when it happens, its
8435// effect on performance so great that this will
8436// likely just be in the noise anyway.
8437void CMSCollector::restore_preserved_marks_if_any() {
8438  assert(SafepointSynchronize::is_at_safepoint(),
8439         "world should be stopped");
8440  assert(Thread::current()->is_ConcurrentGC_thread() ||
8441         Thread::current()->is_VM_thread(),
8442         "should be single-threaded");
8443  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8444         "bijection");
8445
8446  while (!_preserved_oop_stack.is_empty()) {
8447    oop p = _preserved_oop_stack.pop();
8448    assert(p->is_oop(), "Should be an oop");
8449    assert(_span.contains(p), "oop should be in _span");
8450    assert(p->mark() == markOopDesc::prototype(),
8451           "Set when taken from overflow list");
8452    markOop m = _preserved_mark_stack.pop();
8453    p->set_mark(m);
8454  }
8455  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8456         "stacks were cleared above");
8457}
8458
8459#ifndef PRODUCT
8460bool CMSCollector::no_preserved_marks() const {
8461  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8462}
8463#endif
8464
8465// Transfer some number of overflown objects to usual marking
8466// stack. Return true if some objects were transferred.
8467bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8468  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8469                    (size_t)ParGCDesiredObjsFromOverflowList);
8470
8471  bool res = _collector->take_from_overflow_list(num, _mark_stack);
8472  assert(_collector->overflow_list_is_empty() || res,
8473         "If list is not empty, we should have taken something");
8474  assert(!res || !_mark_stack->isEmpty(),
8475         "If we took something, it should now be on our stack");
8476  return res;
8477}
8478
8479size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8480  size_t res = _sp->block_size_no_stall(addr, _collector);
8481  if (_sp->block_is_obj(addr)) {
8482    if (_live_bit_map->isMarked(addr)) {
8483      // It can't have been dead in a previous cycle
8484      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8485    } else {
8486      _dead_bit_map->mark(addr);      // mark the dead object
8487    }
8488  }
8489  // Could be 0, if the block size could not be computed without stalling.
8490  return res;
8491}
8492
8493TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8494
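  // A single CMS cycle is reported to the memory manager in pieces: the
  // initial-mark pause records the GC start time and pre-GC usage, the
  // final-mark pause only accumulates pause time, and the sweep records
  // peak/post-GC usage and the end time and counts the collection; hence
  // the per-phase flag patterns below.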
8495  switch (phase) {
8496    case CMSCollector::InitialMarking:
8497      initialize(true  /* fullGC */ ,
8498                 cause /* cause of the GC */,
8499                 true  /* recordGCBeginTime */,
8500                 true  /* recordPreGCUsage */,
8501                 false /* recordPeakUsage */,
8502                 false /* recordPostGCusage */,
8503                 true  /* recordAccumulatedGCTime */,
8504                 false /* recordGCEndTime */,
8505                 false /* countCollection */  );
8506      break;
8507
8508    case CMSCollector::FinalMarking:
8509      initialize(true  /* fullGC */ ,
8510                 cause /* cause of the GC */,
8511                 false /* recordGCBeginTime */,
8512                 false /* recordPreGCUsage */,
8513                 false /* recordPeakUsage */,
8514                 false /* recordPostGCusage */,
8515                 true  /* recordAccumulatedGCTime */,
8516                 false /* recordGCEndTime */,
8517                 false /* countCollection */  );
8518      break;
8519
8520    case CMSCollector::Sweeping:
8521      initialize(true  /* fullGC */ ,
8522                 cause /* cause of the GC */,
8523                 false /* recordGCBeginTime */,
8524                 false /* recordPreGCUsage */,
8525                 true  /* recordPeakUsage */,
8526                 true  /* recordPostGCusage */,
8527                 false /* recordAccumulatedGCTime */,
8528                 true  /* recordGCEndTime */,
8529                 true  /* countCollection */  );
8530      break;
8531
8532    default:
8533      ShouldNotReachHere();
8534  }
8535}
8536